metadata (dict) | text (string, 0-40.6M chars) | id (string, 14-255 chars)
---|---|---
{
"filename": "test_waste_free_smc.py",
"repo_name": "blackjax-devs/blackjax",
"repo_path": "blackjax_extracted/blackjax-main/tests/smc/test_waste_free_smc.py",
"type": "Python"
}
|
"""Test the tempered SMC steps and routine"""
import functools
import chex
import jax
import jax.numpy as jnp
import numpy as np
import pytest
from absl.testing import absltest
import blackjax
import blackjax.smc.resampling as resampling
from blackjax import adaptive_tempered_smc, tempered_smc
from blackjax.smc import extend_params
from blackjax.smc.waste_free import update_waste_free, waste_free_smc
from tests.smc import SMCLinearRegressionTestCase
from tests.smc.test_tempered_smc import inference_loop
class WasteFreeSMCTest(SMCLinearRegressionTestCase):
"""Test posterior mean estimate."""
def setUp(self):
super().setUp()
self.key = jax.random.key(42)
@chex.variants(with_jit=True)
def test_fixed_schedule_tempered_smc(self):
(
init_particles,
logprior_fn,
loglikelihood_fn,
) = self.particles_prior_loglikelihood()
num_tempering_steps = 10
lambda_schedule = np.logspace(-5, 0, num_tempering_steps)
hmc_init = blackjax.hmc.init
hmc_kernel = blackjax.hmc.build_kernel()
hmc_parameters = extend_params(
{
"step_size": 10e-2,
"inverse_mass_matrix": jnp.eye(2),
"num_integration_steps": 50,
},
)
tempering = tempered_smc(
logprior_fn,
loglikelihood_fn,
hmc_kernel,
hmc_init,
hmc_parameters,
resampling.systematic,
None,
waste_free_smc(100, 4),
)
init_state = tempering.init(init_particles)
smc_kernel = self.variant(tempering.step)
def body_fn(carry, lmbda):
i, state = carry
subkey = jax.random.fold_in(self.key, i)
new_state, info = smc_kernel(subkey, state, lmbda)
return (i + 1, new_state), (new_state, info)
(_, result), _ = jax.lax.scan(body_fn, (0, init_state), lambda_schedule)
self.assert_linear_regression_test_case(result)
@chex.variants(with_jit=True)
def test_adaptive_tempered_smc(self):
(
init_particles,
logprior_fn,
loglikelihood_fn,
) = self.particles_prior_loglikelihood()
hmc_init = blackjax.hmc.init
hmc_kernel = blackjax.hmc.build_kernel()
hmc_parameters = extend_params(
{
"step_size": 10e-2,
"inverse_mass_matrix": jnp.eye(2),
"num_integration_steps": 50,
},
)
tempering = adaptive_tempered_smc(
logprior_fn,
loglikelihood_fn,
hmc_kernel,
hmc_init,
hmc_parameters,
resampling.systematic,
0.5,
update_strategy=waste_free_smc(100, 4),
num_mcmc_steps=None,
)
init_state = tempering.init(init_particles)
n_iter, result, log_likelihood = self.variant(
functools.partial(inference_loop, tempering.step)
)(self.key, init_state)
self.assert_linear_regression_test_case(result)
class Update_waste_free_multivariate_particles(chex.TestCase):
@chex.variants(with_jit=True)
def test_update_waste_free_multivariate_particles(self):
"""
Given resampled multivariate particles,
when updating with the waste-free strategy, they are joined
with the states produced by iterating the MCMC chain,
yielding a larger set of particles.
"""
resampled_particles = np.ones((50, 3))
n_particles = 100
def normal_logdensity(x):
return jnp.log(
jax.scipy.stats.multivariate_normal.pdf(
x, mean=np.zeros(3), cov=np.diag(np.ones(3))
)
)
def rmh_proposal_distribution(rng_key, position):
return position + jax.random.normal(rng_key, (3,)) * 25.0
kernel = functools.partial(
blackjax.rmh.build_kernel(), transition_generator=rmh_proposal_distribution
)
init = blackjax.rmh.init
update, _ = waste_free_smc(n_particles, 2)(
init, normal_logdensity, kernel, n_particles
)
updated_particles, infos = self.variant(update)(
jax.random.split(jax.random.PRNGKey(10), 50), resampled_particles, {}
)
assert updated_particles.shape == (n_particles, 3)
def test_waste_free_set_num_mcmc_steps():
with pytest.raises(ValueError) as exc_info:
update_waste_free(
lambda x: x, lambda x: 1, lambda x: 1, 100, 10, 3, num_mcmc_steps=50
)
assert str(exc_info.value).startswith(
"Can't use waste free SMC with a num_mcmc_steps parameter"
)
def test_waste_free_p_non_divider():
with pytest.raises(ValueError) as exc_info:
waste_free_smc(100, 3)
assert str(exc_info.value).startswith("p must be a divider")
if __name__ == "__main__":
absltest.main()
|
blackjax-devsREPO_NAMEblackjaxPATH_START.@blackjax_extracted@blackjax-main@tests@smc@[email protected]_END.py
|
{
"filename": "_y0.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/box/_y0.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Y0Validator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="y0", parent_name="box", **kwargs):
super(Y0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@box@[email protected]_END.py
|
{
"filename": "crd_trans.py",
"repo_name": "dullemond/radmc3d-2.0",
"repo_path": "radmc3d-2.0_extracted/radmc3d-2.0-master/python/radmc3dPy/radmc3dPy/crd_trans.py",
"type": "Python"
}
|
"""
This module contains functions for coordinate transformations (e.g. rotation).
For help on the syntax or functionality of each function see the help of the individual functions
"""
from __future__ import absolute_import
from __future__ import print_function
import traceback
try:
import numpy as np
except ImportError:
np = None
print(traceback.format_exc())
def ctransSph2Cart(crd=None, reverse=False):
"""Transform coordinates between spherical to cartesian systems
Parameters
----------
crd : ndarray
Three element array containing the input
coordinates [r,theta,phi] or [x,y,z]; by default
the coordinates are assumed to be in the spherical system
reverse : bool
If True calculates the inverse transformation
(cartesian -> spherical). In this case crd should be [x,y,z]
Returns
-------
Returns a three element array containing the output coordinates [x,y,z] or, if reverse is True, [r,theta,phi]
"""
if crd is None:
raise ValueError('Unknown crd. Cannot do coordinate transformation without knowing the coordinates.')
if reverse is False:
r = crd[0]
theta = crd[1] + 1e-50
phi = crd[2]
x = np.sin(theta) * np.cos(phi) * r
y = np.sin(theta) * np.sin(phi) * r
z = np.cos(theta) * r
crdout = [x, y, z]
else:
x = crd[0]
y = crd[1]
z = crd[2]
r = np.sqrt(x**2 + y**2 + z**2)
phi = np.arccos(x / np.sqrt(x**2 + y**2) + 1e-90)
theta = np.arccos(z / r)
if y < 0.0:
phi = 2.0 * np.pi - phi
crdout = [r, theta, phi]
return crdout
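# Editor's note: a minimal round-trip sketch (illustrative only, not part of the
# original module), assuming the numpy import above succeeded:
#
#     >>> ctransSph2Cart([np.sqrt(2.0), np.pi / 2.0, np.pi / 4.0])
#     [1.0, 1.0, ~0.0]                       # [x, y, z]
#     >>> ctransSph2Cart([1.0, 1.0, 0.0], reverse=True)
#     [~1.414, ~1.571, ~0.785]               # [r, theta, phi]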
def vtransSph2Cart(crd=None, v=None, reverse=False):
"""Transform velocities between spherical to cartesian systems
Parameters
----------
crd : ndarray
Three element array containing the input
coordinates [r,theta,phi] or [x,y,z]; by default
the coordinates are assumed to be in the spherical system
v : ndarray
Three element array containing the input
velocities in the same coordinate system as crd
reverse : bool
If True it calculates the inverse transformation (cartesian -> spherical)
Returns
-------
Returns a three element array containing the output velocities [vr,vtheta,vphi] or [vx,vy,vz]
"""
# NOTE!!!!! The velocities in the spherical system are not angular velocities!!!!
# v[1] = dtheta/dt * r
# v[2] = dphi/dt * r * sin(theta)
if crd is None:
raise ValueError('Unknown crd. Cannot do coordinate transformation without knowing the coordinates.')
if v is None:
raise ValueError('Unknown v. Cannot transform vectors without knowing the vectors themselves.')
if reverse is False:
# r = crd[0]
theta = crd[1]
phi = crd[2]
vr = v[0]
vtheta = v[1]
vphi = v[2]
vx = vr * np.sin(theta) * np.cos(phi) - vphi * np.sin(phi) + vtheta * np.cos(theta) * np.cos(phi)
vy = vr * np.sin(theta) * np.sin(phi) + vphi * np.cos(phi) + vtheta * np.cos(theta) * np.sin(phi)
vz = vr * np.cos(theta) - vtheta * np.sin(theta)
vout = [vx, vy, vz]
else:
# crd_sph = ctrans_sph2cart(crd, reverse=True)
# r = crd_sph[0]
# theta = crd_sph[1]
# phi = crd_sph[2]
# a = [[np.sin(theta)*np.cos(phi), -np.sin(phi), np.cos(theta)*np.cos(phi)],\
# [np.sin(theta)*np.sin(phi), np.cos(phi), np.cos(theta)*np.sin(phi)],\
# [np.cos(theta), 0., -np.sin(theta)]]
# a = [[np.sin(theta)*np.cos(phi), np.cos(theta)*np.cos(phi), -np.sin(phi)],\
# [np.sin(theta)*np.sin(phi), np.cos(theta)*np.sin(phi), np.cos(phi)],\
# [np.cos(theta), -np.sin(theta),0.]]
# a = np.array(a, dtype=np.float64)
# vout = np.linalg.solve(a,v)
#
# New stuff
#
vout = np.zeros(3, dtype=float)
r = np.sqrt((crd**2).sum())
rc = np.sqrt(crd[0]**2 + crd[1]**2)
# Vr
vout[0] = (crd * v).sum() / r
# Vtheta
vout[1] = (crd[2] * (crd[0] * v[0] + crd[1] * v[1]) - v[2] * rc**2) / (r * rc)
# Vphi
vout[2] = (crd[0] * v[1] - crd[1] * v[0]) / rc
return vout
def csrot(crd=None, ang=None, xang=0.0, yang=0.0, zang=0.0, deg=False):
""" Performs coordinate system rotation.
Parameters
----------
crd : numpy ndarray
Three element vector containing the coordinates of a given point in a cartesian system
ang : list, ndarray
Three element list/ndarray describing the rotation angles around the x, y and z axes, respectively
xang: float
Rotation around the x-axis
yang: float
Rotation around the y-axis
zang: float
Rotation around the z-axis
deg : float, optional
If True angles should be given in degree instead of radians (as by default)
Returns
-------
list
Returns a three element list with the rotated coordinates
Notes
-----
Rotation matrices
Around the x-axis:
.. math::
\\left(\\begin{matrix}
1 & 0 & 0 \\\\
0 & cos(\\alpha) & -sin(\\alpha)\\\\
0 & sin(\\alpha) & cos(\\alpha)
\\end{matrix}\\right)
Around the y-axis:
.. math::
\\left(\\begin{matrix}
cos(\\beta) & 0 & sin(\\beta) \\\\
0 & 1 & 0\\\\
-sin(\\beta) & 0 & cos(\\beta)
\\end{matrix}\\right)
Around the z-axis
.. math::
\\left(\\begin{matrix}
cos(\\gamma) & -sin(\\gamma) & 0 \\\\
sin(\\gamma) & cos(\\gamma) & 0 \\\\
0 & 0 & 1
\\end{matrix}\\right)
"""
if crd is None:
raise ValueError('Unknown crd. Cannot do coordinate transformation without knowing the coordinates.')
if ang is None:
if (xang == 0.) & (yang == 0.) & (zang == 0.):
return crd
if ang is not None:
xang = ang[0]
yang = ang[1]
zang = ang[2]
#
# Convert degree into radian if the angles are given in degree
#
if deg:
xang = xang / 180.0 * np.pi
yang = yang / 180.0 * np.pi
zang = zang / 180.0 * np.pi
crd_new = np.zeros(len(crd), dtype=np.float64)
#
# Rotation around the x axis
#
if xang != 0.0:
dumx = crd[0]
dumy = np.cos(xang) * crd[1] - np.sin(xang) * crd[2]
dumz = np.sin(xang) * crd[1] + np.cos(xang) * crd[2]
crd_new = [dumx, dumy, dumz]
#
# Rotation around the y axis
#
if yang != 0.0:
dumx = np.cos(yang) * crd[0] + np.sin(yang) * crd[2]
dumy = crd[1]
dumz = -np.sin(yang) * crd[0] + np.cos(yang) * crd[2]
crd_new = [dumx, dumy, dumz]
#
# Rotation around the z axis
#
if zang != 0.0:
dumx = np.cos(zang) * crd[0] - np.sin(zang) * crd[1] + 0.0
dumy = np.sin(zang) * crd[0] + np.cos(zang) * crd[1] + 0.0
dumz = crd[2]
crd_new = [dumx, dumy, dumz]
return crd_new
def vrot(crd=None, v=None, ang=None):
"""Rotates a vector in spherical coordinate system.
The vector is first transformed to the Cartesian system, rotated there, and then
transformed back to the spherical system.
Parameters
----------
crd : ndarray
Three element array containing the coordinates of a
given point in the spherical system
v : ndarray
Three element array containing the vector (e.g. velocity), given in the
spherical system, that should be rotated
ang : ndarray
Three element array containing the angles to rotate around the x, y, z axes, respectively
"""
if crd is None:
raise ValueError('Unknown crd. Cannot do coordinate transformation without knowing the coordinates.')
if v is None:
raise ValueError('Unknown v. Vector rotation cannot be done without knowing the vectors themselves.')
if ang is None:
raise ValueError('Unknown ang. Vector rotation cannot be done without knowing the rotation angles.')
# Convert the position vector to cartesian coordinate system
crd_xyz = ctransSph2Cart(crd=crd)
# Convert the velocity vector to cartesian coordinate system
v_xyz = vtransSph2Cart(crd=crd, v=v)
# Rotate the vector
v_xyz_rot = csrot(crd=v_xyz, ang=ang)
# Transform the rotated vector back to the spherical coordinate system
v_rot = vtransSph2Cart(crd=crd_xyz, v=v_xyz_rot, reverse=True)
return v_rot
|
[email protected][email protected]@python@radmc3dPy@radmc3dPy@[email protected]_END.py
|
{
"filename": "test_st_prompt_list.py",
"repo_name": "simonsobs/nextline-rdb",
"repo_path": "nextline-rdb_extracted/nextline-rdb-main/src/nextline_rdb/models/strategies/tests/test_st_prompt_list.py",
"type": "Python"
}
|
from typing import Optional, TypedDict
from hypothesis import Phase, given, note, settings
from hypothesis import strategies as st
from nextline_test_utils import safe_compare as sc
from nextline_test_utils.strategies import st_none_or, st_ranges
from ... import Model, Run
from .. import (
st_model_prompt_list,
st_model_run,
st_model_trace_call_list,
st_model_trace_list,
)
from .funcs import assert_model_persistence
class StModelPromptListKwargs(TypedDict, total=False):
run: Optional[Run]
min_size: int
max_size: Optional[int]
@st.composite
def st_st_model_prompt_list_kwargs(draw: st.DrawFn) -> StModelPromptListKwargs:
kwargs = StModelPromptListKwargs()
if draw(st.booleans()):
# generate_traces=False because True would generate a trace with prompts
run = draw(st_none_or(st_model_run(generate_traces=False)))
kwargs['run'] = run
if run:
draw(st_none_or(st_model_trace_list(run=run, min_size=0, max_size=3)))
draw(st_none_or(st_model_trace_call_list(run=run, min_size=0, max_size=5)))
if draw(st.booleans()):
min_size, max_size = draw(
st_ranges(
st.integers,
min_start=0,
max_end=4,
allow_start_none=False,
allow_end_none=False,
)
)
assert isinstance(min_size, int)
kwargs['min_size'] = min_size
kwargs['max_size'] = max_size
return kwargs
@given(kwargs=st_st_model_prompt_list_kwargs())
def test_st_model_prompt_list_kwargs(kwargs: StModelPromptListKwargs) -> None:
assert sc(kwargs.get('min_size')) <= sc(kwargs.get('max_size'))
@settings(max_examples=500, phases=(Phase.generate,)) # Avoid shrinking
@given(st.data())
async def test_options(data: st.DataObject) -> None:
# Generate options of the strategy to be tested
kwargs = data.draw(st_st_model_prompt_list_kwargs())
note(kwargs)
# Call the strategy to be tested
prompts = data.draw(st_model_prompt_list(**kwargs))
# Assert the generated values
run = kwargs.get('run')
min_size = kwargs.get('min_size', 0)
max_size = kwargs.get('max_size')
if run and not run.trace_calls:
# `prompts` is not generated if `run` with no `trace_calls` is provided
assert not prompts
else:
assert min_size <= len(prompts) <= sc(max_size)
if prompts:
runs = set(prompt.trace.run for prompt in prompts)
assert len(runs) == 1
assert run is None or run is runs.pop()
@settings(phases=(Phase.generate,)) # Avoid shrinking
@given(instances=st_model_prompt_list(max_size=5))
async def test_db(instances: list[Model]) -> None:
await assert_model_persistence(instances)
|
simonsobsREPO_NAMEnextline-rdbPATH_START.@nextline-rdb_extracted@nextline-rdb-main@src@nextline_rdb@models@strategies@tests@[email protected]_END.py
|
{
"filename": "helpers.py",
"repo_name": "spacetelescope/calcos",
"repo_path": "calcos_extracted/calcos-master/tests/helpers.py",
"type": "Python"
}
|
"""CALCOS regression test helpers."""
import os
import sys
import pytest
from ci_watson.artifactory_helpers import get_bigdata
from ci_watson.hst_helpers import raw_from_asn, ref_from_image, download_crds
from astropy.io import fits
from astropy.io.fits import FITSDiff
__all__ = ['calref_from_image', 'BaseCOS']
def calref_from_image(input_image):
"""
Return a list of reference filenames, as defined in the primary
header of the given input image, necessary for calibration; i.e.,
only those associated with ``*CORR`` set to ``PERFORM`` will be
considered.
"""
# NOTE: Add additional mapping as needed.
# Map mandatory CRDS reference file for instrument/detector combo.
# This is for file not tied to any particular *CORR or used throughout.
det_lookup = {
('COS', 'FUV'): ['PROFTAB', 'SPWCSTAB'],
('COS', 'NUV'): []}
# NOTE: Add additional mapping as needed.
# Map *CORR to associated CRDS reference file.
corr_lookup = {
'BADTCORR': ['BADTTAB'],
'TEMPCORR': ['BRFTAB'],
'GEOCORR': ['GEOFILE'],
'DGEOCORR': ['DGEOFILE'],
'YWLKCORR': ['YWLKFILE'],
'XWLKCORR': ['XWLKFILE'],
'DEADCORR': ['DEADTAB'],
'PHACORR': ['PHATAB', 'PHAFILE'],
'FLATCORR': ['FLATFILE'],
'WAVECORR': ['LAMPTAB', 'DISPTAB', 'TWOZXTAB', 'XTRACTAB'],
'BRSTCORR': ['BRSTTAB'],
'TRCECORR': ['TRACETAB'],
'ALGNCORR': ['TWOZXTAB'],
'DQICORR': ['SPOTTAB', 'TRACETAB', 'BPIXTAB', 'GSAGTAB'],
'X1DCORR': ['WCPTAB', 'TWOZXTAB', 'XTRACTAB'],
'BACKCORR': ['TWOZXTAB', 'XTRACTAB'],
'FLUXCORR': ['FLUXTAB', 'TDSTAB', 'PHOTTAB'],
'WALKCORR': ['WALKTAB']}
hdr = fits.getheader(input_image, ext=0)
ref_files = ref_from_image(
input_image, det_lookup[(hdr['INSTRUME'], hdr['DETECTOR'])])
for step in corr_lookup:
# Not all images have the CORR step and it is not always on.
if (step not in hdr) or (hdr[step].strip().upper() != 'PERFORM'):
continue
ref_files += ref_from_image(input_image, corr_lookup[step])
# Special case for STATFLAG=T, which requires XTRACTAB, but MissingRefFiles()
# doesn't know
if hdr['STATFLAG']:
ref_files += ref_from_image(input_image, ['XTRACTAB'])
return list(set(ref_files)) # Remove duplicates
# Base class for actual tests.
# NOTE: Named in a way so pytest will not pick them up here.
# NOTE: bigdata marker requires TEST_BIGDATA environment variable to
# point to a valid big data directory, whether locally or on Artifactory.
# NOTE: envopt would point tests to "dev" or "stable".
# NOTE: _jail fixture ensures each test runs in a clean tmpdir.
@pytest.mark.bigdata
@pytest.mark.usefixtures('_jail', 'envopt')
class BaseCOS:
instrument = 'cos'
ignore_keywords = ['DATE', 'CAL_VER']
# To be defined by test class in actual test modules.
detector = ''
@pytest.fixture(autouse=True)
def setup_class(self, envopt):
"""
Class-level setup that is done at the beginning of the test.
Parameters
----------
envopt : {'dev', 'stable'}
This is a ``pytest`` fixture that defines the test
environment in which input and truth files reside.
"""
# Since CALCOS still runs in PY2, need to check here because
# tests can only run in PY3.
if sys.version_info < (3, ):
raise SystemError('tests can only run in Python 3')
self.env = envopt
def get_input_files(self, filenames):
"""
Copy input files (ASN, RAW, etc) into the working directory.
If ASN is given, RAW files in the ASN table are also copied.
The associated CRDS reference files are also copied or
downloaded, if necessary.
Data directory layout for CALCOS::
detector/
input/
truth/
Parameters
----------
filenames : list
List of filenames of the ASN/RAW/etc to copy over, along with their
associated files.
"""
all_raws = []
for file in filenames:
if 'rawtag' in file:
all_raws.append(file)
# List of filenames can include _rawtag, _asn and _spt files
dest = get_bigdata('scsb-calcos', self.env, self.detector, 'input',
file)
# If file is an association table, download raw files specified in the table
if file.endswith('_asn.fits'):
if self.detector == 'nuv':
asn_raws = raw_from_asn(file, '_rawtag.fits')
else:
asn_raws = raw_from_asn(file, '_rawtag_a.fits')
asn_raws += raw_from_asn(file, '_rawtag_b.fits')
for raw in asn_raws: # Download RAWs in ASN.
get_bigdata('scsb-calcos', self.env, self.detector, 'input',
raw)
all_raws += asn_raws
first_pass = ('JENKINS_URL' in os.environ and
'ssbjenkins' in os.environ['JENKINS_URL'])
for raw in all_raws:
ref_files = calref_from_image(raw)
for ref_file in ref_files:
print("Getting reference file {}".format(ref_file))
# Special reference files that live with inputs.
if ('$' not in ref_file and
os.path.basename(ref_file) == ref_file):
get_bigdata('scsb-calcos', self.env, self.detector,
'input', ref_file)
print('{} downloaded successfully'.format(ref_file))
continue
# Jenkins cannot see Central Storage on push event,
# and somehow setting, say, jref to "." does not work anymore.
# So, we need this hack.
if '$' in ref_file and first_pass:
first_pass = False
if not os.path.isdir('/grp/hst/cdbs'):
ref_path = os.path.dirname(dest) + os.sep
var = ref_file.split('$')[0]
os.environ[var] = ref_path # hacky hack hack
# Download reference files, if needed only.
download_crds(ref_file, verbose=True)
def compare_outputs(self, outputs, atol=0, rtol=1e-7, raise_error=True,
ignore_keywords_overwrite=None):
"""
Compare CALXXX output with "truth" using ``fitsdiff``.
Parameters
----------
outputs : list of tuple
A list of tuples, each containing filename (without path)
of CALXXX output and truth, in that order. Example::
[('output1.fits', 'truth1.fits'),
('output2.fits', 'truth2.fits'),
...]
atol, rtol : float
Absolute and relative tolerance for data comparison.
raise_error : bool
Raise ``AssertionError`` if difference is found.
ignore_keywords_overwrite : list of str or `None`
If not `None`, these will overwrite
``self.ignore_keywords`` for the calling test.
Returns
-------
report : str
Report from ``fitsdiff``.
This is part of error message if ``raise_error=True``.
"""
all_okay = True
creature_report = ''
if ignore_keywords_overwrite is None:
ignore_keywords = self.ignore_keywords
else:
ignore_keywords = ignore_keywords_overwrite
for actual, desired in outputs:
desired = get_bigdata('scsb-calcos', self.env, self.detector,
'truth', desired)
fdiff = FITSDiff(actual, desired, rtol=rtol, atol=atol,
ignore_keywords=ignore_keywords)
creature_report += fdiff.report()
if not fdiff.identical and all_okay:
all_okay = False
if not all_okay and raise_error:
raise AssertionError(os.linesep + creature_report)
return creature_report
|
spacetelescopeREPO_NAMEcalcosPATH_START.@calcos_extracted@calcos-master@[email protected]@.PATH_END.py
|
{
"filename": "find_stars.py",
"repo_name": "ucl-exoplanets/pylightcurve",
"repo_path": "pylightcurve_extracted/pylightcurve-master/pylightcurve/images/find_stars.py",
"type": "Python"
}
|
__all__ = ['find_single_star']
import numpy as np
import warnings
from pylightcurve.analysis.gaussian import fit_two_d_gaussian
from pylightcurve.analysis.distributions import one_d_distribution
def find_single_star(data_array, predicted_x, predicted_y, mean=None, std=None, burn_limit=65000, star_std=2,
std_limit=5.0):
star = None
if 0 < predicted_x < len(data_array[0]) and 0 < predicted_y < len(data_array):
if mean is None or std is None:
fit_mean, fit_std = one_d_distribution(data_array, gaussian_fit=True, mad_filter=5)[2:4]
if not mean:
mean = fit_mean
if not std:
std = fit_std
centroids = find_centroids(data_array, predicted_x - 5 * star_std, predicted_x + 5 * star_std,
predicted_y - 5 * star_std, predicted_y + 5 * star_std, mean, std, burn_limit, star_std,
std_limit)
centroids = sorted(centroids, key=lambda x: np.sqrt((x[0] - predicted_x) ** 2 + (x[1] - predicted_y) ** 2))
for centroid in centroids:
star = _star_from_centroid(data_array, centroid[0], centroid[1], mean, std, burn_limit, star_std, std_limit)
if star:
star = [star[0][2], star[0][3], star[0][0], star[0][1], star[0][4], star[0][5], centroid[0], centroid[1]]
break
return star
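# Editor's sketch of typical usage (illustrative only; the synthetic frame and
# positions below are assumptions, not part of the original module):
#
#     frame = np.random.normal(100.0, 5.0, (200, 200))            # flat background
#     yy, xx = np.mgrid[0:200, 0:200]
#     frame += 5000.0 * np.exp(-((xx - 80) ** 2 + (yy - 120) ** 2) / (2 * 2.0 ** 2))
#     star = find_single_star(frame, predicted_x=82, predicted_y=118)
#     # star is a list of fitted centre/shape values, or None if no star was found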
def _star_from_centroid(data_array, centroid_x, centroid_y, mean, std, burn_limit, star_std, std_limit):
star = None
try:
search_window = int(round(10 * star_std))
y_min = int(max(int(centroid_y) - search_window, 0))
y_max = int(min(int(centroid_y) + search_window, len(data_array) - 1))
x_min = int(max(int(centroid_x) - search_window, 0))
x_max = int(min(int(centroid_x) + search_window, len(data_array[0]) - 1))
datax, datay = np.meshgrid(np.arange(x_min, x_max + 1) + 0.5,
np.arange(y_min, y_max + 1) + 0.5)
dataz = data_array[y_min: y_max + 1, x_min: x_max + 1]
popt, pcov = fit_two_d_gaussian(datax, datay, dataz, positive=True, point_xy=(centroid_x, centroid_y),
sigma=star_std, maxfev=1000)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if popt[0] > std_limit * std and popt[0] + popt[1] < burn_limit:
if np.sqrt(pcov[0][0]) != np.inf:
if popt[0] > std_limit * np.sqrt(pcov[0][0]):
star = (popt, pcov)
else:
star = (popt, pcov)
except:
pass
return star
def find_centroids(data_array, x_low, x_upper, y_low, y_upper, mean, std, burn_limit, star_std, std_limit):
x_upper = int(min(x_upper, len(data_array[0])))
y_upper = int(min(y_upper, len(data_array)))
x_low = int(max(0, x_low))
y_low = int(max(0, y_low))
data_array = np.full_like(data_array[y_low:y_upper + 1, x_low:x_upper + 1],
data_array[y_low:y_upper + 1, x_low:x_upper + 1])
test = []
for i in range(-star_std, star_std + 1):
for j in range(-star_std, star_std + 1):
rolled = np.roll(np.roll(data_array, i, 0), j, 1)
test.append(rolled)
median_test = np.median(test, 0)
max_test = np.max(test, 0)
del test
stars = np.where((data_array < burn_limit) & (data_array > mean + std_limit * std) & (max_test == data_array)
& (median_test > mean + 2 * std))
del data_array
stars = [stars[1] + x_low, stars[0] + y_low]
stars = np.swapaxes(stars, 0, 1)
return stars
|
ucl-exoplanetsREPO_NAMEpylightcurvePATH_START.@pylightcurve_extracted@pylightcurve-master@pylightcurve@images@[email protected]_END.py
|
{
"filename": "Useful_Utilities.ipynb",
"repo_name": "LSSTDESC/rail",
"repo_path": "rail_extracted/rail-main/examples/core_examples/Useful_Utilities.ipynb",
"type": "Jupyter Notebook"
}
|
# Useful Utilities
**Authors:** Olivia Lynn
**Last Run Successfully:** September 20, 2023
This is a notebook that contains various utilities that may be used when working with RAIL.
## Setting Things Up
```python
import rail
```
### Listing imported stages (1/2)
Let's list out our currently imported stages. Right now, this will only be what we get by importing `rail` and `rail.stages`.
```python
import rail.stages
for val in rail.core.stage.RailStage.pipeline_stages.values():
print(val[0])
```
### Import and attach all
Using `rail.stages.import_and_attach_all()` lets you import all packages within the RAIL ecosystem at once.
This kind of blanket import is a useful shortcut; however, it will be slower than specific imports, as you will import things you'll never need.
As such, `import_and_attach_all` is recommended for new users and those who wish to do rapid exploration with notebooks; pipelines designed to be run at scale would generally prefer lightweight, specific imports.
```python
import rail
import rail.stages
rail.stages.import_and_attach_all()
```
Now that we've attached all available stages to rail.stages, we can use `from rail.stages import *` to let us omit prefixes.
To see this in action:
```python
# with prefix
print(rail.tools.table_tools.ColumnMapper)
```
```python
# without prefix
try:
print(ColumnMapper)
except Exception as e:
print(e)
```
```python
from rail.stages import *
```
```python
print(ColumnMapper)
```
### Listing imported stages (2/2)
Now, let's try listing imported stages again, and notice how many more we get.
```python
for val in rail.core.stage.RailStage.pipeline_stages.values():
print(val[0])
```
We can use this list of imported stages to browse for specifics, such as looking through our available estimators.
**Note:** this will only filter through what you've imported, so if you haven't imported everything above, this will not be a complete list of all estimators available in RAIL.
```python
for val in rail.core.stage.RailStage.pipeline_stages.values():
if issubclass(val[0], rail.estimation.estimator.CatEstimator):
print(val[0])
```
### Listing keys in the Data Store (1/2)
Let's list out the keys in the Data Store to see what data we have stored.
First, we must set up the Data Store:
```python
DS = rail.core.stage.RailStage.data_store
DS.__class__.allow_overwrite = True
```
And because we've only just created the store, as you may have guessed, it is empty.
We'll come back to this in a bit.
```python
DS.keys()
```
### Finding data files with find_rail_file
We need to define the flow file that we'll use in our pipeline.
If we already know its path, we can just point directly to the file (relative to the directory that holds our `rail/` directory):
```python
import os
from rail.utils.path_utils import RAILDIR
flow_file = os.path.join(
RAILDIR, "rail/examples_data/goldenspike_data/data/pretrained_flow.pkl"
)
```
But if we aren't sure where our file is (or we're just feeling lazy) we can use `find_rail_file`.
This is especially helpful in cases where our installation is spread out, and some rail modules are located separately from others.
```python
from rail.utils.path_utils import find_rail_file
flow_file = find_rail_file('examples_data/goldenspike_data/data/pretrained_flow.pkl')
```
We can set our FLOWDIR based on the location of our flow file, too.
```python
os.environ['FLOWDIR'] = os.path.dirname(flow_file)
```
```python
# Now, we have to set up some other variables for our pipeline:
import numpy as np
bands = ["u", "g", "r", "i", "z", "y"]
band_dict = {band: f"mag_{band}_lsst" for band in bands}
rename_dict = {f"mag_{band}_lsst_err": f"mag_err_{band}_lsst" for band in bands}
post_grid = [float(x) for x in np.linspace(0.0, 5, 21)]
```
## Creating the Pipeline
```python
import ceci
```
```python
# Make some stages
flow_engine_test = FlowCreator.make_stage(
name="flow_engine_test", model=flow_file, n_samples=50
)
col_remapper_test = ColumnMapper.make_stage(
name="col_remapper_test", hdf5_groupname="", columns=rename_dict
)
#flow_engine_test.sample(6, seed=0).data
```
```python
# Add the stages to the pipeline
pipe = ceci.Pipeline.interactive()
stages = [flow_engine_test, col_remapper_test]
for stage in stages:
pipe.add_stage(stage)
```
```python
# Connect stages
col_remapper_test.connect_input(flow_engine_test)
```
## Introspecting the Pipeline
### Listing keys in the Data Store (2/2)
Now that we have some data in the Data Store, let's take another look at it.
```python
DS.keys()
```
### Getting names of stages in the pipeline
```python
pipe.stage_names
```
### Getting the configuration of a particular stage
Let's take a look at the config of the first stage we just listed above.
```python
pipe.flow_engine_test.config
```
### Updating a configuration value
We can update config values even after the stage has been created. Let's give it a try.
```python
pipe.flow_engine_test.config.update(seed=42)
pipe.flow_engine_test.config
```
### Listing stage outputs (as both tags and aliased tags)
Let's get the list of outputs as 'tags'.
These are how the stage thinks of the outputs: as a list of names associated with DataHandle types.
```python
pipe.flow_engine_test.outputs
```
We can also get the list of outputs as 'aliased tags'.
These are how the pipeline thinks of the outputs: as unique keys, each pointing to a particular file.
```python
pipe.flow_engine_test._outputs
```
### Listing all pipeline methods and parameters that can be set
If you'd like to take a closer look at what you can do with a pipeline, use `dir(pipe)` to list out available methods and parameters.
```python
for item in dir(pipe):
if '__' not in item:
print(item)
```
## Initializing the Pipeline
### Toggling resume mode
We can turn 'resume mode' on when initializing a pipeline.
Resume mode lets us skip stages that already have output files, so we don't have to rerun the same stages as we iterate on a pipeline.
Just add a `resume=True` to do so.
```python
pipe.initialize(
dict(model=flow_file), dict(output_dir=".", log_dir=".", resume=True), None
)
```
Running `pipe.stages` should show the order of classes, i.e., all the stages this pipeline will run.
```python
pipe.stages
```
## Managing notebooks with git
_(thank you to https://stackoverflow.com/a/58004619)_
You can modify your git settings to run a filter over certain files before they are added to git. This will leave the original file on disk as-is, but commit the "cleaned" version.
First, add the following to your local `.git/config` file (or global `~/.gitconfig`):
[filter "strip-notebook-output"]
clean = "jupyter nbconvert --ClearOutputPreprocessor.enabled=True --to=notebook --stdin --stdout --log-level=ERROR"
Then, create a `.gitattributes` file in your directory with notebooks and add the following line:
*.ipynb filter=strip-notebook-output
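To confirm that the filter attribute is actually being applied to a given notebook (the filename below is only an example), you can ask git directly:
    git check-attr filter -- Useful_Utilities.ipynb
which should report `filter: strip-notebook-output` for that file.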
|
LSSTDESCREPO_NAMErailPATH_START.@rail_extracted@rail-main@examples@core_examples@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "ahmedfgad/GeneticAlgorithmPython",
"repo_path": "GeneticAlgorithmPython_extracted/GeneticAlgorithmPython-master/pygad/utils/__init__.py",
"type": "Python"
}
|
from pygad.utils import parent_selection
from pygad.utils import crossover
from pygad.utils import mutation
from pygad.utils import nsga2
__version__ = "1.2.1"
|
ahmedfgadREPO_NAMEGeneticAlgorithmPythonPATH_START.@GeneticAlgorithmPython_extracted@GeneticAlgorithmPython-master@pygad@utils@[email protected]_END.py
|
{
"filename": "test_special.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tests/ops/test_special.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from scipy.special import iv
from torch import tensor
from torch.autograd import grad
from pyro.ops.special import get_quad_rule, log_beta, log_binomial, log_I1, safe_log
from tests.common import assert_equal
def test_safe_log():
# Test values.
x = torch.randn(1000).exp().requires_grad_()
expected = x.log()
actual = safe_log(x)
assert_equal(actual, expected)
assert_equal(grad(actual.sum(), [x])[0], grad(expected.sum(), [x])[0])
# Test gradients.
x = torch.tensor(0.0, requires_grad=True)
assert not torch.isfinite(grad(x.log(), [x])[0])
assert torch.isfinite(grad(safe_log(x), [x])[0])
@pytest.mark.parametrize(
"tol",
[
1e-8,
1e-6,
1e-4,
1e-2,
0.02,
0.05,
0.1,
0.2,
0.1,
1.0,
],
)
def test_log_beta_stirling(tol):
x = torch.logspace(-5, 5, 200)
y = x.unsqueeze(-1)
expected = log_beta(x, y)
actual = log_beta(x, y, tol=tol)
assert (actual <= expected).all()
assert (expected < actual + tol).all()
@pytest.mark.parametrize(
"tol",
[
1e-8,
1e-6,
1e-4,
1e-2,
0.02,
0.05,
0.1,
0.2,
0.1,
1.0,
],
)
def test_log_binomial_stirling(tol):
k = torch.arange(200.0)
n_minus_k = k.unsqueeze(-1)
n = k + n_minus_k
# Test binomial coefficient choose(n, k).
expected = (n + 1).lgamma() - (k + 1).lgamma() - (n_minus_k + 1).lgamma()
actual = log_binomial(n, k, tol=tol)
assert (actual - expected).abs().max() < tol
@pytest.mark.parametrize("order", [0, 1, 5, 10, 20])
@pytest.mark.parametrize("value", [0.01, 0.1, 1.0, 10.0, 100.0])
def test_log_I1(order, value):
value = tensor([value])
expected = torch.tensor([iv(i, value.numpy()) for i in range(order + 1)]).log()
actual = log_I1(order, value)
assert_equal(actual, expected)
def test_log_I1_shapes():
assert_equal(log_I1(10, tensor(0.6)).shape, torch.Size([11, 1]))
assert_equal(log_I1(10, tensor([0.6])).shape, torch.Size([11, 1]))
assert_equal(log_I1(10, tensor([[0.6]])).shape, torch.Size([11, 1, 1]))
assert_equal(log_I1(10, tensor([0.6, 0.2])).shape, torch.Size([11, 2]))
assert_equal(log_I1(0, tensor(0.6)).shape, torch.Size((1, 1)))
@pytest.mark.parametrize("sigma", [0.5, 1.25])
def test_get_quad_rule(sigma):
quad_points, log_weights = get_quad_rule(32, torch.zeros(1))
quad_points *= sigma # transform to N(0, sigma) gaussian
variance = torch.logsumexp(quad_points.pow(2.0).log() + log_weights, axis=0).exp()
assert_equal(sigma**2, variance.item())
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tests@ops@[email protected]_END.py
|
{
"filename": "scanner.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/scanner.py",
"type": "Python"
}
|
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
"""
Raise if end of text is reached and the user
tried to call a match function.
"""
class Scanner:
"""
Simple scanner
All method patterns are regular expression strings (not
compiled expressions!)
"""
def __init__(self, text, flags=0):
"""
:param text: The text which should be scanned
:param flags: default regular expression flags
"""
self.data = text
self.data_length = len(text)
self.start_pos = 0
self.pos = 0
self.flags = flags
self.last = None
self.match = None
self._re_cache = {}
def eos(self):
"""`True` if the scanner reached the end of text."""
return self.pos >= self.data_length
eos = property(eos, doc=eos.__doc__)
def check(self, pattern):
"""
Apply `pattern` on the current position and return
the match object. (Doesn't touch pos). Use this for
lookahead.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
return self._re_cache[pattern].match(self.data, self.pos)
def test(self, pattern):
"""Apply a pattern on the current position and check
if it patches. Doesn't touch pos.
"""
return self.check(pattern) is not None
def scan(self, pattern):
"""
Scan the text for the given pattern and update pos/match
and related fields. The return value is a boolean that
indicates if the pattern matched. The matched value is
stored on the instance as ``match``, the last value is
stored as ``last``. ``start_pos`` is the position of the
pointer before the pattern was matched, ``pos`` is the
end position.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
self.last = self.match
m = self._re_cache[pattern].match(self.data, self.pos)
if m is None:
return False
self.start_pos = m.start()
self.pos = m.end()
self.match = m.group()
return True
def get_char(self):
"""Scan exactly one char."""
self.scan('.')
def __repr__(self):
return '<%s %d/%d>' % (
self.__class__.__name__,
self.pos,
self.data_length
)
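# Editor's usage sketch (illustrative only, not part of the original module):
#
#     s = Scanner("program Demo;")
#     s.scan(r'\w+')        # True; s.match == 'program'
#     s.scan(r'\s+')        # True; consumes the blank
#     s.test(r'\w+')        # True; lookahead only, pos is unchanged
#     s.scan(r'\w+')        # True; s.match == 'Demo'
#     s.eos                 # False; the trailing ';' has not been scanned yet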
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@[email protected]@.PATH_END.py
|
{
"filename": "clusters.py",
"repo_name": "ICRAR/shark",
"repo_path": "shark_extracted/shark-master/standard_plots/clusters.py",
"type": "Python"
}
|
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2018
# Copyright by UWA (in the framework of the ICRAR)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import functools
import numpy as np
import h5py
import common
import utilities_statistics as us
##################################
# Constants
mlow = 8.0
mupp = 12.0
dm = 1.0
mbins = np.arange(mlow, mupp, dm)
xmf = mbins + dm/2.0
rlow = 0.0
rupp = 7.0
dr = 0.5
rbins = np.arange(rlow, rupp, dr)
xrf = rbins + dr/2.0
GyrtoYr = 1e9
MpcToKpc = 1e3
G = 4.299e-9 #Gravity constant in units of (km/s)^2 * Mpc/Msun
offMS = 0.2
def add_observations_to_plot(obsdir, fname, ax, marker, label, color='k', err_absolute=False):
fname = '%s/Gas/%s' % (obsdir, fname)
x, y, yerr_down, yerr_up = common.load_observation(obsdir, fname, (0, 1, 2, 3))
common.errorbars(ax, x, y, yerr_down, yerr_up, color, marker, label=label, err_absolute=err_absolute)
def prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit):
common.prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit)
xleg = xmax - 0.2 * (xmax-xmin)
yleg = ymax - 0.1 * (ymax-ymin)
#ax.text(xleg, yleg, 'z=0')
def prepare_data(hdf5_data, fradii, index):
bin_it = functools.partial(us.wmedians, xbins=xmf)
stack = functools.partial(us.stacking, xbins=xmf)
# Unpack data
(h0, _, typeg, mdisk, mbulge, _, _, mHI, mH2, mgas,
mHI_bulge, mH2_bulge, mgas_bulge, mvir, sfrd, sfrb,
x, y, z, vvir) = hdf5_data
XH = 0.72
h0log = np.log10(float(h0))
rvir = G * mvir / pow(vvir,2.0) / h0
mstar_tot = (mdisk + mbulge) / h0
sfr_tot = (sfrd + sfrb) / h0 / GyrtoYr
#define main sequence first
inms = np.where((mstar_tot > 5e8) & (mstar_tot < 7e9) & (typeg == 0) & (sfr_tot > 0))
ms = np.polyfit(np.log10(mstar_tot[inms]), np.log10(sfr_tot[inms]), 2)
gasfracms = np.polyfit(np.log10(mstar_tot[inms]), np.log10(mgas[inms]+mgas_bulge[inms])-h0log, 2)
indcen = np.where((mvir/h0 > 3e14) & (typeg == 0))
x_cen = x[indcen]
y_cen = y[indcen]
z_cen = z[indcen]
rvir_cen = rvir[indcen]
#find the closest central to the centrals in massive clusters
for g in range(0,len(x_cen)):
selec_cens = np.where((typeg == 0) & (mstar_tot > 1e9))
d_all = np.sqrt(pow(x[selec_cens] - x_cen[g], 2.0) + pow(y[selec_cens] - y_cen[g], 2.0) + pow(z[selec_cens] - z_cen[g], 2.0))/h0/rvir_cen[g]
ms_all = mstar_tot[selec_cens]
selec_cens = np.where(d_all > 0)
d_all_in = d_all[selec_cens]
ms_all_in = ms_all[selec_cens]
ids = np.argsort(d_all_in)
print ("minimum distance to a central %s of mass %s" % (str(d_all_in[ids[0]]), str(ms_all_in[ids[0]])))
print ('number of clusters %d'% len(x_cen))
nradii_this_z = np.zeros(shape = (3, len(xmf), len(xrf), len(x_cen)))
#xy projection
for g in range(0,len(x_cen)):
d_all = np.sqrt(pow(x - x_cen[g], 2.0) + pow(y - y_cen[g], 2.0))/h0/rvir_cen[g]
for i in range(0, len(xmf)):
ind = np.where((np.log10(mstar_tot) >= xmf[i] - dm/2.0)
& (np.log10(mstar_tot) < xmf[i] + dm/2.0)
& (d_all < 7.5))
#print 'number of neighbours', len(sfr_tot[ind])
mstars_galsin = np.log10(mstar_tot[ind])
sfr_tot_galsin = sfr_tot[ind]
mgas_tot_galsin = (mgas[ind] + mgas_bulge[ind])/h0
dist_to_ms = sfr_tot_galsin / pow(10.0, (ms[0] * mstars_galsin**2.0 + ms[1] * mstars_galsin + ms[2]))
dist_to_gf = mgas_tot_galsin / pow(10.0, (gasfracms[0] * mstars_galsin**2.0 + gasfracms[1] * mstars_galsin + gasfracms[2]))
dist_proj = d_all[ind]
for j in range(0, len(xrf)):
inr = np.where((dist_proj >= xrf[j] - dr/2.0) & (dist_proj < xrf[j] + dr/2.0))
nradii_this_z[0,i,j,g] = len(dist_proj[inr])
inr = np.where((dist_proj >= xrf[j] - dr/2.0) & (dist_proj < xrf[j] + dr/2.0) & (dist_to_ms > offMS))
nradii_this_z[1,i,j,g] = len(dist_proj[inr])
inr = np.where((dist_proj >= xrf[j] - dr/2.0) & (dist_proj < xrf[j] + dr/2.0) & (dist_to_gf > offMS))
nradii_this_z[2,i,j,g] = len(dist_proj[inr])
for i in range(0, len(xmf)):
for j in range(0, len(xrf)):
selec_cl = np.where(nradii_this_z[0,i,j,:] > 0)
fradii[0,0,index,i,j] = np.median(nradii_this_z[1,i,j,selec_cl] / nradii_this_z[0,i,j,selec_cl])
fradii[0,1,index,i,j] = np.std(nradii_this_z[1,i,j,selec_cl] / nradii_this_z[0,i,j,selec_cl])
fradii[1,0,index,i,j] = np.median(nradii_this_z[2,i,j,selec_cl] / nradii_this_z[0,i,j,selec_cl])
fradii[1,1,index,i,j] = np.std(nradii_this_z[2,i,j,selec_cl] / nradii_this_z[0,i,j,selec_cl])
return nradii_this_z
def plot_fractions_radii(plt, output_dir, fradii):
###################################
# Plots global mass densities
fig = plt.figure(figsize=(6,7))
plt.subplots_adjust(bottom=0.15, left=0.15)
subplots = (321, 322, 323, 324, 325, 326)
zs = (0, 0.3, 0.5)
cols = ('r','yellowgreen','darkblue')
colse = ('Crimson','Green','blue')
xmin, xmax, ymin, ymax = 0, 7, -0.05, 1.05
xleg = xmin + 0.05 * (xmax - xmin)
yleg = ymax - 0.1 * (ymax - ymin)
xtitle = '$\\rm d_{\\rm proj}/cMpc$'
ytitle = '$\\rm fraction$'
labels = ('Main sequence', 'Gas rich')
labelsz = ('z=0', 'z=0.3', 'z=0.5')
#read C-EAGLE data
#ceagledata = h5py.File('../../BuffaloFigure_C-EAGLE_Jul19_longMS.hdf5','r')
ceagledatasf = h5py.File('../../BuffaloFigure_C-EAGLE_30Jul19_longMS_ssfr_Hydrangea.hdf5', 'r')
a_group_key = list(ceagledatasf.keys())[1]
print (a_group_key)
# Get the data
databahesf = list(ceagledatasf[a_group_key])
ceagledatagas = h5py.File('../../BuffaloFigure_C-EAGLE_30Jul19_longMS_hn_Hydrangea.hdf5', 'r')
a_group_key = list(ceagledatagas.keys())[1]
# Get the data
databahegas = list(ceagledatagas[a_group_key])
p = 0
for i in range(0, len(xmf)-1):
for j in range(0,2):
ax = fig.add_subplot(subplots[p])
if(p <= 1):
ax.text(1,1.1,labels[j])
if (p >= 4):
xtit = xtitle
else:
xtit = ''
if (p == 0 or p == 2 or p == 4):
ytit = ytitle
else:
ytit = ''
common.prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit, locators=(1, 1, 1))
ax.text(xleg, yleg, '$M_{\\star}$=%s' % str(xmf[i]))
if(j == 0):
for z in range(0,3):
x = databahesf[i][z][0]
y = databahesf[i][z][1]
yerrdn = databahesf[i][z][2]
yerrup = databahesf[i][z][3]
ind = np.where(y >= 0)
ax.fill_between(x[ind],yerrdn[ind],yerrup[ind], facecolor=colse[z], alpha=0.2,interpolate=True)
ax.plot(x[ind],y[ind],linewidth=2, linestyle='dashed', color=colse[z])
if(j == 1):
for z in range(0,3):
x = databahegas[i][z][0]
y = databahegas[i][z][1]
yerrdn = databahegas[i][z][2]
yerrup = databahegas[i][z][3]
ind = np.where(y >= 0)
ax.fill_between(x[ind],yerrdn[ind],yerrup[ind], facecolor=colse[z], alpha=0.2,interpolate=True)
ax.plot(x[ind],y[ind],linewidth=2, linestyle='dashed', color=colse[z])
#predicted fraction
for z in range (0,3):
ind = np.where(fradii[j,0,z,i,:] > 0)
xplot = xrf[ind]
yplot = fradii[j,0,z,i,ind]
err = fradii[j,1,z,i,ind]
if(p == 2):
ax.plot(xplot, yplot[0], color=cols[z], linestyle='solid', label=labelsz[z], linewidth=2)
else:
ax.plot(xplot, yplot[0], color=cols[z], linestyle='solid', linewidth=2)
ax.fill_between(xplot,yplot[0],yplot[0]-err[0], facecolor=cols[z], alpha=0.2,interpolate=True)
ax.fill_between(xplot,yplot[0],yplot[0]+err[0], facecolor=cols[z], alpha=0.2,interpolate=True)
if(p == 2):
ax.legend(['z=0','z=0.3','z=0.5'],loc='lower right',fontsize='small')
p = p + 1
common.savefig(output_dir, fig, "cluster_fractions.pdf")
def plot_individual_clusters(plt, output_dir, nradii_z0, nradii_z0p3, nradii_z0p5):
###################################
# Plots global mass densities
fig = plt.figure(figsize=(6,7))
plt.subplots_adjust(bottom=0.15, left=0.15)
subplots = (321, 322, 323, 324, 325, 326)
zs = (0, 0.3, 0.5)
cols = ('r','g','b')
lines = ('dotted', 'dashed', 'solid')
xmin, xmax, ymin, ymax = 0, 5, -0.05, 1.05
xleg = xmax - 0.3 * (xmax - xmin)
yleg = ymin + 0.1 * (ymax - ymin)
xtitle = '$\\rm d_{\\rm proj}/cMpc$'
ytitle = '$\\rm fraction$'
labels = ('Main sequence', 'Gas rich')
p = 0
for i in range(0, len(xmf)-1):
for j in range(0,2):
ax = fig.add_subplot(subplots[p])
if(p <= 1):
ax.text(1,1.1,labels[j])
if (p >= 4):
xtit = xtitle
else:
xtit = ''
if (p == 0 or p == 2 or p == 4):
ytit = ytitle
else:
ytit = ''
common.prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit, locators=(1, 1, 1))
ax.text(xleg, yleg, '$M_{\\star}$=%s' % str(xmf[i]))
#predicted fraction
for j in range(0,len(nradii_z0p3[0,0,0,:])):
ind = np.where(nradii_z0p3[0,i,:,j] > 4)
xplot = xrf[ind]
yplot = (nradii_z0p3[1,i,ind,j] + 0.0)/(nradii_z0p3[0,i,ind,j] + 0.0)
ax.plot(xplot, yplot[0], color=cols[1], linestyle = 'solid',linewidth = 0.5)
for j in range(0,len(nradii_z0p5[0,0,0,:])):
ind = np.where(nradii_z0p5[0,i,:,j] > 4)
xplot = xrf[ind]
yplot = (nradii_z0p5[1,i,ind,j] + 0.0)/(nradii_z0p5[0,i,ind,j] + 0.0)
ax.plot(xplot, yplot[0], color=cols[2], linestyle = 'dashed',linewidth = 0.5)
p = p + 1
#common.prepare_legend(ax, ['k','b','r','grey','grey'])
common.savefig(output_dir, fig, "individual_cluster_fractions.pdf")
def main(model_dir, output_dir, redshift_table, subvols, obs_dir):
plt = common.load_matplotlib()
zlist = (0, 0.3, 0.5)
fields = {'galaxies': ('type', 'mstars_disk', 'mstars_bulge',
'rstar_disk', 'm_bh', 'matom_disk', 'mmol_disk', 'mgas_disk',
'matom_bulge', 'mmol_bulge', 'mgas_bulge', 'mvir_hosthalo', 'sfr_disk',
'sfr_burst', 'position_x', 'position_y', 'position_z', 'vvir_hosthalo')}
fradii = np.zeros(shape = (2, 2, len(zlist), len(xmf), len(xrf)))
fradii[:] = -1
for index, snapshot in enumerate(redshift_table[zlist]):
hdf5_data = common.read_data(model_dir, snapshot, fields, subvols)
nradii = prepare_data(hdf5_data, fradii, index)
if(index == 0):
nradii_z0 = nradii
if(index == 1):
nradii_z0p3 = nradii
if(index == 2):
nradii_z0p5 = nradii
plot_fractions_radii(plt, output_dir, fradii)
plot_individual_clusters(plt, output_dir, nradii_z0, nradii_z0p3, nradii_z0p5)
if __name__ == '__main__':
main(*common.parse_args())
|
ICRARREPO_NAMEsharkPATH_START.@shark_extracted@shark-master@[email protected]@.PATH_END.py
|
{
"filename": "filters.py",
"repo_name": "astro-informatics/s2wav",
"repo_path": "s2wav_extracted/s2wav-main/s2wav/filters.py",
"type": "Python"
}
|
from jax import jit
import jax.numpy as jnp
import torch
import numpy as np
from typing import Tuple
from functools import partial
from s2wav import samples
def filters_axisym(
L: int, J_min: int = 0, lam: float = 2.0
) -> Tuple[np.ndarray, np.ndarray]:
r"""Computes wavelet kernels :math:`\Psi^j_{\ell m}` and scaling kernel
:math:`\Phi_{\ell m}` in harmonic space.
Specifically, these kernels are derived in `[1] <https://arxiv.org/pdf/1211.1680.pdf>`_,
where the wavelet kernels are defined (15) for scale :math:`j` to be
.. math::
\Psi^j_{\ell m} \equiv \sqrt{\frac{2\ell+1}{4\pi}} \kappa_{\lambda}(\frac{\ell}{\lambda^j})\delta_{m0},
where :math:`\kappa_{\lambda} = \sqrt{k_{\lambda}(t/\lambda) - k_{\lambda}(t)}` for :math:`k_{\lambda}`
given in :func:`~k_lam`. Similarly, the scaling kernel is defined (16) as
.. math::
\Phi_{\ell m} \equiv \sqrt{\frac{2\ell+1}{4\pi}} \nu_{\lambda} (\frac{\ell}{\lambda^{J_0}})\delta_{m0},
where :math:`\nu_{\lambda} = \sqrt{k_{\lambda}(t)}` for :math:`k_{\lambda}` given in :func:`~k_lam`.
Notice that :math:`\delta_{m0}` enforces that these kernels are axisymmetric, i.e. coefficients
for :math:`m \not = \ell` are zero. In this implementation the normalisation constant has been
omitted as it is nulled in subsequent functions.
Args:
L (int): Harmonic band-limit.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor between
consecutive wavelet scales. Note that :math:`\lambda = 2` indicates dyadic
wavelets. Defaults to 2.
Raises:
ValueError: J_min is negative or greater than J.
Returns:
Tuple[np.ndarray, np.ndarray]: Unnormalised wavelet kernels :math:`\Psi^j_{\ell m}`
with shape :math:`[(J+1)L]`, and scaling kernel :math:`\Phi_{\ell m}` with shape
:math:`[L]` in harmonic space.
Note:
[1] B. Leidstedt et. al., "S2LET: A code to perform fast wavelet analysis on the sphere", A&A, vol. 558, p. A128, 2013.
"""
J = samples.j_max(L, lam)
if J_min >= J or J_min < 0:
raise ValueError(
"J_min must be non-negative and less than J= "
+ str(J)
+ " for given L and lam."
)
previoustemp = 0.0
k = k_lam(L, lam)
psi = np.zeros((J + 1, L), np.float64)
phi = np.zeros(L, np.float64)
for l in range(L):
phi[l] = np.sqrt(k[J_min, l])
for j in range(J_min, J + 1):
for l in range(L):
diff = k[j + 1, l] - k[j, l]
if diff < 0:
psi[j, l] = previoustemp
else:
temp = np.sqrt(diff)
psi[j, l] = temp
previoustemp = temp
return psi, phi
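# Editor's sketch (illustrative only, not part of the original module): the
# kernels returned above tile harmonic space, which can be inspected for a
# small band-limit, e.g.
#
#     psi, phi = filters_axisym(L=32)
#     psi.shape, phi.shape                  # ((J + 1, 32), (32,)) with J = samples.j_max(32, 2.0)
#     phi**2 + (psi**2).sum(axis=0)         # ~1 for each ell, up to numerical tolerance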
def filters_directional(
L: int,
N: int = 1,
J_min: int = 0,
lam: float = 2.0,
spin: int = 0,
spin0: int = 0,
using_torch: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
r"""Generates the harmonic coefficients for the directional tiling wavelets.
This implementation is based on equation 36 in the wavelet computation paper
`[1] <https://arxiv.org/pdf/1509.06749.pdf>`_.
Args:
L (int): Harmonic band-limit.
N (int, optional): Upper azimuthal band-limit. Defaults to 1.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor between
consecutive wavelet scales. Note that :math:`\lambda = 2` indicates dyadic
wavelets. Defaults to 2.
spin (int, optional): Spin (integer) to perform the transform. Defaults to 0.
spin0 (int, optional): Spin number the wavelet was lowered from. Defaults to 0.
using_torch (bool, optional): Desired frontend functionality. Defaults to False.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple of wavelet and scaling kernels
(:math:`\Psi^j_{\ell n}`, :math:`\Phi_{\ell m}`)
Notes:
[1] J. McEwen et. al., "Directional spin wavelets on the sphere", arXiv preprint arXiv:1509.06749 (2015).
"""
J = samples.j_max(L, lam)
el_min = max(abs(spin), abs(spin0))
phi = np.zeros(L, dtype=np.float64)
psi = np.zeros((J + 1, L, 2 * L - 1), dtype=np.complex128)
kappa, kappa0 = filters_axisym(L, J_min, lam)
s_elm = tiling_direction(L, N)
for el in range(el_min, L):
if kappa0[el] != 0:
phi[el] = np.sqrt((2 * el + 1) / (4.0 * np.pi)) * kappa0[el]
if spin0 != 0:
phi[el] *= _spin_normalization(el, spin0) * (-1) ** spin0
for j in range(J_min, J + 1):
for el in range(el_min, L):
if kappa[j, el] != 0:
for m in range(-el, el + 1):
if s_elm[el, L - 1 + m] != 0:
psi[j, el, L - 1 + m] = (
np.sqrt((2 * el + 1) / (8.0 * np.pi * np.pi))
* kappa[j, el]
* s_elm[el, L - 1 + m]
)
if spin0 != 0:
psi[j, el, L - 1 + m] *= (
_spin_normalization(el, spin0) * (-1) ** spin0
)
if using_torch:
psi = torch.from_numpy(psi)
phi = torch.from_numpy(phi)
return psi, phi
def filters_axisym_vectorised(
L: int, J_min: int = 0, lam: float = 2.0
) -> Tuple[np.ndarray, np.ndarray]:
r"""Vectorised version of :func:`~filters_axisym`.
Args:
L (int): Harmonic band-limit.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor
between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
Raises:
ValueError: J_min is negative or greater than J.
Returns:
Tuple[np.ndarray, np.ndarray]: Unnormalised wavelet kernels :math:`\Psi^j_{\ell m}`
with shape :math:`[(J+1)L]`, and scaling kernel :math:`\Phi_{\ell m}` with shape
:math:`[L]` in harmonic space.
"""
J = samples.j_max(L, lam)
if J_min >= J or J_min < 0:
raise ValueError(
"J_min must be non-negative and less than J= "
+ str(J)
+ " for given L and lam."
)
k = k_lam(L, lam)
diff = (np.roll(k, -1, axis=0) - k)[:-1]
diff[diff < 0] = 0
return np.sqrt(diff), np.sqrt(k[J_min])
def filters_directional_vectorised(
L: int,
N: int = 1,
J_min: int = 0,
lam: float = 2.0,
spin: int = 0,
spin0: int = 0,
using_torch: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
r"""Vectorised version of :func:`~filters_directional`.
Args:
L (int): Harmonic band-limit.
N (int, optional): Upper azimuthal band-limit. Defaults to 1.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor between
consecutive wavelet scales. Note that :math:`\lambda = 2` indicates dyadic
wavelets. Defaults to 2.
spin (int, optional): Spin (integer) to perform the transform. Defaults to 0.
spin0 (int, optional): Spin number the wavelet was lowered from. Defaults to 0.
using_torch (bool, optional): Desired frontend functionality. Defaults to False.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple of wavelet and scaling kernels
(:math:`\Psi^j_{\ell n}`, :math:`\Phi_{\ell m}`).
"""
el_min = max(abs(spin), abs(spin0))
spin_norms = (
(-1) ** spin0 * _spin_normalization_vectorised(np.arange(L), spin0)
if spin0 != 0
else 1
)
kappa, kappa0 = filters_axisym_vectorised(L, J_min, lam)
s_elm = tiling_direction(L, N)
kappa0 *= np.sqrt((2 * np.arange(L) + 1) / (4.0 * np.pi))
kappa0 = kappa0 * spin_norms if spin0 != 0 else kappa0
kappa *= np.sqrt((2 * np.arange(L) + 1) / 8.0) / np.pi
kappa = np.einsum("ij,jk->ijk", kappa, s_elm)
kappa = np.einsum("ijk,j->ijk", kappa, spin_norms) if spin0 != 0 else kappa
kappa0[:el_min] = 0
kappa[:, :el_min, :] = 0
if using_torch:
kappa0 = torch.from_numpy(kappa0)
kappa = torch.from_numpy(kappa)
return kappa, kappa0
@partial(jit, static_argnums=(0, 1, 2))
def filters_axisym_jax(
L: int, J_min: int = 0, lam: float = 2.0
) -> Tuple[jnp.ndarray, jnp.ndarray]:
r"""JAX version of :func:`~filters_axisym_vectorised`.
Args:
L (int): Harmonic band-limit.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor
between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
Raises:
ValueError: J_min is negative or greater than J.
Returns:
Tuple[np.ndarray, np.ndarray]: Unnormalised wavelet kernels :math:`\Psi^j_{\ell m}`
with shape :math:`[(J+1)L]`, and scaling kernel :math:`\Phi_{\ell m}` with shape
:math:`[L]` in harmonic space.
"""
J = samples.j_max(L, lam)
if J_min >= J or J_min < 0:
raise ValueError(
"J_min must be non-negative and less than J= "
+ str(J)
+ " for given L and lam."
)
k = k_lam_jax(L, lam)
diff = (jnp.roll(k, -1, axis=0) - k)[:-1]
diff = jnp.where(diff < 0, jnp.zeros((J + 1, L)), diff)
return jnp.sqrt(diff), jnp.sqrt(k[J_min])
@partial(jit, static_argnums=(0, 1, 2, 3, 4, 5))
def filters_directional_jax(
L: int,
N: int = 1,
J_min: int = 0,
lam: float = 2.0,
spin: int = 0,
spin0: int = 0,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
r"""JAX version of :func:`~filters_directional`.
Args:
L (int): Harmonic band-limit.
N (int, optional): Upper azimuthal band-limit. Defaults to 1.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor between
consecutive wavelet scales. Note that :math:`\lambda = 2` indicates dyadic
wavelets. Defaults to 2.
spin (int, optional): Spin (integer) to perform the transform. Defaults to 0.
spin0 (int, optional): Spin number the wavelet was lowered from. Defaults to 0.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple of wavelet and scaling kernels
(:math:`\Psi^j_{\ell n}`, :math:`\Phi_{\ell m}`).
"""
el_min = max(abs(spin), abs(spin0))
spin_norms = (
(-1) ** spin0 * _spin_normalization_jax(np.arange(L), spin0)
if spin0 != 0
else 1
)
kappa, kappa0 = filters_axisym_jax(L, J_min, lam)
s_elm = tiling_direction_jax(L, N)
kappa0 *= jnp.sqrt((2 * jnp.arange(L) + 1) / (4.0 * jnp.pi))
kappa0 = kappa0 * spin_norms if spin0 != 0 else kappa0
kappa *= jnp.sqrt((2 * jnp.arange(L) + 1) / 8.0) / np.pi
kappa = jnp.einsum("ij,jk->ijk", kappa, s_elm, optimize=True)
kappa = (
jnp.einsum("ijk,j->ijk", kappa, spin_norms, optimize=True)
if spin0 != 0
else kappa
)
kappa0 = kappa0.at[:el_min].set(0)
kappa = kappa.at[:, :el_min, :].set(0)
return kappa, kappa0
def tiling_integrand(t: float, lam: float = 2.0) -> float:
r"""Tiling integrand for scale-discretised wavelets `[1] <https://arxiv.org/pdf/1211.1680.pdf>`_.
Intermediate step used to compute the wavelet and scaling function generating
functions. One of the basic mathematical functions needed to carry out the tiling of
the harmonic space.
Args:
t (float): Real argument over which we integrate.
lam (float, optional): Wavelet parameter which determines the scale factor
            between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
Returns:
float: Value of tiling integrand for given :math:`t` and scaling factor.
Note:
[1] B. Leidstedt et. al., "S2LET: A code to perform fast wavelet analysis on
the sphere", A&A, vol. 558, p. A128, 2013.
"""
s_arg = (t - (1.0 / lam)) * (2.0 * lam / (lam - 1.0)) - 1.0
integrand = np.exp(-2.0 / (1.0 - s_arg**2.0)) / t
return integrand
def part_scaling_fn(a: float, b: float, n: int, lam: float = 2.0) -> float:
r"""Computes integral used to calculate smoothly decreasing function :math:`k_{\lambda}`.
Intermediate step used to compute the wavelet and scaling function generating
functions. Uses the trapezium method to integrate :func:`~tiling_integrand` in the
limits from :math:`a \rightarrow b` with scaling parameter :math:`\lambda`. One of
the basic mathematical functions needed to carry out the tiling of the harmonic
space.
Args:
a (float): Lower limit of the numerical integration.
b (float): Upper limit of the numerical integration.
n (int): Number of steps to be performed during integration.
lam (float, optional): Wavelet parameter which determines the scale factor
            between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
Returns:
float: Integral of the tiling integrand from :math:`a \rightarrow b`.
"""
sum = 0.0
h = (b - a) / n
if a == b:
return 0
for i in range(n):
if a + i * h not in [1 / lam, 1.0] and a + (i + 1) * h not in [
1 / lam,
1.0,
]:
f1 = tiling_integrand(a + i * h, lam)
f2 = tiling_integrand(a + (i + 1) * h, lam)
sum += ((f1 + f2) * h) / 2
return sum
def k_lam(L: int, lam: float = 2.0, quad_iters: int = 300) -> np.ndarray:
r"""Compute function :math:`k_{\lambda}` used as a wavelet generating function.
Specifically, this function is derived in [1] and is given by
.. math::
k_{\lambda} \equiv \frac{ \int_t^1 \frac{\text{d}t^{\prime}}{t^{\prime}}
s_{\lambda}^2(t^{\prime})}{ \int_{\frac{1}{\lambda}}^1
\frac{\text{d}t^{\prime}}{t^{\prime}} s_{\lambda}^2(t^{\prime})},
where the integrand is defined to be
.. math::
s_{\lambda} \equiv s \Big ( \frac{2\lambda}{\lambda - 1}(t-\frac{1}{\lambda})
- 1 \Big ),
for infinitely differentiable Cauchy-Schwartz function :math:`s(t) \in C^{\infty}`.
Args:
L (int): Harmonic band-limit.
lam (float, optional): Wavelet parameter which determines the scale factor
between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
quad_iters (int, optional): Total number of iterations for quadrature
integration. Defaults to 300.
Returns:
(np.ndarray): Value of :math:`k_{\lambda}` computed for values between
:math:`\frac{1}{\lambda}` and 1, parametrised by :math:`\ell` as required to
compute the axisymmetric filters in :func:`~tiling_axisym`.
Note:
[1] B. Leidstedt et. al., "S2LET: A code to perform fast wavelet analysis on the
sphere", A&A, vol. 558, p. A128, 2013.
"""
J = samples.j_max(L, lam)
normalisation = part_scaling_fn(1.0 / lam, 1.0, quad_iters, lam)
k = np.zeros((J + 2, L))
for j in range(J + 2):
for l in range(L):
if l < lam ** (j - 1):
k[j, l] = 1
elif l > lam**j:
k[j, l] = 0
else:
k[j, l] = (
part_scaling_fn(l / lam**j, 1.0, quad_iters, lam) / normalisation
)
return k
@partial(jit, static_argnums=(2, 3))  # n and lam are treated as compile-time constants
def _part_scaling_fn_jax(a: float, b: float, n: int, lam: float = 2.0) -> float:
r"""JAX version of part_scaling_fn. Computes integral used to calculate smoothly
decreasing function :math:`k_{\lambda}`.
Intermediate step used to compute the wavelet and scaling function generating
functions. Uses the trapezium method to integrate :func:`~tiling_integrand` in the
limits from :math:`a \rightarrow b` with scaling parameter :math:`\lambda`. One of
the basic mathematical functions needed to carry out the tiling of the harmonic
space.
Args:
a (float): Lower limit of the numerical integration.
b (float): Upper limit of the numerical integration.
n (int): Number of steps to be performed during integration.
lam (float, optional): Wavelet parameter which determines the scale factor
            between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
Returns:
float: Integral of the tiling integrand from :math:`a \rightarrow b`.
"""
h = (b - a) / n
x = jnp.linspace(a, b, num=n + 1)
s_arg = (x - (1.0 / lam)) * (2.0 * lam / (lam - 1.0)) - 1.0
value = jnp.where(
(x[:-1] == 1.0 / lam) | (x[:-1] == 1.0) | (x[1:] == 1.0 / lam) | (x[1:] == 1.0),
jnp.zeros(n),
(jnp.exp(-2.0 / (1.0 - jnp.square(s_arg))) / x)[:-1]
+ (jnp.exp(-2.0 / (1.0 - jnp.square(s_arg))) / x)[1:],
)
return jnp.sum(value * h / 2)
@partial(jit, static_argnums=(0, 1, 2))
def k_lam_jax(L: int, lam: float = 2.0, quad_iters: int = 300) -> jnp.ndarray:
r"""JAX version of k_lam. Compute function :math:`k_{\lambda}` used as a wavelet
generating function.
Specifically, this function is derived in [1] and is given by
.. math::
k_{\lambda} \equiv \frac{ \int_t^1 \frac{\text{d}t^{\prime}}{t^{\prime}}
s_{\lambda}^2(t^{\prime})}{ \int_{\frac{1}{\lambda}}^1
\frac{\text{d}t^{\prime}}{t^{\prime}} s_{\lambda}^2(t^{\prime})},
where the integrand is defined to be
.. math::
s_{\lambda} \equiv s \Big ( \frac{2\lambda}{\lambda - 1}(t-\frac{1}{\lambda})
- 1 \Big ),
for infinitely differentiable Cauchy-Schwartz function :math:`s(t) \in C^{\infty}`.
Args:
L (int): Harmonic band-limit.
lam (float, optional): Wavelet parameter which determines the scale factor
between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
quad_iters (int, optional): Total number of iterations for quadrature
integration. Defaults to 300.
Returns:
(np.ndarray): Value of :math:`k_{\lambda}` computed for values between
:math:`\frac{1}{\lambda}` and 1, parametrised by :math:`\ell` as required to
compute the axisymmetric filters in :func:`~tiling_axisym`.
Note:
[1] B. Leidstedt et. al., "S2LET: A code to perform fast wavelet analysis on the
sphere", A&A, vol. 558, p. A128, 2013.
"""
J = samples.j_max(L, lam)
normalisation = part_scaling_fn(1.0 / lam, 1.0, quad_iters, lam)
k = jnp.zeros((J + 2, L))
for j in range(J + 2):
for l in range(L):
if l < lam ** (j - 1):
k = k.at[j, l].set(1.0)
elif l > lam**j:
k = k.at[j, l].set(0.0)
else:
k = k.at[j, l].set(
part_scaling_fn(l / lam**j, 1.0, quad_iters, lam) / normalisation
)
return k
def tiling_direction(L: int, N: int = 1) -> np.ndarray:
r"""Generates the harmonic coefficients for the directionality component of the
tiling functions.
    Formally, this function implements the following equation
.. math::
        _{s}\eta_{\ell m} = \nu \vu \sqrt{\frac{1}{2^{\gamma}} \big ( \binom{\gamma}{
(\gamma - m)/2} \big )}
which was first derived in `[1] <https://arxiv.org/pdf/1211.1680.pdf>`_.
Args:
L (int): Harmonic band-limit.
N (int, optional): Upper orientational band-limit. Defaults to 1.
Returns:
np.ndarray: Harmonic coefficients of directionality components
            :math:`_{s}\eta_{\ell m}`.
Notes:
[1] J. McEwen et. al., "Directional spin wavelets on the sphere", arXiv preprint
arXiv:1509.06749 (2015).
"""
if N % 2:
nu = 1
else:
nu = 1j
s_elm = np.zeros((L, 2 * L - 1), dtype=np.complex128)
for el in range(1, L):
if (N + el) % 2:
gamma = min(N - 1, el)
else:
gamma = min(N - 1, el - 1)
for m in range(-el, el + 1):
if abs(m) < N and (N + m) % 2:
s_elm[el, L - 1 + m] = nu * np.sqrt(
(samples.binomial_coefficient(gamma, ((gamma - m) / 2)))
/ (2**gamma)
)
else:
s_elm[el, L - 1 + m] = 0.0
return s_elm
def _spin_normalization(el: int, spin: int = 0) -> float:
r"""Computes the normalization factor for spin-lowered wavelets, which is
:math:`\sqrt{\frac{(\ell+s)!}{(\ell-s)!}}`.
Args:
el (int): Harmonic index :math:`\ell`.
spin (int): Spin of field over which to perform the transform. Defaults to 0.
Returns:
float: Normalization factor for spin-lowered wavelets.
"""
factor = 1.0
for s in range(-abs(spin) + 1, abs(spin) + 1):
factor *= el + s
if spin > 0:
return np.sqrt(factor)
else:
return np.sqrt(1.0 / factor)
def _spin_normalization_vectorised(el: np.ndarray, spin: int = 0) -> float:
r"""Vectorised version of :func:`~_spin_normalization`.
Args:
el (int): Harmonic index :math:`\ell`.
spin (int): Spin of field over which to perform the transform. Defaults to 0.
Returns:
float: Normalization factor for spin-lowered wavelets.
"""
factor = np.arange(-abs(spin) + 1, abs(spin) + 1).reshape(1, 2 * abs(spin) + 1)
factor = el.reshape(len(el), 1).dot(factor)
return np.sqrt(np.prod(factor, axis=1) ** (np.sign(spin)))
@partial(jit, static_argnums=(0, 1))
def tiling_direction_jax(L: int, N: int = 1) -> np.ndarray:
r"""JAX version of tiling_direction. Generates the harmonic coefficients for the
directionality component of the tiling functions.
    Formally, this function implements the following equation
.. math::
_{s}\eta_{\ell m} = \nu \vu \sqrt{\frac{1}{2^{\gamma}} \big ( \binom{\gamma}{
(\gamma - m)/2} \big )}
which was first derived in `[1] <https://arxiv.org/pdf/1211.1680.pdf>`_.
Args:
L (int): Harmonic band-limit.
N (int, optional): Upper orientational band-limit. Defaults to 1.
Returns:
np.ndarray: Harmonic coefficients of directionality components
:math:`_{s}\eta_{\ell m}`.
Notes:
[1] J. McEwen et. al., "Directional spin wavelets on the sphere", arXiv preprint
arXiv:1509.06749 (2015).
"""
nu = (N % 2 - 1) ** 2 * 1j + (N % 2)
s_elm = jnp.zeros((L, 2 * L - 1), dtype=np.complex128)
for el in range(1, L):
gamma = min(N - 1, el - 1 + (N + el) % 2)
ms = jnp.arange(-el, el + 1)
val = nu * jnp.sqrt(
(samples.binomial_coefficient_jax(gamma, ((gamma - ms) / 2))) / (2**gamma)
)
val = jnp.where(
(ms < N) & (ms > -N) & ((N + ms) % 2 == 1),
val,
jnp.zeros(2 * el + 1),
)
s_elm = s_elm.at[el, L - 1 - el : L + el].set(val)
return s_elm
@partial(jit, static_argnums=(1,))
def _spin_normalization_jax(el: np.ndarray, spin: int = 0) -> float:
r"""JAX version of :func:`~_spin_normalization`.
Args:
el (int): Harmonic index :math:`\ell`.
spin (int): Spin of field over which to perform the transform. Defaults to 0.
Returns:
float: Normalization factor for spin-lowered wavelets.
"""
factor = jnp.arange(-abs(spin) + 1, abs(spin) + 1).reshape(1, 2 * abs(spin) + 1)
factor = el.reshape(len(el), 1).dot(factor)
return jnp.sqrt(jnp.prod(factor, axis=1) ** (jnp.sign(spin)))
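# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): inspect the kernel
# shapes returned by the vectorised constructors for a small band-limit.
# `L_demo` and `N_demo` are illustrative values, not package defaults.
if __name__ == "__main__":
    L_demo, N_demo = 16, 3
    psi, phi = filters_axisym_vectorised(L_demo)
    psi_dir, phi_dir = filters_directional_vectorised(L_demo, N_demo)
    s_elm_demo = tiling_direction(L_demo, N_demo)
    print(psi.shape)  # (J + 1, L): unnormalised wavelet kernels
    print(phi.shape)  # (L,): scaling kernel
    print(s_elm_demo.shape)  # (L, 2L - 1): directionality coefficients
    print(psi_dir.shape)  # (J + 1, L, 2L - 1): directional wavelet kernels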
|
astro-informaticsREPO_NAMEs2wavPATH_START.@s2wav_extracted@s2wav-main@[email protected]@.PATH_END.py
|
{
"filename": "saveable_object.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/training/saving/saveable_object.py",
"type": "Python"
}
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Types for specifying saving and loading behavior."""
class SaveSpec:
"""Class used to describe tensor slices that need to be saved."""
def __init__(self, tensor, slice_spec, name, dtype=None, device=None):
"""Creates a `SaveSpec` object.
Args:
tensor: the tensor to save or callable that produces a tensor to save.
If the value is `None`, the `SaveSpec` is ignored.
slice_spec: the slice to be saved. See `Variable.SaveSliceInfo`.
name: the name to save the tensor under.
dtype: The data type of the Tensor. Required if `tensor` is callable.
Used for error checking in the restore op.
device: The device generating and consuming this tensor. Required if
`tensor` is callable. Used to group objects to save by device.
"""
self._tensor = tensor
self.slice_spec = slice_spec
self.name = name
if callable(self._tensor):
if dtype is None or device is None:
raise AssertionError(
"When passing a callable `tensor` to a SaveSpec, an explicit "
"dtype and device must be provided.")
self.dtype = dtype
self.device = device
else:
self.dtype = tensor.dtype
if device is not None:
self.device = device
else:
self.device = tensor.device
@property
def tensor(self):
return self._tensor() if callable(self._tensor) else self._tensor
class SaveableObject:
"""Base class for saving and restoring saveable objects."""
def __init__(self, op, specs, name):
"""Creates a `SaveableObject` object.
Args:
op: the "producer" object that this class wraps; it produces a list of
tensors to save. E.g., a "Variable" object saving its backing tensor.
specs: a list of SaveSpec, each element of which describes one tensor to
save under this object. All Tensors must be on the same device.
name: the name to save the object under.
"""
self.op = op
self.specs = specs
self.name = name
@property
def device(self):
"""The device for SaveSpec Tensors."""
return self.specs[0].device
def restore(self, restored_tensors, restored_shapes):
"""Restores this object from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint
restored_shapes: the shapes this object should conform to after
restore, or None.
Returns:
An operation that restores the state of the object.
Raises:
ValueError: If the object cannot be restored using the provided
parameters.
"""
# pylint: disable=unused-argument
raise ValueError("Calling an abstract method.")
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@training@saving@[email protected]_END.py
|
{
"filename": "test_bookkeeping.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/core/tests/test_bookkeeping.py",
"type": "Python"
}
|
# pytest suite
"""
Tests for primitives_bookkeeping.
This is a suite of tests to be run with pytest.
To run:
1) Set the environment variable GEMPYTHON_TESTDATA to the path that
contains the directories with the test data.
Eg. /net/chara/data2/pub/gempython_testdata/
2) From the ??? (location): pytest -v --capture=no
"""
# TODO @bquint: clean up these tests
import astrodata
import gemini_instruments
import os
import pytest
# from . import ad_compare
from geminidr.niri.primitives_niri_image import NIRIImage
from geminidr.gmos.primitives_gmos_image import GMOSImage
from gempy.utils import logutils
TESTDATAPATH = os.getenv('GEMPYTHON_TESTDATA', '.')
logfilename = 'test_bookkeeping.log'
# --- Fixtures ---
@pytest.fixture(scope="class")
def log():
if os.path.exists(logfilename):
os.remove(logfilename)
log = logutils.get_logger(__name__)
log.root.handlers = []
logutils.config(mode='standard', file_name=logfilename)
yield log
os.remove(logfilename)
@pytest.fixture(scope="function")
def niri_ads(request, astrofaker):
return [astrofaker.create('NIRI', ['IMAGE'], filename=f"X{i+1}.fits")
for i in range(request.param)]
# --- Tests ---
@pytest.mark.parametrize('niri_ads', [3], indirect=True)
def test_append_stream(niri_ads):
"""Some manipulation of streams using appendStream()"""
def filenames(stream):
return ''.join([ad.filename[1] for ad in stream])
p = NIRIImage(niri_ads[:1])
p.streams['test'] = niri_ads[1:2]
# Add the AD in 'test' to 'main' leaving it in 'test'
p.appendStream(from_stream='test', copy=True)
assert len(p.streams['main']) == 2
assert len(p.streams['test']) == 1
# Change filename of version in 'test' to confirm that the one in 'main'
# is not simply a reference
p.streams['test'][0].filename = 'X4.fits'
assert filenames(p.streams['main']) == '12'
# Add the copy in 'test' to 'main', and delete 'test'
p.appendStream(from_stream='test', copy=False)
assert len(p.streams['main']) == 3
assert filenames(p.streams['main']) == '124'
# Take 'test2', append 'main', and put the result in 'main'
p.streams['test2'] = niri_ads[2:]
p.appendStream(instream='test2', from_stream='main')
assert filenames(p.streams['main']) == '3124'
@pytest.mark.parametrize('niri_ads', [2], indirect=True)
def test_clear_all_streams(niri_ads):
p = NIRIImage(niri_ads[:1])
p.streams['test'] = niri_ads[1:]
p.clearAllStreams()
assert not p.streams['test']
assert len(p.streams['main']) == 1
@pytest.mark.parametrize('niri_ads', [2], indirect=True)
def test_clear_stream(niri_ads):
p = NIRIImage(niri_ads[:1])
p.streams['test'] = niri_ads[1:]
p.clearStream(stream='test')
assert not p.streams['test']
assert len(p.streams['main']) == 1
p.clearStream()
assert not p.streams['main']
def test_slice_into_streams(astrofaker):
def gmos_ads():
ad1 = astrofaker.create("GMOS-N")
ad1.init_default_extensions()
ad2 = astrofaker.create("GMOS-N")
ad2.init_default_extensions()
return [ad1, ad2]
# Slice, clearing "main"
p = GMOSImage(gmos_ads())
p.sliceIntoStreams(copy=False)
p.clearStream()
assert len(p.streams) == 13
for k, v in p.streams.items():
assert len(v) == 0 if k == 'main' else 2
# Slice, not clearing "main"
p = GMOSImage(gmos_ads())
p.sliceIntoStreams(copy=True)
assert len(p.streams) == 13
for k, v in p.streams.items():
assert len(v) == 2
# Slice with different lengths of input
ad1, ad2 = gmos_ads()
ad2.phu['EXTRA_KW'] = 33
del ad1[5]
p = GMOSImage([ad1, ad2])
p.sliceIntoStreams(copy=True)
assert len(p.streams) == 13
for k, v in p.streams.items():
assert len(v) == 1 if k == 'ext12' else 2
# The last stream should only have a slice from ad2
assert 'EXTRA_KW' in p.streams['ext12'][0].phu
class TestBookkeeping:
"""
Suite of tests for the functions in the primitives_standardize module.
"""
@pytest.mark.xfail(reason="Test needs revision", run=False)
def test_addToList(self):
filenames = ['N20070819S{:04d}_flatCorrected.fits'.format(i)
for i in range(104, 109)]
adinputs = [astrodata.open(os.path.join(TESTDATAPATH, 'NIRI', f))
for f in filenames]
# Add one image twice, just for laughs; it should appear only once
adinputs.append(adinputs[0])
p = NIRIImage(adinputs)
p.stacks = {}
p.addToList(purpose='forTest')
for f in filenames:
newfilename = f.replace('flatCorrected', 'forTest')
assert os.path.exists(newfilename)
os.remove(newfilename)
# Check there's one stack of length 5
assert len(p.stacks) == 1
assert len(p.stacks[p.stacks.keys()[0]]) == 5
@pytest.mark.xfail(reason="Test needs revision", run=False)
def test_getList(self):
pass
@pytest.mark.xfail(reason="Test needs revision", run=False)
def test_showInputs(self):
pass
@pytest.mark.xfail(reason="Test needs revision", run=False)
def test_showList(self):
pass
@pytest.mark.xfail(reason="Test needs revision", run=False)
def test_writeOutputs(self):
filenames = ['N20070819S{:04d}_flatCorrected.fits'.format(i)
for i in range(104, 106)]
adinputs = [astrodata.open(os.path.join(TESTDATAPATH, 'NIRI', f))
for f in filenames]
p = NIRIImage(adinputs)
p.writeOutputs(prefix='test', suffix='_blah', strip=True)
# Check renamed files are on disk and the filenames have been
# changed for the adinputs
for f, ad in zip(filenames, p.streams['main']):
newfilename = 'test' + f.replace('flatCorrected', 'blah')
assert os.path.exists(newfilename)
os.remove(newfilename)
assert newfilename == ad.filename
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@core@tests@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "ggmichael/craterstats",
"repo_path": "craterstats_extracted/craterstats-main/src/craterstats/sample/__init__.py",
"type": "Python"
}
|
ggmichaelREPO_NAMEcraterstatsPATH_START.@craterstats_extracted@craterstats-main@src@craterstats@sample@[email protected]_END.py
|
|
{
"filename": "pi_pi_eta.py",
"repo_name": "LoganAMorrison/Hazma",
"repo_path": "Hazma_extracted/Hazma-master/hazma/vector_mediator/form_factors/pi_pi_eta.py",
"type": "Python"
}
|
"""
F_{eta,pi,pi} = (1/Z) * BW(s, 0) [
a0*e^{i*p0}BW(q^2,0) +
a1*e^{i*p1}BW(q^2,1) +
a2*e^{i*p2}BW(q^2,2)
]
Z = a0*e^{i*p0} + a1*e^{i*p1} + a2*e^{i*p2}
"""
from dataclasses import dataclass
import numpy as np
from scipy.integrate import quad
from .cross_sections import width_to_cs
from hazma.utils import kallen_lambda
from hazma.vector_mediator.form_factors.utils import (
FPI_GEV,
META_GEV,
MPI_GEV,
RealArray,
)
META = META_GEV * 1e3
MPI = MPI_GEV * 1e3
@dataclass
class FormFactorPiPiEta:
masses: RealArray = np.array([0.77549, 1.54, 1.76, 2.15])
widths: RealArray = np.array([0.1494, 0.356, 0.113, 0.32])
amps: RealArray = np.array([1.0, 0.326, 0.0115, 0.0])
phases: RealArray = np.array([0, 3.14, 3.14, 0.0])
def __bw0(self, s):
m0 = self.masses[0]
w0 = self.widths[0]
w = (
w0
* m0**2
/ s
* ((s - 4.0 * MPI_GEV**2) / (m0**2 - 4.0 * MPI_GEV**2)) ** 1.5
)
return m0**2 / (m0**2 - s - 1j * np.sqrt(s) * w)
def __bw(self, s):
w = self.widths * s / self.masses**2
bw = self.masses**2 / (self.masses**2 - s - 1j * np.sqrt(s) * w)
bw[0] = self.__bw0(s)
return bw
def form_factor(self, cme, s, gvuu, gvdd):
"""
Compute the form factor for a vector decaying into two charged pions and
an eta.
Parameters
----------
        cme: float
            Center-of-mass energy in GeV.
        s: float
            Squared invariant mass of the pion pair in GeV^2.
        gvuu: float
            Coupling of the vector mediator to up quarks.
        gvdd: float
            Coupling of the vector mediator to down quarks.
"""
pre = 1.0 / (4.0 * np.sqrt(3.0) * np.pi**2 * FPI_GEV**3)
ci1 = gvuu - gvdd
amps = self.amps * np.exp(1j * self.phases)
amps /= np.sum(amps)
return pre * ci1 * self.__bw0(s) * np.sum(amps * self.__bw(cme**2))
def __integrated_form_factor(
self, *, cme: float, gvuu: float, gvdd: float
) -> float:
"""
        Compute the form factor for a vector decaying into two charged pions and
        an eta, integrated over the three-body phase-space.
        Parameters
        ----------
        cme: float
            Center-of-mass energy in GeV.
        gvuu: float
            Coupling of the vector mediator to up quarks.
        gvdd: float
            Coupling of the vector mediator to down quarks.
"""
mpi = MPI_GEV
meta = META_GEV
if cme < 2 * mpi + meta:
return 0.0
jac = 1 / (128.0 * np.pi**3 * cme**2)
def integrand(s):
f2 = np.abs(self.form_factor(cme, s, gvuu, gvdd)) ** 2
k1 = kallen_lambda(s, cme**2, meta**2)
k2 = kallen_lambda(s, mpi**2, mpi**2)
return (k1 * k2) ** 1.5 * f2 / (72 * s**2)
lb = (2 * mpi) ** 2
ub = (cme - meta) ** 2
return jac * quad(integrand, lb, ub)[0]
def integrated_form_factor(self, *, cme: float, gvuu: float, gvdd: float) -> float:
"""
        Compute the form factor for a vector decaying into two charged pions and
        an eta, integrated over the three-body phase-space.
        Parameters
        ----------
        cme: float
            Center-of-mass energy in MeV.
        gvuu: float
            Coupling of the vector mediator to up quarks.
        gvdd: float
            Coupling of the vector mediator to down quarks.
"""
cme_gev = cme * 1e-3
integral = self.__integrated_form_factor(cme=cme_gev, gvuu=gvuu, gvdd=gvdd)
return integral * 1e6
def width(self, *, mv: float, gvuu: float, gvdd: float) -> float:
if mv < 2 * MPI + META:
return 0.0
integral = self.integrated_form_factor(cme=mv, gvuu=gvuu, gvdd=gvdd)
return integral / (2 * mv)
def cross_section(
self,
*,
cme,
mx: float,
mv: float,
gvuu: float,
gvdd: float,
gamv: float,
):
rescale = width_to_cs(cme=cme, mx=mx, mv=mv, wv=gamv)
return rescale * self.width(mv=cme, gvuu=gvuu, gvdd=gvdd)
def energy_distributions(self, cme, gvuu, gvdd, nbins):
if cme < 2 * MPI + META:
return [([], []), ([], []), ([], [])]
def edist(e, m1, m2, m3):
s = cme**2 + m1**2 - 2 * cme * e
if s <= (m2 + m3) ** 2 or s >= (cme - m1) ** 2:
return 0.0
k1 = kallen_lambda(s, m1**2, cme**2)
k2 = kallen_lambda(s, m2**2, m3**2)
return (k1 * k2) ** 1.5 / (s**2)
def ebounds(m1, m2, m3):
return m1, (cme**2 + m1**2 - (m2 + m3) ** 2) / (2 * cme)
def make_dist(m1, m2, m3):
elow, ehigh = ebounds(m1, m2, m3)
edges = np.linspace(elow, ehigh, nbins + 1)
es = 0.5 * (edges[1:] + edges[:-1])
norm = quad(lambda e: edist(e, m1, m2, m3), elow, ehigh)[0]
dist = [edist(e, m1, m2, m3) / norm for e in es]
return dist, es
dist_pi, es_pi = make_dist(MPI, MPI, META)
dist_eta, es_eta = make_dist(META, MPI, MPI)
return [(dist_pi, es_pi), (dist_pi, es_pi), (dist_eta, es_eta)]
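# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): evaluate the
# phase-space-integrated form factor and the corresponding partial width.
# The centre-of-mass energy and quark couplings below are illustrative
# placeholders, not values taken from the package.
if __name__ == "__main__":
    ff = FormFactorPiPiEta()
    cme_mev = 1500.0
    gvuu_demo, gvdd_demo = 2.0 / 3.0, -1.0 / 3.0
    print(ff.integrated_form_factor(cme=cme_mev, gvuu=gvuu_demo, gvdd=gvdd_demo))
    print(ff.width(mv=cme_mev, gvuu=gvuu_demo, gvdd=gvdd_demo))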
|
LoganAMorrisonREPO_NAMEHazmaPATH_START.@Hazma_extracted@Hazma-master@hazma@vector_mediator@form_factors@[email protected]_END.py
|
{
"filename": "_legendgrouptitle.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattercarpet/_legendgrouptitle.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendgrouptitleValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="legendgrouptitle", parent_name="scattercarpet", **kwargs
):
super(LegendgrouptitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Legendgrouptitle"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this legend group's title font.
text
Sets the title of the legend group.
""",
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattercarpet@[email protected]_END.py
|
{
"filename": "make_figure_07.ipynb",
"repo_name": "tcallister/learning-p-det",
"repo_path": "learning-p-det_extracted/learning-p-det-main/figures/make_figure_07.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker
mpl.style.use("plotting.mplstyle")
import pandas as pd
import sys
sys.path.append('./../../pdet/')
from pdet.emulator import *
sys.path.append("./../code/")
from training_routines import *
from draw_new_injections import draw_new_injections
import tqdm
from figure_utilities import *
np.random.seed(230529)
cbbh = '#1f78b4'
cnsbh = '#33a02c'
cbns = '#e31a1c'
cmisc = '#6a3d9a'
```
```python
# Load network
ann = pdet_O3()
jitted_ann = jax.jit(ann)
```
```python
# Draw CDFs to be used in dynamic injection generation
injectionData = draw_vals(100000)
# Prep arrays to hold estimated efficiencies and sample sizes
n = 100
inj_effs = np.zeros(n)
nn_effs = np.zeros(n)
neff_inj = np.zeros(n)
neff_nn = np.zeros(n)
# Choose population hyperparameters, and prepare
# array of log-widths for cos tilt distribution
alphas = -3.
kappas = 3.
mu_m1 = 35.
sig_m1 = 5.
log_f_peaks = -3.
mMaxs = 80.
mMins = 10.
log_dmMaxs = 1.
log_dmMins = 0.
bqs = 2.
mu_chis = 0.
logsig_chis = np.linspace(-4,0.5,n)
f_iso = 0.5
mu_costs = 1.
sig_costs = 0.5
# Loop across parameters
for i in tqdm.tqdm(range(n)):
# Reweight pipeline injections.
# First argument is estimated detection efficiency,
# second is number of effective samples in Monte Carlo average
inj_effs[i], neff_inj[i] = get_inj_efficiency(
alphas,
mu_m1,
sig_m1,
log_f_peaks,
mMaxs,
mMins,
log_dmMaxs,
log_dmMins,
bqs,
mu_chis,
logsig_chis[i],
f_iso,
mu_costs,
sig_costs,
kappas)
# Use neural net to directly average Pdet over proposed population
# (subject to reweighting in redshift, as described in paper text).
# First argument is estimated detection efficiency. Second is number
# of effective samples in Monte Carlo integral over Pdet.
# Third (ignored) is number of effective draws from target distribution,
# after reweighting in redshift
nn_effs[i], neff_nn[i], _ = get_nn_efficiency(
jitted_ann,
injectionData,
alphas,
mu_m1,
sig_m1,
log_f_peaks,
mMaxs,
mMins,
log_dmMaxs,
log_dmMins,
bqs,
mu_chis,
logsig_chis[i],
f_iso,
mu_costs,
sig_costs,
kappas,
hybrid=True)
```
100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [00:26<00:00, 3.72it/s]
```python
# Plot
fig = plt.figure(figsize=(4,5.5))
ax = fig.add_subplot(211)
sorting = np.argsort(logsig_chis)
ax.plot(logsig_chis[sorting],(inj_effs)[sorting],color=cnsbh,lw=2,label='Injection Reweighting')
ax.plot(logsig_chis[sorting],(nn_effs)[sorting],color=cmisc,lw=2,label='Emulator')
ax.set_ylabel(r"Predicted Detection Efficiency")
ax.legend(loc='lower right',fontsize=9)
formatter = matplotlib.ticker.ScalarFormatter(useOffset=False, useMathText=True)
formatter.set_powerlimits((-2, 2))
ax.yaxis.set_major_formatter(formatter)
ax.set_xticklabels([])
ax = fig.add_subplot(212)
ax.plot(logsig_chis[sorting],np.log10(neff_inj)[sorting],color=cnsbh,lw=2,label='Injection Reweighting')
ax.plot(logsig_chis[sorting],np.log10(neff_nn)[sorting],color=cmisc,lw=2,label='Emulator')
ax.set_xlabel(r"$\log_{10}\sigma_\chi$")
ax.set_ylabel(r"$\log_{10} N_\mathrm{eff}$")
ax.legend(loc='lower right',fontsize=9)
ax.axhline(y=np.log10(4*59),color='black',ls=':',zorder=0)
ax.text(0.06,0.55,r'$N_\mathrm{eff} = 4\times N_\mathrm{events}$',transform=ax.transAxes,fontsize=10)
plt.tight_layout()
plt.savefig('figure_07.pdf',bbox_inches='tight')
plt.show()
```

```python
test = draw_vals(100000)
n = 50
inj_effs = np.zeros(n)
nn_effs = np.zeros(n)
neff_inj = np.zeros(n)
neff_nn = np.zeros(n)
alphas = -3.
kappas = np.linspace(-1,4,n)
mu_m1 = 35.
sig_m1 = 5.
log_f_peaks = -3.
mMaxs = 80.
mMins = 10.
log_dmMaxs = 1.
log_dmMins = 0.
bqs = 2.
mu_chis = 0.
logsig_chis = -1
f_iso = 0.5
mu_costs = 1
sig_costs = 0.5
for i in tqdm.tqdm(range(n)):
inj_effs[i],neff_inj[i] = get_inj_efficiency(
alphas,
mu_m1,
sig_m1,
log_f_peaks,
mMaxs,
mMins,
log_dmMaxs,
log_dmMins,
bqs,
mu_chis,
logsig_chis,
f_iso,
mu_costs,
sig_costs,
kappas[i])
nn_effs[i],neff_nn[i],_ = get_nn_efficiency(
jitted_ann,
injectionData,
alphas,
mu_m1,
sig_m1,
log_f_peaks,
mMaxs,
mMins,
log_dmMaxs,
log_dmMins,
bqs,
mu_chis,
logsig_chis,
f_iso,
mu_costs,
sig_costs,
kappas[i],
hybrid=True)
```
100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [00:13<00:00, 3.75it/s]
```python
fig = plt.figure(figsize=(4,5.5))
ax = fig.add_subplot(211)
sorting = np.argsort(kappas)
ax.plot(kappas[sorting],(inj_effs)[sorting],color=cnsbh,lw=2,label='Injection Reweighting')
ax.plot(kappas[sorting],(nn_effs)[sorting],color=cmisc,lw=2,label='Emulator')
ax.set_ylabel(r"Predicted Detection Efficiency")
ax.legend(loc='lower right',fontsize=9)
formatter = matplotlib.ticker.ScalarFormatter(useOffset=False, useMathText=True)
formatter.set_powerlimits((-2, 2))
ax.yaxis.set_major_formatter(formatter)
ax.set_xticklabels([])
ax.set_yscale('log')
ax = fig.add_subplot(212)
ax.plot(kappas[sorting],np.log10(neff_inj)[sorting],color=cnsbh,lw=2,label='Injection Reweighting')
ax.plot(kappas[sorting],np.log10(neff_nn)[sorting],color=cmisc,lw=2,label='Emulator')
ax.set_xlabel(r"$\log_{10}\sigma_\chi$")
ax.set_ylabel(r"$\log_{10} N_\mathrm{eff}$")
ax.legend(loc='lower right',fontsize=9)
ax.axhline(y=np.log10(4*59),color='black',ls=':',zorder=0)
ax.text(0.06,0.5,r'$N_\mathrm{eff} = 4\times N_\mathrm{events}$',transform=ax.transAxes,fontsize=10)
plt.tight_layout()
#plt.savefig('figure_07.pdf',bbox_inches='tight')
plt.show()
```

```python
```
|
tcallisterREPO_NAMElearning-p-detPATH_START.@learning-p-det_extracted@learning-p-det-main@figures@[email protected]_END.py
|
{
"filename": "io.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/flax/io.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IO Abstraction Layer.
The sole purpose of this abstraction layer is to avoid requiring tensorflow
as an open-source dependency solely for its tensorflow.io.gfile functions.
"""
import contextlib
import glob as glob_module
import importlib
import os
import shutil
from enum import Enum
from absl import logging
from . import errors
# Global Modes and selective import of tensorflow.io gfile.
class BackendMode(Enum):
DEFAULT = 0
TF = 1
io_mode = None
gfile = None
if importlib.util.find_spec('tensorflow'):
from tensorflow.io import gfile # type: ignore
io_mode = BackendMode.TF
else:
logging.warning(
'Tensorflow library not found, tensorflow.io.gfile '
'operations will use native shim calls. '
"GCS paths (i.e. 'gs://...') cannot be accessed."
)
io_mode = BackendMode.DEFAULT
# Constants and Exceptions
if io_mode == BackendMode.TF:
from tensorflow import errors as tf_errors # type: ignore
NotFoundError = tf_errors.NotFoundError
else:
NotFoundError = FileNotFoundError
# Overrides for testing.
@contextlib.contextmanager
def override_mode(override: BackendMode):
# pylint: disable=g-doc-return-or-yield
"""Returns a context manager that changes backend IO mode.
Args:
override: BackendMode enum value to set IO mode inside context.
"""
# pylint: enable=g-doc-return-or-yield
global io_mode
io_mode_prev = io_mode
io_mode = override
try:
yield
finally:
io_mode = io_mode_prev
def set_mode(override: BackendMode):
"""Sets global io mode.
Args:
override: BackendMode enum value to set for IO mode.
"""
global io_mode
io_mode = override
# tensorflow.io.gfile API shim functions.
def GFile(name, mode): # pylint: disable=invalid-name
if io_mode == BackendMode.DEFAULT:
if 'b' in mode:
return open(name, mode) # pylint: disable=unspecified-encoding
else:
return open(name, mode, encoding='utf-8')
elif io_mode == BackendMode.TF:
return gfile.GFile(name, mode)
else:
raise ValueError('Unknown IO Backend Mode.')
def listdir(path):
if io_mode == BackendMode.DEFAULT:
return os.listdir(path=path)
elif io_mode == BackendMode.TF:
return gfile.listdir(path=path)
else:
raise ValueError('Unknown IO Backend Mode.')
def isdir(path):
if io_mode == BackendMode.DEFAULT:
return os.path.isdir(path)
elif io_mode == BackendMode.TF:
return gfile.isdir(path)
else:
raise ValueError('Unknown IO Backend Mode.')
def copy(src, dst, overwrite=False):
if io_mode == BackendMode.DEFAULT:
if os.path.exists(dst) and not overwrite:
raise errors.AlreadyExistsError(dst)
shutil.copy(src, dst)
return
elif io_mode == BackendMode.TF:
return gfile.copy(src, dst, overwrite=overwrite)
else:
raise ValueError('Unknown IO Backend Mode.')
def rename(src, dst, overwrite=False):
if io_mode == BackendMode.DEFAULT:
if os.path.exists(dst) and not overwrite:
raise errors.AlreadyExistsError(dst)
return os.rename(src, dst)
elif io_mode == BackendMode.TF:
return gfile.rename(src, dst, overwrite=overwrite)
else:
raise ValueError('Unknown IO Backend Mode.')
def exists(path):
if io_mode == BackendMode.DEFAULT:
return os.path.exists(path)
elif io_mode == BackendMode.TF:
return gfile.exists(path)
else:
raise ValueError('Unknown IO Backend Mode.')
def makedirs(path):
if io_mode == BackendMode.DEFAULT:
return os.makedirs(path, exist_ok=True)
elif io_mode == BackendMode.TF:
return gfile.makedirs(path)
else:
raise ValueError('Unknown IO Backend Mode.')
def glob(pattern):
if io_mode == BackendMode.DEFAULT:
return [
path.rstrip('/') for path in glob_module.glob(pattern, recursive=False)
]
elif io_mode == BackendMode.TF:
return gfile.glob(pattern)
else:
raise ValueError('Unknown IO Backend Mode.')
def remove(path):
"""Remove the file at path. Might fail if used on a directory path."""
if io_mode == BackendMode.DEFAULT:
return os.remove(path)
elif io_mode == BackendMode.TF:
return gfile.remove(path)
else:
raise ValueError('Unknown IO Backend Mode.')
def rmtree(path):
"""Remove a directory and recursively all contents inside. Might fail if used on a file path."""
if io_mode == BackendMode.DEFAULT:
return shutil.rmtree(path)
elif io_mode == BackendMode.TF:
return gfile.rmtree(path)
else:
raise ValueError('Unknown IO Backend Mode.')
def getsize(path):
"""Return the size, in bytes, of path."""
if io_mode == BackendMode.DEFAULT:
return os.path.getsize(path)
elif io_mode == BackendMode.TF:
return gfile.stat(path).length
else:
raise ValueError('Unknown IO Backend Mode.')
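# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): force the native
# backend and exercise a few of the gfile-style shims on a temporary file.
if __name__ == '__main__':
  import tempfile

  with override_mode(BackendMode.DEFAULT):
    demo_dir = tempfile.mkdtemp()
    demo_path = os.path.join(demo_dir, 'example.txt')
    with GFile(demo_path, 'w') as f:
      f.write('hello')
    assert exists(demo_path)
    assert getsize(demo_path) == 5
    rmtree(demo_dir)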
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@[email protected]@.PATH_END.py
|
{
"filename": "data_structures.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/frontends/exodus_ii/data_structures.py",
"type": "Python"
}
|
import numpy as np
from yt.data_objects.index_subobjects.unstructured_mesh import UnstructuredMesh
from yt.data_objects.static_output import Dataset
from yt.data_objects.unions import MeshUnion
from yt.funcs import setdefaultattr
from yt.geometry.unstructured_mesh_handler import UnstructuredIndex
from yt.utilities.file_handler import NetCDF4FileHandler, valid_netcdf_signature
from yt.utilities.logger import ytLogger as mylog
from .fields import ExodusIIFieldInfo
from .util import get_num_pseudo_dims, load_info_records, sanitize_string
class ExodusIIUnstructuredMesh(UnstructuredMesh):
_index_offset = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class ExodusIIUnstructuredIndex(UnstructuredIndex):
def __init__(self, ds, dataset_type="exodus_ii"):
super().__init__(ds, dataset_type)
def _initialize_mesh(self):
coords = self.ds._read_coordinates()
connectivity = self.ds._read_connectivity()
self.meshes = []
for mesh_id, conn_ind in enumerate(connectivity):
displaced_coords = self.ds._apply_displacement(coords, mesh_id)
mesh = ExodusIIUnstructuredMesh(
mesh_id, self.index_filename, conn_ind, displaced_coords, self
)
self.meshes.append(mesh)
self.mesh_union = MeshUnion("mesh_union", self.meshes)
def _detect_output_fields(self):
elem_names = self.dataset.parameters["elem_names"]
node_names = self.dataset.parameters["nod_names"]
fnames = elem_names + node_names
self.field_list = []
for i in range(1, len(self.meshes) + 1):
self.field_list += [("connect%d" % i, fname) for fname in fnames]
self.field_list += [("all", fname) for fname in fnames]
class ExodusIIDataset(Dataset):
_load_requirements = ["netCDF4"]
_index_class = ExodusIIUnstructuredIndex
_field_info_class = ExodusIIFieldInfo
def __init__(
self,
filename,
step=0,
displacements=None,
dataset_type="exodus_ii",
storage_filename=None,
units_override=None,
):
"""
A class used to represent an on-disk ExodusII dataset. The initializer takes
two extra optional parameters, "step" and "displacements."
Parameters
----------
step : integer
            The step tells which time index to slice at. A RuntimeError is raised
            if the index is larger than the number of time outputs in the ExodusII
            file. Passing step=-1 picks out the last time step.
Default is 0.
displacements : dictionary of tuples
This is a dictionary that controls whether or not displacement fields
will be used with the meshes in this dataset. The keys of the
            displacements dictionary should be the names of meshes in the file
(e.g., "connect1", "connect2", etc... ), while the values should be
tuples of the form (scale, offset), where "scale" is a floating point
value and "offset" is an array-like with one component for each spatial
dimension in the dataset. When the displacements for a given mesh are
turned on, the coordinates of the vertices in that mesh get transformed
as:
vertex_x = vertex_x + disp_x*scale + offset_x
vertex_y = vertex_y + disp_y*scale + offset_y
vertex_z = vertex_z + disp_z*scale + offset_z
If no displacement
fields (assumed to be named 'disp_x', 'disp_y', etc... ) are detected in
the output file, then this dictionary is ignored.
Examples
--------
This will load the Dataset at time index '0' with displacements turned off.
>>> import yt
>>> ds = yt.load("MOOSE_sample_data/mps_out.e")
This will load the Dataset at the final index with displacements turned off.
>>> import yt
>>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1)
This will load the Dataset at index 10, turning on displacement fields for
the 2nd mesh without applying any scale or offset:
>>> import yt
>>> ds = yt.load(
... "MOOSE_sample_data/mps_out.e",
... step=10,
... displacements={"connect2": (1.0, [0.0, 0.0, 0.0])},
... )
This will load the Dataset at index 10, scaling the displacements
in the 2nd mesh by a factor of 5 while not applying an offset:
>>> import yt
>>> ds = yt.load(
... "MOOSE_sample_data/mps_out.e",
... step=10,
... displacements={"connect2": (5.0, [0.0, 0.0, 0.0])},
... )
This will load the Dataset at index 10, scaling the displacements for
the 2nd mesh by a factor of 5.0 and shifting all the vertices in
the first mesh by 1.0 unit in the z direction.
>>> import yt
>>> ds = yt.load(
... "MOOSE_sample_data/mps_out.e",
... step=10,
... displacements={
... "connect1": (0.0, [0.0, 0.0, 1.0]),
... "connect2": (5.0, [0.0, 0.0, 0.0]),
... },
... )
"""
self.step = step
if displacements is None:
self.displacements = {}
else:
self.displacements = displacements
self.storage_filename = storage_filename
super().__init__(filename, dataset_type, units_override=units_override)
self.fluid_types += self._get_fluid_types()
self.default_field = [f for f in self.field_list if f[0] == "connect1"][-1]
@property
def index_filename(self):
# historic alias
return self.filename
def _set_code_unit_attributes(self):
# This is where quantities are created that represent the various
# on-disk units. These are the currently available quantities which
# should be set, along with examples of how to set them to standard
# values.
#
setdefaultattr(self, "length_unit", self.quan(1.0, "cm"))
setdefaultattr(self, "mass_unit", self.quan(1.0, "g"))
setdefaultattr(self, "time_unit", self.quan(1.0, "s"))
#
# These can also be set:
# self.velocity_unit = self.quan(1.0, "cm/s")
# self.magnetic_unit = self.quan(1.0, "gauss")
def _parse_parameter_file(self):
self._handle = NetCDF4FileHandler(self.parameter_filename)
with self._handle.open_ds() as ds:
self._read_glo_var()
self.dimensionality = ds.variables["coor_names"].shape[0]
self.parameters["info_records"] = self._load_info_records()
self.num_steps = len(ds.variables["time_whole"])
self.current_time = self._get_current_time()
self.parameters["num_meshes"] = ds.variables["eb_status"].shape[0]
self.parameters["elem_names"] = self._get_elem_names()
self.parameters["nod_names"] = self._get_nod_names()
self.domain_left_edge, self.domain_right_edge = self._load_domain_edge()
self._periodicity = (False, False, False)
# These attributes don't really make sense for unstructured
# mesh data, but yt warns if they are not present, so we set
# them to dummy values here.
self.domain_dimensions = np.ones(3, "int32")
self.cosmological_simulation = 0
self.current_redshift = 0
self.omega_lambda = 0
self.omega_matter = 0
self.hubble_constant = 0
self.refine_by = 0
def _get_fluid_types(self):
with NetCDF4FileHandler(self.parameter_filename).open_ds() as ds:
fluid_types = ()
i = 1
while True:
ftype = "connect%d" % i
if ftype in ds.variables:
fluid_types += (ftype,)
i += 1
else:
break
fluid_types += ("all",)
return fluid_types
def _read_glo_var(self):
"""
Adds each global variable to the dict of parameters
"""
names = self._get_glo_names()
if not names:
return
with self._handle.open_ds() as ds:
values = ds.variables["vals_glo_var"][:].transpose()
for name, value in zip(names, values, strict=True):
self.parameters[name] = value
def _load_info_records(self):
"""
Returns parsed version of the info_records.
"""
with self._handle.open_ds() as ds:
try:
return load_info_records(ds.variables["info_records"])
except (KeyError, TypeError):
mylog.warning("No info_records found")
return []
def _get_current_time(self):
with self._handle.open_ds() as ds:
try:
return ds.variables["time_whole"][self.step]
except IndexError as e:
raise RuntimeError(
"Invalid step number, max is %d" % (self.num_steps - 1)
) from e
except (KeyError, TypeError):
return 0.0
def _get_glo_names(self):
"""
Returns the names of the global vars, if available.
"""
with self._handle.open_ds() as ds:
if "name_glo_var" not in ds.variables:
mylog.warning("name_glo_var not found")
return []
else:
return [
sanitize_string(v.tobytes()) for v in ds.variables["name_glo_var"]
]
def _get_elem_names(self):
"""
Returns the names of the element vars, if available.
"""
with self._handle.open_ds() as ds:
if "name_elem_var" not in ds.variables:
mylog.warning("name_elem_var not found")
return []
else:
return [
sanitize_string(v.tobytes()) for v in ds.variables["name_elem_var"]
]
def _get_nod_names(self):
"""
Returns the names of the node vars, if available
"""
with self._handle.open_ds() as ds:
if "name_nod_var" not in ds.variables:
mylog.warning("name_nod_var not found")
return []
else:
return [
sanitize_string(v.tobytes()) for v in ds.variables["name_nod_var"]
]
def _read_coordinates(self):
"""
Loads the coordinates for the mesh
"""
coord_axes = "xyz"[: self.dimensionality]
mylog.info("Loading coordinates")
with self._handle.open_ds() as ds:
if "coord" not in ds.variables:
coords = (
np.array([ds.variables[f"coord{ax}"][:] for ax in coord_axes])
.transpose()
.astype("f8")
)
else:
coords = (
np.array(list(ds.variables["coord"][:])).transpose().astype("f8")
)
return coords
def _apply_displacement(self, coords, mesh_id):
mesh_name = "connect%d" % (mesh_id + 1)
new_coords = coords.copy()
if mesh_name not in self.displacements:
return new_coords
fac = self.displacements[mesh_name][0]
offset = self.displacements[mesh_name][1]
coord_axes = "xyz"[: self.dimensionality]
with self._handle.open_ds() as ds:
for i, ax in enumerate(coord_axes):
if f"disp_{ax}" in self.parameters["nod_names"]:
ind = self.parameters["nod_names"].index(f"disp_{ax}")
disp = ds.variables["vals_nod_var%d" % (ind + 1)][self.step]
new_coords[:, i] = coords[:, i] + fac * disp + offset[i]
return new_coords
def _read_connectivity(self):
"""
Loads the connectivity data for the mesh
"""
mylog.info("Loading connectivity")
connectivity = []
with self._handle.open_ds() as ds:
for i in range(self.parameters["num_meshes"]):
var = ds.variables["connect%d" % (i + 1)][:].astype("i8")
try:
elem_type = var.elem_type.lower()
if elem_type == "nfaced":
raise NotImplementedError(
"3D arbitrary polyhedra are not implemented yet"
)
arbitrary_polyhedron = elem_type == "nsided"
except AttributeError:
arbitrary_polyhedron = False
conn = var[:]
if arbitrary_polyhedron:
nodes_per_element = ds.variables[f"ebepecnt{i + 1}"]
npe = nodes_per_element[0]
if np.any(nodes_per_element != npe):
raise NotImplementedError("only equal-size polyhedra supported")
q, r = np.divmod(len(conn), npe)
assert r == 0
conn.shape = (q, npe)
connectivity.append(conn)
return connectivity
def _load_domain_edge(self):
"""
Loads the boundaries for the domain edge
"""
coords = self._read_coordinates()
connectivity = self._read_connectivity()
mi = 1e300
ma = -1e300
for mesh_id, _ in enumerate(connectivity):
displaced_coords = self._apply_displacement(coords, mesh_id)
mi = np.minimum(displaced_coords.min(axis=0), mi)
ma = np.maximum(displaced_coords.max(axis=0), ma)
# pad domain boundaries
width = ma - mi
mi -= 0.1 * width
ma += 0.1 * width
# set up pseudo-3D for lodim datasets here
for _ in range(self.dimensionality, 3):
mi = np.append(mi, 0.0)
ma = np.append(ma, 1.0)
num_pseudo_dims = get_num_pseudo_dims(coords)
self.dimensionality -= num_pseudo_dims
for i in range(self.dimensionality, 3):
mi[i] = 0.0
ma[i] = 1.0
return mi, ma
@classmethod
def _is_valid(cls, filename: str, *args, **kwargs) -> bool:
if not valid_netcdf_signature(filename):
return False
if cls._missing_load_requirements():
return False
try:
from netCDF4 import Dataset
# We use keepweakref here to avoid holding onto the file handle
# which can interfere with other is_valid calls.
with Dataset(filename, keepweakref=True) as f:
f.variables["connect1"]
return True
except Exception:
return False
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@frontends@exodus_ii@[email protected]_END.py
|
{
"filename": "DQ_definitions.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/gemini/lookups/DQ_definitions.py",
"type": "Python"
}
|
import numpy as np
datatype = np.uint16
max = np.iinfo(datatype).max
good = datatype(0)
bad_pixel = datatype(1)
non_linear = datatype(2)
saturated = datatype(4)
cosmic_ray = datatype(8)
no_data = datatype(16)
overlap = datatype(32)
unilluminated = datatype(64)
fail = bad_pixel | saturated | cosmic_ray | no_data
not_signal = max ^ (non_linear | saturated)
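# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original lookup table): flag a few
# pixels and test them against the combined `fail` mask with bitwise ops.
if __name__ == "__main__":
    dq = np.array([good, non_linear, saturated | cosmic_ray, no_data], dtype=datatype)
    print((dq & fail) > 0)  # [False False  True  True]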
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@gemini@lookups@[email protected]_END.py
|
{
"filename": "utils.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/wcs/wcsapi/utils.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import importlib
import numpy as np
__all__ = ["deserialize_class", "wcs_info_str"]
def deserialize_class(tpl, construct=True):
"""
Deserialize classes recursively.
"""
if not isinstance(tpl, tuple) or len(tpl) != 3:
raise ValueError("Expected a tuple of three values")
module, klass = tpl[0].rsplit(".", 1)
module = importlib.import_module(module)
klass = getattr(module, klass)
args = tuple(
deserialize_class(arg) if isinstance(arg, tuple) else arg for arg in tpl[1]
)
kwargs = dict(
(key, deserialize_class(val)) if isinstance(val, tuple) else (key, val)
for (key, val) in tpl[2].items()
)
if construct:
return klass(*args, **kwargs)
else:
return klass, args, kwargs
def wcs_info_str(wcs):
# Overall header
if wcs.array_shape is None:
array_shape = None
else:
array_shape = tuple(int(n) for n in wcs.array_shape)
s = (
f"{type(wcs).__name__} Transformation\n\n"
f"This transformation has {wcs.pixel_n_dim} pixel and {wcs.world_n_dim} "
"world dimensions\n\n"
f"Array shape (Numpy order): {array_shape}\n\n"
)
# Pixel dimensions table
array_shape = array_shape or (0,)
pixel_shape = wcs.pixel_shape or (None,) * wcs.pixel_n_dim
# Find largest between header size and value length
pixel_dim_width = max(9, len(str(wcs.pixel_n_dim)))
pixel_nam_width = max(9, *map(len, wcs.pixel_axis_names))
pixel_siz_width = max(9, len(str(max(array_shape))))
# fmt: off
s += (('{0:' + str(pixel_dim_width) + 's}').format('Pixel Dim') + ' ' +
('{0:' + str(pixel_nam_width) + 's}').format('Axis Name') + ' ' +
('{0:' + str(pixel_siz_width) + 's}').format('Data size') + ' ' +
'Bounds\n')
# fmt: on
if wcs.pixel_bounds is None:
pixel_bounds = [None for _ in range(wcs.pixel_n_dim)]
else:
# converting to scalar arrays and back to Python with np.array(val).item()
# guarantees that we end up with Python scalars (int or float) with
# simple reprs, while not making any unnecessary type promotion
# (e.g. int to float)
pixel_bounds = [
tuple(np.array(b).item() for b in bounds) for bounds in wcs.pixel_bounds
]
for ipix in range(wcs.pixel_n_dim):
# fmt: off
s += (('{0:' + str(pixel_dim_width) + 'g}').format(ipix) + ' ' +
('{0:' + str(pixel_nam_width) + 's}').format(wcs.pixel_axis_names[ipix] or 'None') + ' ' +
(" " * 5 + str(None) if pixel_shape[ipix] is None else
('{0:' + str(pixel_siz_width) + 'g}').format(pixel_shape[ipix])) + ' ' +
f"{pixel_bounds[ipix]}\n"
)
# fmt: on
s += "\n"
# World dimensions table
# Find largest between header size and value length
world_dim_width = max(9, len(str(wcs.world_n_dim)))
world_nam_width = max(9, *(len(x) for x in wcs.world_axis_names if x is not None))
world_typ_width = max(
[13] + [len(x) for x in wcs.world_axis_physical_types if x is not None]
)
# fmt: off
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') + ' ' +
('{0:' + str(world_nam_width) + 's}').format('Axis Name') + ' ' +
('{0:' + str(world_typ_width) + 's}').format('Physical Type') + ' ' +
'Units\n')
# fmt: on
for iwrl in range(wcs.world_n_dim):
name = wcs.world_axis_names[iwrl] or "None"
typ = wcs.world_axis_physical_types[iwrl] or "None"
unit = wcs.world_axis_units[iwrl] or "unknown"
# fmt: off
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) + ' ' +
('{0:' + str(world_nam_width) + 's}').format(name) + ' ' +
('{0:' + str(world_typ_width) + 's}').format(typ) + ' ' +
'{:s}'.format(unit + '\n'))
# fmt: on
s += "\n"
# Axis correlation matrix
pixel_dim_width = max(3, len(str(wcs.world_n_dim)))
s += "Correlation between pixel and world axes:\n\n"
# fmt: off
s += (' ' * world_dim_width + ' ' +
('{0:^' + str(wcs.pixel_n_dim * 5 - 2) + 's}').format('Pixel Dim') +
'\n')
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') +
''.join([' ' + ('{0:' + str(pixel_dim_width) + 'd}').format(ipix)
for ipix in range(wcs.pixel_n_dim)]) +
'\n')
# fmt: on
matrix = wcs.axis_correlation_matrix
matrix_str = np.empty(matrix.shape, dtype="U3")
matrix_str[matrix] = "yes"
matrix_str[~matrix] = "no"
for iwrl in range(wcs.world_n_dim):
# fmt: off
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) +
''.join([' ' + ('{0:>' + str(pixel_dim_width) + 's}').format(matrix_str[iwrl, ipix])
for ipix in range(wcs.pixel_n_dim)]) +
'\n')
# fmt: on
# Make sure we get rid of the extra whitespace at the end of some lines
return "\n".join([l.rstrip() for l in s.splitlines()])
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@wcs@[email protected]@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "tvwenger/kd",
"repo_path": "kd_extracted/kd-master/setup.py",
"type": "Python"
}
|
"""
Copyright(C) 2017-2021 by
Trey V. Wenger; [email protected]
GNU General Public License v3 (GNU GPLv3)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from setuptools import setup
setup(
name="kd",
version="2.1",
description="Kinematic distance utilities",
author="Trey V. Wenger",
author_email="[email protected]",
packages=["kd"],
install_requires=["numpy<2.0.0", "matplotlib", "scipy", "pathos"],
package_data={"kd": ["curve_data_wise_small.sav", "reid19_params.pkl"]},
)
|
tvwengerREPO_NAMEkdPATH_START.@kd_extracted@[email protected]@.PATH_END.py
|
{
"filename": "increasebuffer.py",
"repo_name": "CosmicFish/CosmicFish",
"repo_path": "CosmicFish_extracted/CosmicFish-master/bundled/doxygen/src/increasebuffer.py",
"type": "Python"
}
|
# Since the internal token buffer of a generated flex file is hardcoded
# to 16K, this script is used to increase the buffer size of a flex
# generated scanner to 256K.
import sys
sys.stdout.write(sys.stdin.read().
replace('YY_BUF_SIZE 16384','YY_BUF_SIZE 262144').
replace('YY_READ_BUF_SIZE 8192','YY_READ_BUF_SIZE 262144'))
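# Example invocation (illustrative; actual file names depend on the build):
#   python increasebuffer.py < scanner_generated.cpp > scanner_bigbuffer.cpp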
|
CosmicFishREPO_NAMECosmicFishPATH_START.@CosmicFish_extracted@CosmicFish-master@bundled@doxygen@[email protected]@.PATH_END.py
|
{
"filename": "_name.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/image/_name.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="name", parent_name="layout.image", **kwargs):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@image@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "ESDS-Leipzig/sen2nbar",
"repo_path": "sen2nbar_extracted/sen2nbar-main/sen2nbar/__init__.py",
"type": "Python"
}
|
"""sen2nbar - Nadir BRDF Adjusted Reflectance (NBAR) for Sentinel-2 in Python"""
__version__ = "2024.6.0"
__author__ = "David Montero Loaiza <[email protected]>"
__all__ = []
from . import *
|
ESDS-LeipzigREPO_NAMEsen2nbarPATH_START.@sen2nbar_extracted@sen2nbar-main@sen2nbar@[email protected]_END.py
|
{
"filename": "_h_e_a_d.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/fonttools/fontTools/ttLib/tables/_h_e_a_d.py",
"type": "Python"
}
|
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr, strToFixedToFloat
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from fontTools.misc.timeTools import (
timestampFromString,
timestampToString,
timestampNow,
)
from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
from fontTools.misc.arrayTools import intRect, unionRect
from . import DefaultTable
import logging
log = logging.getLogger(__name__)
headFormat = """
> # big endian
tableVersion: 16.16F
fontRevision: 16.16F
checkSumAdjustment: I
magicNumber: I
flags: H
unitsPerEm: H
created: Q
modified: Q
xMin: h
yMin: h
xMax: h
yMax: h
macStyle: H
lowestRecPPEM: H
fontDirectionHint: h
indexToLocFormat: h
glyphDataFormat: h
"""
class table__h_e_a_d(DefaultTable.DefaultTable):
dependencies = ["maxp", "loca", "CFF ", "CFF2"]
def decompile(self, data, ttFont):
dummy, rest = sstruct.unpack2(headFormat, data, self)
if rest:
# this is quite illegal, but there seem to be fonts out there that do this
log.warning("extra bytes at the end of 'head' table")
assert rest == b"\0\0"
# For timestamp fields, ignore the top four bytes. Some fonts have
# bogus values there. Since those bytes can only be zero until 2038,
# ignore them.
#
# https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
for stamp in "created", "modified":
value = getattr(self, stamp)
if value > 0xFFFFFFFF:
log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
value &= 0xFFFFFFFF
setattr(self, stamp, value)
if value < 0x7C259DC0: # January 1, 1970 00:00:00
log.warning(
"'%s' timestamp seems very low; regarding as unix timestamp", stamp
)
value += 0x7C259DC0
setattr(self, stamp, value)
def compile(self, ttFont):
if ttFont.recalcBBoxes:
# For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
if "CFF " in ttFont:
topDict = ttFont["CFF "].cff.topDictIndex[0]
self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
elif "CFF2" in ttFont:
topDict = ttFont["CFF2"].cff.topDictIndex[0]
charStrings = topDict.CharStrings
fontBBox = None
for charString in charStrings.values():
bounds = charString.calcBounds(charStrings)
if bounds is not None:
if fontBBox is not None:
fontBBox = unionRect(fontBBox, bounds)
else:
fontBBox = bounds
if fontBBox is not None:
self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
if ttFont.recalcTimestamp:
self.modified = timestampNow()
data = sstruct.pack(headFormat, self)
return data
def toXML(self, writer, ttFont):
writer.comment("Most of this table will be recalculated by the compiler")
writer.newline()
_, names, fixes = sstruct.getformat(headFormat)
for name in names:
value = getattr(self, name)
if name in fixes:
value = floatToFixedToStr(value, precisionBits=fixes[name])
elif name in ("created", "modified"):
value = timestampToString(value)
elif name in ("magicNumber", "checkSumAdjustment"):
if value < 0:
value = value + 0x100000000
value = hex(value)
if value[-1:] == "L":
value = value[:-1]
elif name in ("macStyle", "flags"):
value = num2binary(value, 16)
writer.simpletag(name, value=value)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
value = attrs["value"]
fixes = sstruct.getformat(headFormat)[2]
if name in fixes:
value = strToFixedToFloat(value, precisionBits=fixes[name])
elif name in ("created", "modified"):
value = timestampFromString(value)
elif name in ("macStyle", "flags"):
value = binary2num(value)
else:
value = safeEval(value)
setattr(self, name, value)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@fonttools@fontTools@ttLib@tables@[email protected]_END.py
|
{
"filename": "bayesian.ipynb",
"repo_name": "sotzee/CPREX",
"repo_path": "CPREX_extracted/CPREX-main/bayesian.ipynb",
"type": "Jupyter Notebook"
}
|
```python
```
```python
import numpy as np
import scipy
import toolbox
import unitconvert
from matplotlib_rc import *
from load_all_data import *
from cov import cov_ellipse_xy
from pdf_plot import get_kde_1D,get_kde_2D,plot_density_2D
Fw_exp=np.array([0.1304,0.368])
Fw_sig2=np.array([0.0052**2+0.002**2,0.013**2])
Fc_exp=np.array([0.1581,0.409])
likelihood_name=['$R_{ch}, F_{ch}, BE$','+CREX+PREX','+CREX','+PREX']
likelihood_filename=['none','all','crex','prex']
scale_chi2=scipy.stats.chi2.ppf(np.array([0.6827,0.9545,0.9973]),df=2)**0.5
```
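As a reading aid (added here, not in the original notebook): `scale_chi2` converts the two-dimensional confidence levels 68.27%, 95.45% and 99.73% into Mahalanobis radii through the chi-squared quantile with two degrees of freedom; these radii (roughly 1.52, 2.49 and 3.44) later scale the PREX/CREX error ellipses.
```python
# Quick check of the ellipse scale factors defined above (assumes numpy and scipy
# as imported in this notebook).
import numpy as np
import scipy.stats

levels = np.array([0.6827, 0.9545, 0.9973])
radii = np.sqrt(scipy.stats.chi2.ppf(levels, df=2))
print(radii)  # approximately [1.515, 2.486, 3.439]
```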
```python
```
```python
def plot_density_1D_L(x,density_array,percentile,color,color_list,ax,marginal_axis='x',unit='',legend_loc=0,figsize_norm=1,n=30,label_text_add='',ls='-'):
det_x=x[1]-x[0]
density_array=density_array/(density_array.sum()*det_x)
density_array_max=density_array.max()
x_max=x[density_array==density_array_max]
t = np.linspace(0, density_array_max, n)
integral = ((density_array >= t[:, None]) * density_array).sum(axis=(1))
f = scipy.interpolate.interp1d(integral, t)
t_contours = f(np.array(percentile)/det_x)
x_contours = []
density_countours = []
for index_list_flag in density_array>t_contours[:,None]:
index_list=np.where(index_list_flag)[0]
x_contours.append(x[[index_list.min(),index_list.max()]])
density_countours.append(density_array[[index_list.min(),index_list.max()]])
if(marginal_axis=='x'):
line=ax.plot(x,density_array,linewidth=2*figsize_norm,color=color,label=label_text_add,ls=ls)[0]
ax.tick_params(labelbottom=True, labelleft=False)
#ax.legend(fontsize=20*figsize_norm,frameon=False,loc=legend_loc,ncol=2)
return x_max,x_contours,density_countours,line
def PDF_DP(L,Sv,weights=1): #Dipole polarizability correlation, https://journals.aps.org/prc/pdf/10.1103/PhysRevC.88.024316
return np.exp(-0.5*(L-6.11*Sv+146)**2/(0.1*Sv**2+1**2))/(2*np.pi*(0.1*Sv**2+1**2))**0.5*weights
```
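A quick numeric illustration of `PDF_DP` (added, not part of the original notebook): for a given Sv the weight is a Gaussian in L centred at 6.11*Sv - 146 with variance 0.1*Sv**2 + 1, so at Sv = 32 MeV the preferred L is about 49.5 MeV with a width of roughly 10 MeV.
```python
# Minimal sketch of the dipole-polarizability weight defined in the previous cell
# (assumes PDF_DP and numpy are already in scope).
import numpy as np

Sv = 32.0
L_center = 6.11 * Sv - 146.0          # ~49.5 MeV, where the weight peaks
L_width = np.sqrt(0.1 * Sv**2 + 1.0)  # ~10.2 MeV, Gaussian width
print(L_center, L_width, PDF_DP(L_center, Sv))
```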
```python
percentile_list = [0.68269]
color_list = ['y','k','tab:red','tab:blue','g','c']
ls_list=['--','-',(0, (2, 1, 1, 1)),(0, (4, 1, 1, 1))]
#label_list=['$R_{ch}, F_{ch}, BE$','+CREX+PREX','+CREX','+PREX']
label_list=['Basic nuclei','+CREX+PREX','+CREX','+PREX']
likelihood_name=label_list
#name='Skyrme'
#SAT_list=SAT_Skyrme_list
name='RMF'
SAT_list=SAT_RMF_to_Skyrme_list
fig, axes = plt.subplots(1,2,sharex=False,sharey=False,figsize=(8,3))
Sv_density_array_list=[]
Sv_x_contours_list=[]
Sv_x_max_list=[]
Sv_line_list=[]
for i in range(len(SAT_list)):
x=SAT_list[i][3] #Sv
x_grid,pdf_grid=get_kde_1D(x,1000,weights=None)
x_max,x_contours,density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],ls=ls_list[i],label_text_add=likelihood_name[i],unit='MeV',n=200)
Sv_density_array_list.append(density_countours)
Sv_x_contours_list.append(x_contours)
Sv_x_max_list.append(x_max)
Sv_line_list.append(line)
if(i==1):
#Sv_plot=np.linspace(20,50,1000)
x_grid,pdf_grid=get_kde_1D(x,1000,weights=PDF_DP(SAT_list[i][4],SAT_list[i][3],weights=1))
#Sv_x_max,Sv_x_contours,Sv_density_countours,Sv_line=plot_density_1D_L(Sv_plot,PDF_DP(SAT_list[i][4][np.newaxis],Sv_plot[:,np.newaxis],weights=0*SAT_list[i][4]+1).sum(axis=1),percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=100,ls=':')
Sv_x_max,Sv_x_contours,Sv_density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=200,ls=':')
#first_legend=axes[0].legend(handles=line_list,frameon=False,fontsize=10,labelspacing=0.2,loc=2)
#axes[0].add_artist(first_legend)
axes[0].set_xlim(20,50)
axes[0].set_ylim(0,0.12)
axes[0].text(0.05*(50-20)+20,0.85*0.12,name,fontsize=16)
axes[0].set_xlabel('S$_v$ [MeV]',fontsize=15)
lines=[]
for i in range(len(likelihood_name)):
for j in range(len(x_contours)):
axes[0].plot([Sv_x_contours_list[i][j][0],Sv_x_contours_list[i][j][0]],[0,Sv_density_array_list[i][j][0]],ls=ls_list[i],color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[0].plot([Sv_x_contours_list[i][j][1],Sv_x_contours_list[i][j][1]],[0,Sv_density_array_list[i][j][1]],ls=ls_list[i],color=color_list[i],linewidth=2,label=likelihood_name[i])
if(i==1):
axes[0].plot([Sv_x_contours[j][0],Sv_x_contours[j][0]],[0,Sv_density_countours[j][0]],':',color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[0].plot([Sv_x_contours[j][1],Sv_x_contours[j][1]],[0,Sv_density_countours[j][1]],':',color=color_list[i],linewidth=2,label=likelihood_name[1]+'+$\\alpha_D$')
second_legend=axes[0].legend(handles=lines,frameon=False,fontsize=10,labelspacing=0.4,loc=1)
axes[0].add_artist(second_legend)
density_array_list=[]
x_contours_list=[]
x_max_list=[]
line_list=[]
lines=[]
for i in range(len(SAT_list)):
x=SAT_list[i][4] #L
x_grid,pdf_grid=get_kde_1D(x,1000,weights=None)
x_max,x_contours,density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[1],ls=ls_list[i],label_text_add=likelihood_name[i],unit='MeV',n=200)
density_array_list.append(density_countours)
x_contours_list.append(x_contours)
x_max_list.append(x_max)
line_list.append(line)
print(x_contours-x_max,x_max)
for j in range(len(x_contours)):
axes[1].plot([x_contours_list[i][j][0],x_contours_list[i][j][0]],[0,density_array_list[i][j][0]],ls=ls_list[i],color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[1].plot([x_contours_list[i][j][1],x_contours_list[i][j][1]],[0,density_array_list[i][j][1]],ls=ls_list[i],color=color_list[i],linewidth=2,label='$S_v,L=%.1f_{%.1f}^{+%.1f}, %.1f_{%.1f}^{+%.1f}$ '%(Sv_x_max_list[i],Sv_x_contours_list[i][j][0]-Sv_x_max_list[i],Sv_x_contours_list[i][j][1]-Sv_x_max_list[i], x_max_list[i],x_contours_list[i][j][0]-x_max_list[i],x_contours_list[i][j][1]-x_max_list[i]))
if(i==1):
#L_PDF_post=interpolate.UnivariateSpline(x_grid,pdf_grid,k=3,s=0)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
#lines+=axes[1].plot([Sv_x_contours[j][1],Sv_x_contours[j][1]],[0,Sv_density_countours[j][1]],':',color=color_list[i],linewidth=2,label='$S_v=%.1f_{%.1f}^{+%.1f}$ '%(Sv_x_max,Sv_x_contours[j][0]-Sv_x_max,Sv_x_contours[j][1]-Sv_x_max)+'MeV')
#Sv_plot=np.linspace(20,50,1000)
x_grid,pdf_grid=get_kde_1D(x,1000,weights=PDF_DP(SAT_list[i][4],SAT_list[i][3],weights=1))
#Sv_x_max,Sv_x_contours,Sv_density_countours,Sv_line=plot_density_1D_L(Sv_plot,PDF_DP(SAT_list[i][4][np.newaxis],Sv_plot[:,np.newaxis],weights=0*SAT_list[i][4]+1).sum(axis=1),percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=100,ls=':')
L_x_max,L_x_contours,L_density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[1],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=200,ls=':')
axes[1].plot([L_x_contours[j][0],L_x_contours[j][0]],[0,L_density_countours[j][0]],':',color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[1].plot([L_x_contours[j][1],L_x_contours[j][1]],[0,L_density_countours[j][1]],':',color=color_list[i],linewidth=2,label='$S_v,L=%.1f_{%.1f}^{+%.1f}, %.1f_{%.1f}^{+%.1f}$'%(Sv_x_max,Sv_x_contours[j][0]-Sv_x_max,Sv_x_contours[j][1]-Sv_x_max, L_x_max,L_x_contours[j][0]-L_x_max,L_x_contours[j][1]-L_x_max))
axes[1].legend(handles=lines,frameon=False,fontsize=9,labelspacing=0.1,loc=1)
#axes[1].set_xlim(x_grid[0],x_grid[-1])
axes[1].set_xlim(0,160)
axes[1].set_ylim(0,0.021)
axes[1].text(0.72*160,0.32*0.02,' '*(10-2*len(name))+name,fontsize=16)
axes[1].set_xlabel('L [MeV]',fontsize=15)
fig.tight_layout(pad=1.0)
fig.savefig('./figures/Sv_L_posterior_'+name+'.pdf',bbox_inches = 'tight',format='pdf')
#Sv_PDF_post_Skyrme=Sv_PDF_post
#L_PDF_post_Skyrme=L_PDF_post
```
[[-22.22856073 44.45712146]] [54.19852518]
[[-25.03362672 29.08318398]] [40.03770082]
[[-22.32849318 30.16012885]] [26.88414443]
[[-25.87404443 38.91293297]] [66.77608073]

```python
percentile_list = [0.68269]
color_list = ['y','k','tab:red','tab:blue','g','c']
ls_list=['--','-',(0, (2, 1, 1, 1)),(0, (4, 1, 1, 1))]
#label_list=['$R_{ch}, F_{ch}, BE$','+CREX+PREX','+CREX','+PREX']
label_list=['Basic nuclei','+CREX+PREX','+CREX','+PREX']
likelihood_name=label_list
name='Skyrme'
SAT_list=SAT_Skyrme_list
fig, axes = plt.subplots(1,2,sharex=False,sharey=False,figsize=(8,3))
density_array_list=[]
x_contours_list=[]
x_max_list=[]
line_list=[]
lines=[]
for i in range(len(SAT_list)):
x=SAT_list[i][4] #L
x_grid,pdf_grid=get_kde_1D(x,1000,weights=None)
x_max,x_contours,density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[1],ls=ls_list[i],label_text_add=likelihood_name[i],unit='MeV',n=200)
density_array_list.append(density_countours)
x_contours_list.append(x_contours)
x_max_list.append(x_max)
line_list.append(line)
print(x_contours-x_max,x_max)
for j in range(len(x_contours)):
axes[1].plot([x_contours_list[i][j][0],x_contours_list[i][j][0]],[0,density_array_list[i][j][0]],ls=ls_list[i],color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[1].plot([x_contours_list[i][j][1],x_contours_list[i][j][1]],[0,density_array_list[i][j][1]],ls=ls_list[i],color=color_list[i],linewidth=2,label=likelihood_name[i])
if(i==1):
#L_PDF_post=interpolate.UnivariateSpline(x_grid,pdf_grid,k=3,s=0)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
#lines+=axes[1].plot([Sv_x_contours[j][1],Sv_x_contours[j][1]],[0,Sv_density_countours[j][1]],':',color=color_list[i],linewidth=2,label='$S_v=%.1f_{%.1f}^{+%.1f}$ '%(Sv_x_max,Sv_x_contours[j][0]-Sv_x_max,Sv_x_contours[j][1]-Sv_x_max)+'MeV')
#Sv_plot=np.linspace(20,50,1000)
x_grid,pdf_grid=get_kde_1D(x,1000,weights=PDF_DP(SAT_list[i][4],SAT_list[i][3],weights=1))
#Sv_x_max,Sv_x_contours,Sv_density_countours,Sv_line=plot_density_1D_L(Sv_plot,PDF_DP(SAT_list[i][4][np.newaxis],Sv_plot[:,np.newaxis],weights=0*SAT_list[i][4]+1).sum(axis=1),percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=100,ls=':')
L_x_max,L_x_contours,L_density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[1],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=200,ls=':')
axes[1].plot([L_x_contours[j][0],L_x_contours[j][0]],[0,L_density_countours[j][0]],':',color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[1].plot([L_x_contours[j][1],L_x_contours[j][1]],[0,L_density_countours[j][1]],':',color=color_list[i],linewidth=2,label=likelihood_name[1]+'+$\\alpha_D$')
axes[1].legend(handles=lines,frameon=False,fontsize=10,labelspacing=0.2,loc='upper right', bbox_to_anchor=(1.02, 1.01))
#axes[1].set_xlim(x_grid[0],x_grid[-1])
axes[1].set_xlim(0,160)
axes[1].set_ylim(0,0.021)
axes[1].text(0.72*160,0.32*0.02,' '*(10-2*len(name))+name,fontsize=16)
axes[1].set_xlabel('L [MeV]',fontsize=15)
Sv_density_array_list=[]
Sv_x_contours_list=[]
Sv_x_max_list=[]
Sv_line_list=[]
for i in range(len(SAT_list)):
x=SAT_list[i][3] #Sv
x_grid,pdf_grid=get_kde_1D(x,1000,weights=None)
x_max,x_contours,density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],ls=ls_list[i],label_text_add=likelihood_name[i],unit='MeV',n=200)
Sv_density_array_list.append(density_countours)
Sv_x_contours_list.append(x_contours)
Sv_x_max_list.append(x_max)
Sv_line_list.append(line)
if(i==1):
#Sv_plot=np.linspace(20,50,1000)
x_grid,pdf_grid=get_kde_1D(x,1000,weights=PDF_DP(SAT_list[i][4],SAT_list[i][3],weights=1))
#Sv_x_max,Sv_x_contours,Sv_density_countours,Sv_line=plot_density_1D_L(Sv_plot,PDF_DP(SAT_list[i][4][np.newaxis],Sv_plot[:,np.newaxis],weights=0*SAT_list[i][4]+1).sum(axis=1),percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=100,ls=':')
Sv_x_max,Sv_x_contours,Sv_density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=200,ls=':')
#first_legend=axes[0].legend(handles=line_list,frameon=False,fontsize=10,labelspacing=0.2,loc=2)
#axes[0].add_artist(first_legend)
axes[0].set_xlim(20,50)
axes[0].set_ylim(0,0.12)
axes[0].text(0.05*(50-20)+20,0.85*0.12,name,fontsize=16)
axes[0].set_xlabel('S$_v$ [MeV]',fontsize=15)
lines=[]
for i in range(len(likelihood_name)):
for j in range(len(x_contours)):
axes[0].plot([Sv_x_contours_list[i][j][0],Sv_x_contours_list[i][j][0]],[0,Sv_density_array_list[i][j][0]],ls=ls_list[i],color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[0].plot([Sv_x_contours_list[i][j][1],Sv_x_contours_list[i][j][1]],[0,Sv_density_array_list[i][j][1]],ls=ls_list[i],color=color_list[i],linewidth=2,label='$S_v,L=%.1f_{%.1f}^{+%.1f}, %.1f_{%.1f}^{+%.1f}$ '%(Sv_x_max_list[i],Sv_x_contours_list[i][j][0]-Sv_x_max_list[i],Sv_x_contours_list[i][j][1]-Sv_x_max_list[i], x_max_list[i],x_contours_list[i][j][0]-x_max_list[i],x_contours_list[i][j][1]-x_max_list[i]))
if(i==1):
axes[0].plot([Sv_x_contours[j][0],Sv_x_contours[j][0]],[0,Sv_density_countours[j][0]],':',color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[0].plot([Sv_x_contours[j][1],Sv_x_contours[j][1]],[0,Sv_density_countours[j][1]],':',color=color_list[i],linewidth=2,label='$S_v,L=%.1f_{%.1f}^{+%.1f}, %.1f_{%.1f}^{+%.1f}$'%(Sv_x_max,Sv_x_contours[j][0]-Sv_x_max,Sv_x_contours[j][1]-Sv_x_max, L_x_max,L_x_contours[j][0]-L_x_max,L_x_contours[j][1]-L_x_max))
second_legend=axes[0].legend(handles=lines,frameon=False,fontsize=9,labelspacing=0.1,loc='upper right', bbox_to_anchor=(1.02, 1.01))
axes[0].add_artist(second_legend)
fig.tight_layout(pad=1.0)
fig.savefig('./figures/Sv_L_posterior_'+name+'.pdf',bbox_inches = 'tight',format='pdf')
```
[[-26.678478 35.12103433]] [56.58082067]
[[-22.49127297 21.26892118]] [35.9491778]
[[-14.60625154 25.80437772]] [21.66114762]
[[-26.7471772 29.76094364]] [72.36653698]

```python
```
```python
```
```python
#Show Sv-L posterior with PREX and CREX:
for SAT in [SAT_Skyrme_list[1],SAT_RMF_to_Skyrme_list[1],SAT_RMF_list[1]]:
mean=np.mean(SAT[3:5],axis=1)
cov =np.cov(SAT[3],SAT[4])#,SAT_RMF_to_Skyrme_list
print(mean,cov)
```
[34.1403562 40.3024777] [[ 42.19991969 83.52975473]
[ 83.52975473 453.23480257]]
[31.42158453 49.13189107] [[ 26.71305169 92.79344187]
[ 92.79344187 749.8499721 ]]
[ 39.21816128 -10.08244647] [[ 60.26530628 -105.82394433]
[-105.82394433 2354.43845758]]
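The printed covariance matrices can be condensed into a Pearson correlation coefficient between Sv and L (an added illustration, not in the original notebook); for the first matrix above (Skyrme, PREX+CREX posterior) it comes out near 0.6.
```python
# Correlation coefficient implied by a 2x2 covariance matrix; values copied from
# the first printed output above.
import numpy as np

cov = np.array([[42.19991969, 83.52975473],
                [83.52975473, 453.23480257]])
r = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
print(r)  # ~0.60
```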
```python
#Show Sv-L posterior with Basic nuclei constraints:
for SAT in [np.concatenate((SAT_Skyrme_list[0],SAT_RMF_to_Skyrme_list[0][:,::10]),axis=1)]:
mean=np.mean(SAT[3:5],axis=1)
cov =np.cov(SAT[3],SAT[4])#,SAT_RMF_to_Skyrme_list
print(mean,cov)
x,y=cov_ellipse_xy(mean,cov)
plt.plot(x,y,'--b',label='RMF')
```
[32.77900096 66.82662838] [[ 34.96167206 136.75320432]
[136.75320432 976.4023266 ]]

```python
```
```python
```
```python
def normal(FcFw_Pb,FcFw_Ca):
part3=-0.5*((FcFw_Ca-(Fc_exp[0]-Fw_exp[0]))**2/Fw_sig2[0])
part4=-0.5*((FcFw_Pb-(Fc_exp[1]-Fw_exp[1]))**2/Fw_sig2[1])
return np.exp(part3+part4)
def distribution(det_x,det_y,x_grid,y_grid,pdf_grid,pdf_function):
return (pdf_grid*(pdf_function(x_grid+det_x,y_grid+det_y))).sum()
dimension=1
def discrepancy_sig(n_sigma,discrepancy,dimension=1):
return scipy.special.gammainc(dimension/2,n_sigma**2/2) - discrepancy
```
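`discrepancy_sig` is root-solved further below to translate a tail probability into an equivalent number of sigmas; as a check (added here), in one dimension a probability of 0.9545 maps back to 2 sigma, since gammainc(1/2, n**2/2) equals erf(n/sqrt(2)).
```python
# Sanity check of the probability-to-sigma conversion (assumes discrepancy_sig
# from the previous cell is in scope).
import scipy.optimize
import scipy.special

n_sigma = scipy.optimize.root(discrepancy_sig, [1], args=(0.9545, 1)).x[0]
print(n_sigma)                                 # ~2.0
print(scipy.special.gammainc(0.5, 2**2 / 2))   # ~0.9545
```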
```python
bin_num=[100,200]
#bin_num=[50,100]
percentile_list = [0.9545, 0.9, 0.6827, 0.3]
color_list = ['c','r','k','y','g']
for name,PrexCrex_list in zip(['Skyrme','RMF'],[PrexCrex_Skyrme_list,PrexCrex_RMF_to_Skyrme_list]):
discrepancy=[]
for i,PrexCrex_i,likelihood_name_i,likelihood_filename_i in zip(range(4),PrexCrex_list,likelihood_name,likelihood_filename):
y=PrexCrex_i[0]-PrexCrex_i[2]
x=PrexCrex_i[1]-PrexCrex_i[3]
x_grid,y_grid,pdf_grid=get_kde_2D([x,y],bin_num,weights=None,x_min=0,x_max=0.06)
ax=plot_density_2D(x_grid,y_grid,pdf_grid,percentile_list,color_list,'$F_{ch}^{Pb208}-F_W^{Pb208}$','$F_{ch}^{Ca48}-F_W^{Ca48}$',y_unit='',inline=True)
ax.legend(fontsize=0)
ax.set_xlim(0,0.06)
ax.set_ylim(0.02,0.06)
line=ax.plot([0.024,0.024],[0.026,0.039],'k')
line0=ax.plot([0.011,0.033],[0.034,0.034],'k',label='Ab initio')
#line1=ax.plot(Fc_new[2]-Fw_new[2],Fc_new[0]-Fw_new[0],'om',label='L30 - L130')
#line2=ax.plot(Fc_rex[2]-Fw_rex[2],Fc_rex[0]-Fw_rex[0],'o',color='brown',label='Sv31 - Sv51')
#line_skyrme=ax.plot(FchFw_andrew[logic_plot_andrew,1],FchFw_andrew[logic_plot_andrew,0],'og',label='Skyrme')
line3=ax.plot([0],[0],'--k',lw=5,label='Posterior:'+likelihood_name[i])
#line4=ax.plot(Fc_lit[2,7:]-Fw_lit[2,7:],Fc_lit[0,7:]-Fw_lit[0,7:],'+k')
#line5=ax.plot(Fc_lit[2,:7]-Fw_lit[2,:7],Fc_lit[0,:7]-Fw_lit[0,:7],'Xk')
#for j in range(len(special[0])):
# ax.text((Fc_lit[2]-Fw_lit[2])[j]+0.0004,(Fc_lit[0]-Fw_lit[0])[j]-0.0004,special[0][j],fontsize=20)
theta=np.linspace(0,2*np.pi,100)
mean_208Pb=Fc_exp[1]-Fw_exp[1]
mean_48Ca =Fc_exp[0]-Fw_exp[0]
std_208Pb =np.sqrt(Fw_sig2[1])
std_48Ca =np.sqrt(Fw_sig2[0])
ax.plot(mean_208Pb+scale_chi2[0]*std_208Pb*np.cos(theta),mean_48Ca+scale_chi2[0]*std_48Ca*np.sin(theta),'--b')
ax.plot(mean_208Pb+scale_chi2[1]*std_208Pb*np.cos(theta),mean_48Ca+scale_chi2[1]*std_48Ca*np.sin(theta),'--b')
line5=ax.plot(mean_208Pb+scale_chi2[2]*std_208Pb*np.cos(theta),mean_48Ca+scale_chi2[2]*std_48Ca*np.sin(theta),'--b',label='PREX & CREX 68%,95%,99.7% CR')
first_legend = ax.legend(handles=line3+line0,fontsize=40,frameon=False, loc='upper left')
ax.add_artist(first_legend)
plt.savefig('./figures/FF_posterior_'+likelihood_filename_i+'_'+name+'.pdf',bbox_inches = 'tight',format='pdf')
det_x_plot=np.linspace(-0.05,0.05,201)
det_y_plot=np.linspace(-0.05,0.05,201)
det_pdf_plot=[]
for x_i in det_x_plot:
det_pdf_plot.append([])
for y_i in det_y_plot:
det_pdf_plot[-1].append(distribution(x_i,y_i,x_grid,y_grid,pdf_grid,normal))
det_pdf_plot=np.array(det_pdf_plot)
discrepancy.append(det_pdf_plot[det_pdf_plot>det_pdf_plot[100,100]].sum()/det_pdf_plot.sum())
discrepancy=np.array(discrepancy)
discrepancy_sigma=[]
for discrepancy_i in discrepancy:
discrepancy_sigma.append(scipy.optimize.root(discrepancy_sig,[1],args=(discrepancy_i,dimension)).x[0])
discrepancy_sigma=np.array(discrepancy_sigma)
print('discrepancy:')
print(discrepancy)
print(discrepancy_sigma)
```
['95.45' '90.0' '68.27' '30.0']
[3.41296430e-05 6.69900905e-05 1.90477190e-04 3.82325601e-04]
['95.45' '90.0' '68.27' '30.0']
[3.58176538e-05 7.26046254e-05 1.99614937e-04 4.07239349e-04]
['95.45' '90.0' '68.27' '30.0']
[3.46224025e-05 7.11831546e-05 1.97704966e-04 4.17419706e-04]
['95.45' '90.0' '68.27' '30.0']
[3.42866694e-05 7.28949447e-05 2.20050077e-04 4.81429380e-04]
discrepancy:
[0.99758352 0.93534285 0.97576861 0.99358055]
[3.03360763 1.8476213 2.25344046 2.72554979]
['95.45' '90.0' '68.27' '30.0']
[3.63940451e-05 7.18120417e-05 2.03007325e-04 4.89894953e-04]
['95.45' '90.0' '68.27' '30.0']
[3.36621251e-05 6.75055237e-05 1.99928137e-04 3.83128663e-04]
['95.45' '90.0' '68.27' '30.0']
[3.44100485e-05 6.45700366e-05 1.72366892e-04 3.20490870e-04]
['95.45' '90.0' '68.27' '30.0']
[3.73099490e-05 7.25232625e-05 2.17844822e-04 5.27913574e-04]
discrepancy:
[0.96797957 0.76028388 0.80019743 0.97115723]
[2.14415553 1.17569666 1.28211425 2.18562891]








```python
```
```python
```
```python
```
```python
N=211
r_fm_max=20
r_grid_fm =np.linspace(0,r_fm_max,N)
r_grid_MeV=r_grid_fm*unitconvert.unitMeVfm**(1/3)
omega_over_m=0.022
M=939
def load_basis(path,dir_name):
kappa_list=np.loadtxt(path+dir_name+'/init.txt')
x=np.loadtxt(path+dir_name+'/x_grid.txt')
E_over_m_list=np.loadtxt(path+dir_name+'/E_over_m.txt')
g_basis_list=[]
f_basis_list=[]
g_times_r_basis_list=[]
f_times_r_basis_list=[]
g_basis_norm_list=[]
f_basis_norm_list=[]
g_times_r_basis_norm_list=[]
f_times_r_basis_norm_list=[]
Matrix_fg=[]
for i,kappa in enumerate(kappa_list):
dir_kappa=dir_name+'/kappa_%d'%kappa
path_dir_kappa=path+dir_kappa
g_basis_list.append(np.loadtxt(path_dir_kappa+'/g_basis_norm0.txt'))
f_basis_list.append(np.loadtxt(path_dir_kappa+'/f_basis_norm0.txt'))
g_times_r_basis_list.append(np.loadtxt(path_dir_kappa+'/g_basis_norm1.txt'))
f_times_r_basis_list.append(np.loadtxt(path_dir_kappa+'/f_basis_norm1.txt'))
g_basis_norm_list.append(np.loadtxt(path_dir_kappa+'/g_basis_norm2.txt'))
f_basis_norm_list.append(np.loadtxt(path_dir_kappa+'/f_basis_norm2.txt'))
g_times_r_basis_norm_list.append(np.loadtxt(path_dir_kappa+'/g_basis_norm3.txt'))
f_times_r_basis_norm_list.append(np.loadtxt(path_dir_kappa+'/f_basis_norm3.txt'))
Matrix_fg.append(np.loadtxt(path_dir_kappa+'/Matrix_fg.txt'))
g_basis_list=np.array(g_basis_list)
f_basis_list=np.array(f_basis_list)
g_times_r_basis_list=np.array(g_times_r_basis_list)
f_times_r_basis_list=np.array(f_times_r_basis_list)
g_basis_norm_list=np.array(g_basis_norm_list)
f_basis_norm_list=np.array(f_basis_norm_list)
g_times_r_basis_norm_list=np.array(g_times_r_basis_norm_list)
f_times_r_basis_norm_list=np.array(f_times_r_basis_norm_list)
Matrix_fg=np.array(Matrix_fg)
return [kappa_list,E_over_m_list,g_basis_list,f_basis_list,g_times_r_basis_list,f_times_r_basis_list, g_basis_norm_list,f_basis_norm_list,g_times_r_basis_norm_list,f_times_r_basis_norm_list,Matrix_fg]
kappa_list,E_over_m_list,g_basis_list,f_basis_list,g_times_r_basis_list,f_times_r_basis_list, g_basis_norm_list,f_basis_norm_list,g_times_r_basis_norm_list,f_times_r_basis_norm_list,Matrix_fg=load_basis('./data/','basis')
kappa_list=list(kappa_list)
level_list=[[22,16],[11,10],[7,6]]
q_list=[0.3977,1,0.8733] #Form factor at momentum q in fm-1; the q for 90Zr is a dummy value.
density_unit=omega_over_m*M**2/(4*np.pi*(r_grid_MeV[1]-r_grid_MeV[0]))
def vector_to_result(kappa_all,vector_all,kappa_list,basis_norm_list):
return np.array([vector_all[:,i].dot(basis_norm_list[kappa_list.index(kappa_i)]) for i,kappa_i in enumerate(kappa_all)])
def density(kappaEgf,level_list,return_levels=False):
N_basis=int(len(kappaEgf)/2-1)
kappa_all=kappaEgf[0]
densities=[]
densities_levels=[]
level_sum=0
for to_level_np in level_list:
densities_levels.append([])
for to_level in to_level_np:
g_vector_all=kappaEgf[2:(2+N_basis),level_sum:(level_sum+to_level)]
f_vector_all=kappaEgf[(2+N_basis):,level_sum:(level_sum+to_level)]
#print(kappa_all[level_sum:(level_sum+to_level)],kappa_all[level_sum:(level_sum+to_level)].shape,g_vector_all.shape)
g_result_all=vector_to_result(kappa_all[level_sum:(level_sum+to_level)],g_vector_all,kappa_list,g_basis_norm_list)
f_result_all=vector_to_result(kappa_all[level_sum:(level_sum+to_level)],f_vector_all,kappa_list,f_basis_norm_list)
g2_result_all=g_result_all**2
f2_result_all=f_result_all**2
svt_result_all=np.array([g2_result_all-f2_result_all,g2_result_all+f2_result_all,2*g_result_all*f_result_all])
densities_levels[-1].append(density_unit*svt_result_all*np.abs(2*kappa_all[level_sum:(level_sum+to_level)])[np.newaxis,:,np.newaxis])
densities.append(densities_levels[-1][-1].sum(axis=1))
level_sum+=to_level
if(return_levels):
return np.array(densities).reshape((3,2,3,N)),densities_levels
else:
return np.array(densities).reshape((3,2,3,N))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[0][:,:,:5000],'kappaEgf_RMF_to_Skyrme_none1'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[0][:,:,5000:],'kappaEgf_RMF_to_Skyrme_none2'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[1],'kappaEgf_RMF_to_Skyrme_all'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[2],'kappaEgf_RMF_to_Skyrme_crex'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[3][:,:,:5000],'kappaEgf_RMF_to_Skyrme_prex1'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[3][:,:,5000:10000],'kappaEgf_RMF_to_Skyrme_prex2'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[3][:,:,10000:],'kappaEgf_RMF_to_Skyrme_prex3'],))
#kappaEgf_RMF_to_Skyrme_list=toolbox.pickle_load('./','data',['kappaEgf_RMF_to_Skyrme_list'])[0]
kappaEgf_RMF_to_Skyrme_list=[]
kappaEgf_RMF_to_Skyrme_list+=[np.concatenate(toolbox.pickle_load('./','data',['kappaEgf_RMF_to_Skyrme_none1','kappaEgf_RMF_to_Skyrme_none2']),axis=2)]
kappaEgf_RMF_to_Skyrme_list+=toolbox.pickle_load('./','data',['kappaEgf_RMF_to_Skyrme_all','kappaEgf_RMF_to_Skyrme_crex'])
kappaEgf_RMF_to_Skyrme_list+=[np.concatenate(toolbox.pickle_load('./','data',['kappaEgf_RMF_to_Skyrme_PREX1','kappaEgf_RMF_to_Skyrme_PREX2','kappaEgf_RMF_to_Skyrme_PREX3']),axis=2)]
densities_fm3_RMF_to_Skyrme_list=[unitconvert.toMevfm(np.array([density(kappaEgf_i,level_list) for kappaEgf_i in kappaEgf.transpose((2,0,1))]),'mev4') for kappaEgf in kappaEgf_RMF_to_Skyrme_list]
```
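A note on the array layout (added interpretation, inferred from how the plotting cells below slice the result): `density()` returns shape (3, 2, 3, N), which appears to index nucleus, nucleon species, density component ({g²−f², g²+f², 2gf}) and radial grid point; the profile plots use component index 1, i.e. g²+f².
```python
# Hypothetical shape walk-through for one parameter sample (assumes the arrays
# computed above exist; axis meaning inferred from the plotting cells).
one_sample = densities_fm3_RMF_to_Skyrme_list[0][0]
print(one_sample.shape)                  # (3, 2, 3, N)
vector_density = one_sample[:, :, 1, :]  # component 1 = g**2 + f**2, plotted below
```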
```python
```
```python
percentile_array=np.array([16,50,84])
density_percentile=[]
#for densities_fm3_i in densities_fm3_RMF_list:
for densities_fm3_i in densities_fm3_RMF_to_Skyrme_list:
density_percentile.append(np.percentile(densities_fm3_i,percentile_array,axis=0))
density_percentile=np.array(density_percentile)
```
```python
density_percentile_to_plot=density_percentile
name='RMF'
lw=0.5
ls='--'
alpha=0.4
color_list = ['y','k','tab:red','tab:blue']
for nuclei_index,nuclei_name,file_name,xlim,xlim_small in zip([0,1,2],['$^{208}$Pb','$^{90}$Zr','$^{48}$Ca'],['pb208','zr490','ca48'],[12.5,9,7],[[6.7,7.6],[5.1,5.6],[4,4.6]]):
fig,ax=plt.subplots(1,1,figsize=(5,4),sharex=False,sharey=False)
ax.set_xticks(range(0,(int(xlim/2)+1)*2,2))
ax_twin=ax.twinx()
nucleon_index=0
band_list=[]
line_list=[]
for i in range(4):
band_list.append(ax.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,label=likelihood_name[i],color=color_list[i],linewidth=0))
line_list+=ax_twin.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],label=likelihood_name[i],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
nucleon_index=1
for i in range(4):
ax.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,label=likelihood_name[i],color=color_list[i],linewidth=0)
ax_twin.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],label=likelihood_name[i],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
first_legend = ax.legend(handles=band_list,fontsize=12,frameon=False,handletextpad=0.6,ncol=1,columnspacing=-0.8, loc='lower left', bbox_to_anchor=(0, 0))
ax.add_artist(first_legend)
ax.set_xlim(0,xlim)
ax.set_ylim(0,0.11)
ax.set_xlabel('r [fm]',fontsize=15)
ax.set_ylabel('$n$ [fm$^{-3}$]',fontsize=15)
first_legend = ax_twin.legend(handles=line_list,fontsize=12,frameon=False,handletextpad=0.6,ncol=1,columnspacing=-0.8, loc='lower left', bbox_to_anchor=(0, 0))
ax_twin.add_artist(first_legend)
ax_twin.set_xlim(0,xlim)
ax_twin.set_ylim(0,0.11)
ax_twin.set_yticklabels('')
ax.plot(np.concatenate((xlim_small,xlim_small[::-1],[xlim_small[0]])),[0.02,0.02,0.03,0.03,0.02],'k',lw=1)
ax_small=ax.inset_axes([0.72,0.35,0.25,0.4])
for nucleon_index in [0,1]:
for i in range(4):
ax_small.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,color=color_list[i],linewidth=0)
ax_small.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
ax_small.set_xlim(xlim_small[0],xlim_small[1])
ax_small.set_ylim(0.02,0.03)
ax_small.set_yticks([0.02,0.03])
second_legend = ax.legend(title=nuclei_name,handles=[],title_fontsize=20,frameon=False,handletextpad=0.3, loc='lower left', bbox_to_anchor=(0.7, 0.75))
#ax.add_artist(second_legend)
plt.savefig('./figures/profile_'+file_name+'_'+name+'.pdf',bbox_inches = 'tight',format='pdf')
```



```python
```
```python
percentile_array=np.array([16,50,84])
b4_grid=np.array([-np.inf,0.1,0.3,0.5,0.7,np.inf])
b4_grid_name=['0.0','0.1','0.3','0.5','0.7','1']
#b4p=(eos_args_RMF[4]/763**2+eos_args_RMF[2]/980**2)/(8*939**2)*197.3**4
#densities_fm3_RMF=np.concatenate(densities_fm3_RMF_list)
b4p=(eos_args_RMF_to_Skyrme[4]/763**2+eos_args_RMF_to_Skyrme[2]/980**2)/(8*939**2)*197.3**4
densities_fm3_RMF=np.concatenate(densities_fm3_RMF_to_Skyrme_list)
density_percentile_b4p=[]
for i in range(len(b4_grid)-1):
logic_i=np.logical_and(b4p>b4_grid[i],b4p<b4_grid[i+1])
densities_fm3_i=densities_fm3_RMF[logic_i]
density_percentile_b4p.append(np.percentile(densities_fm3_i,percentile_array,axis=0))
print(len(densities_fm3_i),np.diff(PrexCrex_RMF_to_Skyrme[[11,10]][:,logic_i].mean(axis=1)),np.diff(PrexCrex_RMF_to_Skyrme[[3,1]][:,logic_i].mean(axis=1)))
print(np.diff(PrexCrex_RMF_to_Skyrme[[13,12]][:,logic_i].mean(axis=1)),np.diff(PrexCrex_RMF_to_Skyrme[[2,0]][:,logic_i].mean(axis=1)))
density_percentile_b4p=np.array(density_percentile_b4p)
```
8177 [0.21528704] [0.03138166]
[0.20179449] [0.04928604]
10325 [0.20266091] [0.02962093]
[0.18067771] [0.04601113]
7101 [0.19738922] [0.02929961]
[0.14686463] [0.04128215]
5272 [0.21358778] [0.03198978]
[0.12272937] [0.03756205]
556 [0.25659021] [0.03809455]
[0.13373278] [0.03832359]
```python
```
```python
density_percentile_to_plot=density_percentile_b4p
label_nambe_to_plot=[b4_grid_name[i]+'<$b^{\'}_4$/fm$^4$<'+b4_grid_name[i+1] for i in range(len(b4_grid_name)-1)]
lw=0.5
ls='--'
alpha=0.4
color_list=np.array([[0. , 0.64509804, 1. , 1. ],
[0.24984187, 1. , 0.71790006, 1. ],
[0.71790006, 1. , 0.24984187, 1. ],
[1. , 0.72694263, 0. , 1. ],
[1. , 0.18954248, 0. , 1. ]])
#color_list = ['y','k','tab:red','tab:blue']
for nuclei_index,nuclei_name,file_name,xlim,xlim_small in zip([0,1,2],['$^{208}$Pb','$^{90}$Zr','$^{48}$Ca'],['pb208','zr490','ca48'],[12.5,9,7],[[6.7,7.6],[5.1,5.6],[4,4.6]]):
fig,ax=plt.subplots(1,1,figsize=(5,4),sharex=False,sharey=False)
ax.set_xticks(range(0,(int(xlim/2)+1)*2,2))
ax_twin=ax.twinx()
nucleon_index=0
band_list=[]
line_list=[]
for i in range(5):
band_list.append(ax.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,label=label_nambe_to_plot[i],color=color_list[i],linewidth=0))
line_list+=ax_twin.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],label=label_nambe_to_plot[i],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
nucleon_index=1
for i in range(5):
ax.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,label=label_nambe_to_plot[i],color=color_list[i],linewidth=0)
ax_twin.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],label=label_nambe_to_plot[i],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
first_legend = ax.legend(handles=band_list,fontsize=11,frameon=False,handletextpad=0.6,ncol=1,columnspacing=-0.8, loc='lower left', bbox_to_anchor=(0, 0))
ax.add_artist(first_legend)
ax.set_xlim(0,xlim)
ax.set_ylim(0,0.11)
ax.set_xlabel('r [fm]',fontsize=15)
ax.set_ylabel('$n$ [fm$^{-3}$]',fontsize=15)
first_legend = ax_twin.legend(handles=line_list,fontsize=11,frameon=False,handletextpad=0.6,ncol=1,columnspacing=-0.8, loc='lower left', bbox_to_anchor=(0, 0))
ax_twin.add_artist(first_legend)
ax_twin.set_xlim(0,xlim)
ax_twin.set_ylim(0,0.11)
ax_twin.set_yticklabels('')
ax.plot(np.concatenate((xlim_small,xlim_small[::-1],[xlim_small[0]])),[0.02,0.02,0.03,0.03,0.02],'k',lw=1)
ax_small=ax.inset_axes([0.72,0.35,0.25,0.4])
for nucleon_index in [0,1]:
for i in range(5):
ax_small.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,label=label_nambe_to_plot[i],color=color_list[i],linewidth=0)
ax_small.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],label=label_nambe_to_plot[i],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
ax_small.set_xlim(xlim_small[0],xlim_small[1])
ax_small.set_ylim(0.02,0.03)
ax_small.set_yticks([0.02,0.03])
second_legend = ax.legend(title=nuclei_name,handles=[],title_fontsize=20,frameon=False,handletextpad=0.3, loc='lower left', bbox_to_anchor=(0.7, 0.75))
#ax.add_artist(second_legend)
plt.savefig('./figures/profile_b4p_'+file_name+'_RMF.pdf',bbox_inches = 'tight',format='pdf')
```



```python
```
|
sotzeeREPO_NAMECPREXPATH_START.@CPREX_extracted@[email protected]@.PATH_END.py
|
{
"filename": "AmoebaFitter.py",
"repo_name": "dokester/BayesicFitting",
"repo_path": "BayesicFitting_extracted/BayesicFitting-master/BayesicFitting/source/AmoebaFitter.py",
"type": "Python"
}
|
import numpy as numpy
import math
from . import Tools
from .MaxLikelihoodFitter import MaxLikelihoodFitter
from .AnnealingAmoeba import AnnealingAmoeba
__author__ = "Do Kester"
__year__ = 2023
__license__ = "GPL3"
__version__ = "3.1.0"
__url__ = "https://www.bayesicfitting.nl"
__status__ = "Perpetual Beta"
# *
# * This file is part of the BayesicFitting package.
# *
# * BayesicFitting is free software: you can redistribute it and/or modify
# * it under the terms of the GNU Lesser General Public License as
# * published by the Free Software Foundation, either version 3 of
# * the License, or ( at your option ) any later version.
# *
# * BayesicFitting is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU Lesser General Public License for more details.
# *
# * The GPL3 license can be found at <http://www.gnu.org/licenses/>.
# *
# * A JAVA version of this code was part of the Herschel Common
# * Science System (HCSS), also under GPL3.
# *
# * 2003 - 2014 Do Kester, SRON (Java code)
# * 2017 - 2023 Do Kester
class AmoebaFitter( MaxLikelihoodFitter ):
"""
Fitter using the simulated annealing simplex minimum finding algorithm,
See also: @AnnealingAmoeba
Author Do Kester
Examples
--------
# assume x and y are Double1d data arrays.
>>> x = numpy.arange( 100, dtype=float ) / 10
>>> y = 3.5 * SIN( x + 0.4 ) # make sine
>>> numpy.random.seed( 12345 ) # Gaussian random number generator
>>> y += numpy.random.randn( 100 ) * 0.2 # add noise
>>> sine = SineModel( ) # sinusoidal model
>>> lolim = numpy.asarray( [1,-10,-10], dtype=float )
>>> hilim = numpy.asarray( [100,10,10], dtype=float )
>>> sine.setLimits( lolim, hilim ) # set limits on the model parameters
>>> amfit = AmoebaFitter( x, sine )
>>> param = amfit.fit( y, temp=10 )
>>> stdev = amfit.getStandardDeviation( ) # stdevs on the parameters
>>> chisq = amfit.getChiSquared( )
>>> scale = amfit.getScale( ) # noise scale
>>> yfit = amfit.getResult( ) # fitted values
>>> yfit = sine( x ) # fitted values ( same as previous )
>>> yband = amfit.monteCarloError( ) # 1 sigma confidence region
# for diagnostics ( or just for fun )
>>> amfit = AmoebaFitter( x, sine )
>>> amfit.setTemperature( 10 ) # set a temperature to escape local minima
>>> amfit.setVerbose( 10 ) # report every 10th iteration
>>> plotter = IterationPlotter( ) # from BayesicFitting
>>> amfit.setPlotter( plotter, 20 ) # make a plot every 20th iteration
>>> param = amfit.fit( y )
Notes
-----
1. AmoebaFitter is not guaranteed to find the global minimum.
2. The calculation of the evidence is a Gaussian approximation which is
only exact for linear models with a fixed scale.
Author : Do Kester.
"""
# *************************************************************************
def __init__( self, xdata, model, **kwargs ):
"""
Create a new Amoeba class, providing inputs and model.
Parameters
----------
xdata : array_like
independent input values
model : Model
the model function to be fitted
kwargs : dict
Possibly includes keywords from
MaxLikelihoodFitter : errdis, scale, power
IterativeFitter : maxIter, tolerance, verbose
BaseFitter : map, keep, fixedScale
"""
if model.npchain <= 1 :
raise ValueError( "AmoebaFitter cannot make a simplex of one parameter" )
super( AmoebaFitter, self ).__init__( xdata, model, **kwargs )
# *************************************************************************
def fit( self, data, weights=None, par0=None, keep=None, size=None,
seed=4567, temp=0, limits=None, maxiter=1000,
tolerance=0.0001, cooling=0.95, steps=10,
verbose=0, plot=False, accuracy=None, callback=None ):
### TBC parameter defaults
"""
Return Model fitted to the data array.
When done, it also calculates the hessian matrix and chisq.
Parameters
----------
data : array_like
the data vector to be fitted
weights : array_like
weights pertaining to the data
The weights are relative weights unless `scale` is set.
accuracy : float or array_like
accuracy of (individual) data
par0 : array_like
initial values of the parameters of the model
default: from model
keep : dict of {int:float}
dictionary of indices (int) to be kept at a fixed value (float)
The values of keep are only valid for *this* fit
See also `AmoebaFitter( ..., keep=dict )`
size : float or array_like
step size of the simplex
seed : int
for random number generator
temp : float
temperature of annealing (0 is no annealing)
limits : None or list of 2 floats or list of 2 array_like
None : no limits applied
[lo,hi] : low and high limits for all values
[la,ha] : low array and high array limits for the values
maxiter : int
max number of iterations
tolerance : float
stops when ( |hi-lo| / (|hi|+|lo|) ) < tolerance
cooling : float
cooling factor when annealing
steps : int
number of cycles in each cooling step.
verbose : int
0 : silent
1 : print results to output
2 : print some info every 100 iterations
3 : print some info all iterations
plot : bool
plot the results.
callback : callable
is called each iteration as
`val = callback( val )`
where `val` is the minimizable array
"""
fitIndex, data, weights = self.fitprolog( data, weights=weights,
accuracy=accuracy, keep=keep )
func = self.makeFuncs( data, weights=weights, index=fitIndex, ret=1 )
if par0 is None :
par0 = self.model.parameters
if fitIndex is not None and len( fitIndex ) < len( par0 ) :
par0 = par0[fitIndex]
kwargs = {}
if size is not None :
kwargs["size"] = size
if seed is not None :
kwargs["seed"] = seed
if temp is not None :
kwargs["temp"] = temp
if limits is not None :
kwargs["limits"] = limits
if maxiter is not None :
kwargs["maxiter"] = maxiter
if tolerance is not None :
kwargs["reltol"] = tolerance
kwargs["abstol"] = tolerance
if cooling is not None :
kwargs["cooling"] = cooling
if steps is not None :
kwargs["steps"] = steps
if verbose is not None :
kwargs["verbose"] = verbose
if callback is not None :
kwargs["callback"] = callback
amoeba = AnnealingAmoeba( func, par0, **kwargs )
par = amoeba.minimize()
parameters = self.insertParameters( par, index=fitIndex )
self.model.parameters = parameters
if self.isChisq :
self.chisq = amoeba.fopt
else :
self.logLikelihood = -amoeba.fopt
self.chisq = self.chiSquared( data, weights=weights )
self.iter = amoeba.iter
self.ntrans = amoeba.ncalls
self.simplex = amoeba.simplex
self.values = amoeba.values
# plot = plot or ( verbose == 2 )
self.fitpostscript( data, plot=plot )
return parameters
def __str__( self ):
""" Return name of the fitter. """
return "AmoebaFitter"
|
dokesterREPO_NAMEBayesicFittingPATH_START.@BayesicFitting_extracted@BayesicFitting-master@BayesicFitting@[email protected]@.PATH_END.py
|
{
"filename": "_xref.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/colorbar/_xref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XrefValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="xref", parent_name="surface.colorbar", **kwargs):
super(XrefValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", ["container", "paper"]),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@colorbar@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "bwinkel/cygrid",
"repo_path": "cygrid_extracted/cygrid-master/cygrid/__init__.py",
"type": "Python"
}
|
from .cygrid import *
from .healpix import *
from .hphashtab import *
from .helpers import *
from .mock import *
from .init_testrunner import *
from .version import version
__version__ = version
|
bwinkelREPO_NAMEcygridPATH_START.@cygrid_extracted@cygrid-master@cygrid@[email protected]_END.py
|
{
"filename": "setup.py",
"repo_name": "Keck-DataReductionPipelines/KPF-Pipeline",
"repo_path": "KPF-Pipeline_extracted/KPF-Pipeline-master/setup.py",
"type": "Python"
}
|
from setuptools import setup, find_packages
import re
def get_property(prop, project):
result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(prop),
open(project + '/__init__.py').read())
return result.group(1)
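# Illustration (added comment, not part of the original setup.py): for a package
# __init__.py containing a line such as
#     __version__ = '1.2.3'
# get_property('__version__', 'kpfpipe') returns '1.2.3', because the regular
# expression captures whatever sits between the quotes after the assignment.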
# reqs = []
# for line in open('requirements.txt', 'r').readlines():
# reqs.append(line)
setup(
name="kpfpipe",
version=get_property('__version__', 'kpfpipe'),
author="BJ Fulton, Arpita Roy, Andrew Howard",
packages=find_packages(),
entry_points={'console_scripts': ['kpf=kpfpipe.cli:main']},
)
|
Keck-DataReductionPipelinesREPO_NAMEKPF-PipelinePATH_START.@KPF-Pipeline_extracted@[email protected]@.PATH_END.py
|
{
"filename": "_lineposition.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymap/legendgrouptitle/font/_lineposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(
self,
plotly_name="lineposition",
parent_name="densitymap.legendgrouptitle.font",
**kwargs,
):
super(LinepositionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
extras=kwargs.pop("extras", ["none"]),
flags=kwargs.pop("flags", ["under", "over", "through"]),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@densitymap@legendgrouptitle@font@[email protected]_END.py
|
{
"filename": "test_orders.py",
"repo_name": "chandra-marx/marxs",
"repo_path": "marxs_extracted/marxs-main/marxs/missions/arcus/tests/test_orders.py",
"type": "Python"
}
|
# Licensed under GPL version 3 - see LICENSE.rst
import numpy as np
from marxs.source import PointSource, FixedPointing
import astropy.units as u
from astropy.coordinates import SkyCoord
from marxs.math.utils import xyz2zxy
from .. import Arcus
import pytest
e = 0.5 * u.keV
mysource = PointSource(coords=SkyCoord(30. * u.deg, 30. * u.deg),
energy=e)
mypointing = FixedPointing(coords=SkyCoord(30 * u.deg, 30. * u.deg),
reference_transform=xyz2zxy)
@pytest.mark.parametrize("instrument", [Arcus(channels=['1']),
Arcus(channels=['2m'])])
def test_orders_are_focussed(instrument):
'''Check that the orders look reasonable.
This test tries to be generic so that coordinate system etc. can be
changed later, but still check that all light is focused to
one point, to detect errors in setting up the mirrors.
'''
photons = mysource.generate_photons(2e4 * u.s)
photons = mypointing(photons)
photons = instrument(photons)
for i in range(-12, 1):
ind = (photons['order'] == i) & np.isfinite(photons['det_x']) & (photons['probability'] > 0)
if ind.sum() > 100:
assert np.std(photons['det_y'][ind & (photons['order_L1'] == 0)]) < 1
assert np.std(photons['det_x'][ind]) < 1
assert np.std(photons['det_x'][ind]) < np.std(photons['det_y'][ind])
# But make sure they are not focussed too much
# That would indicate that the scattering did not work
assert np.std(photons['det_x'][ind]) > .04
assert np.std(photons['det_y'][ind]) > .1
def test_zeroth_order_and_some_dispersed_orders_are_seen():
'''test that both the zeroth order and some of the dispersed
orders are positioned in the detector.
'''
photons = mysource.generate_photons(2e4 * u.s)
photons = mypointing(photons)
photons = Arcus()(photons)
n_det = [((photons['order'] == i) & np.isfinite(photons['det_x'])).sum() for i in range(-12, 1)]
assert n_det[-1] > 0
assert sum(n_det[:9]) > 0
def test_two_optical_axes():
'''Check that there are two positions for the zeroth order.'''
photons = mysource.generate_photons(1e5 * u.s)
photons = mypointing(photons)
photons = Arcus()(photons)
i0 = (photons['order'] == 0) & np.isfinite(photons['det_x']) & (photons['probability'] > 0)
assert i0.sum() > 250
assert np.std(photons['det_y'][i0]) > 1
assert np.std(photons['det_x'][i0]) > 1
@pytest.mark.parametrize("instrum, expected_area",
[(Arcus(), 400 * u.cm**2),
(Arcus(channels=['1']), 100 * u.cm**2),
(Arcus(channels=['2']), 100 * u.cm**2),
(Arcus(channels=['1m']), 100 * u.cm**2),
(Arcus(channels=['2m']), 100 * u.cm**2)])
def test_effective_area(instrum, expected_area):
'''Surely, the effective area of Arcus will evolve a little when the
code is changed to accommodate e.g. a slightly different mounting
for the gratings, but if the effective area drops or increases
dramatically, that is more likely a sign for a bug in the code.
'''
photons = mysource.generate_photons(2e4 * u.s)
photons = mypointing(photons)
photons = instrum(photons)
ind = np.isfinite(photons['det_x'])
a_eff = np.sum(photons['probability'][ind]) / len(photons) * instrum.elements[0].area
assert a_eff > 0.7 * expected_area
assert a_eff < 1.5 * expected_area
|
chandra-marxREPO_NAMEmarxsPATH_START.@marxs_extracted@marxs-main@marxs@missions@arcus@tests@[email protected]_END.py
|
{
"filename": "kep51_posterior_vs_emcee.ipynb",
"repo_name": "kemasuda/jnkepler",
"repo_path": "jnkepler_extracted/jnkepler-main/examples/comparison_with_ttvfast+emcee/kep51_posterior_vs_emcee.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import matplotlib.pyplot as plt
import dill
import corner
import pandas as pd
```
```python
import seaborn as sns
sns.set(style='ticks', font_scale=1.6, font='times')
plt.rcParams["figure.figsize"] = (12,6)
from matplotlib import rc
rc('text', usetex=True)
```
```python
#hmc = dill.load(open('../kep51/dt1.0_nw500_ns1500_c4_mcmc.pkl', 'rb')).get_samples()
hmc = dill.load(open('../kep51/dmm_dt1.0_nw500_ns1500_c4_mcmc.pkl', 'rb')).get_samples()
```
```python
hmc['ec'] = np.sqrt(hmc['ecc']) * np.cos(hmc['omega'])
hmc['es'] = np.sqrt(hmc['ecc']) * np.sin(hmc['omega'])
```
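These two lines reparametrise eccentricity and argument of periastron as sqrt(e)*cos(omega) and sqrt(e)*sin(omega); the inverse mapping (added here as an illustration) is e = ec**2 + es**2 and omega = arctan2(es, ec).
```python
# Illustrative inverse of the sqrt(e)*cos/sin(omega) parametrisation above.
import numpy as np

ecc_back = hmc['ec']**2 + hmc['es']**2          # recovers hmc['ecc']
omega_back = np.arctan2(hmc['es'], hmc['ec'])   # recovers hmc['omega'] (mod 2*pi)
```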
```python
hmcsmp = np.r_[[[hmc['mass'][:,i], hmc['period'][:,i], hmc['ec'][:,i], hmc['es'][:,i], hmc['tic'][:,i]] for i in range(3)]].reshape(15, -1).T
```
```python
np.shape(hmcsmp)
```
(6000, 15)
```python
emc = pd.read_csv("kep51.tsv", comment='#', delimiter='|')
```
```python
labels = np.array([['$m_%d/M_\star$'%i, '$P_%d$'%i, '$\sqrt{e}_%d\cos\omega_%d$'%(i,i), '$\sqrt{e}_%d\sin\omega_%d$'%(i,i), '$T_%d$'%i] for i in range(1,4)]).ravel()
```
```python
np.shape(emc), len(emc)
```
((5100, 15), 5100)
```python
import matplotlib.lines as mlines
names = ["Libby-Roberts et al. (2020)\n TTVFast + emcee", "This Work\n jnkepler + NUTS sampler in NumPyro"]
ndim = np.shape(emc)[1]
```
```python
fig_ = corner.corner(emc, labels=labels, show_titles=True, bins=20, color='C0', title_fmt=None)
fig = corner.corner(hmcsmp[:len(emc)], fig=fig_, color='C1', bins=20)
handles = []
for i, name in enumerate(names):
handles.append(mlines.Line2D([], [], color='C%d'%i, label=name))
plt.legend(handles=handles, bbox_to_anchor=(0.8, ndim), fontsize=35)
plt.savefig("corner_3planet.png", dpi=200, bbox_inches="tight")
```
WARNING:root:Pandas support in corner is deprecated; use ArviZ directly

```python
```
|
kemasudaREPO_NAMEjnkeplerPATH_START.@jnkepler_extracted@jnkepler-main@examples@comparison_with_ttvfast+emcee@[email protected]_END.py
|
{
"filename": "_lineposition.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcats/tickfont/_lineposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(
self, plotly_name="lineposition", parent_name="parcats.tickfont", **kwargs
):
super(LinepositionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
extras=kwargs.pop("extras", ["none"]),
flags=kwargs.pop("flags", ["under", "over", "through"]),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@parcats@tickfont@[email protected]_END.py
|
{
"filename": "_colorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermap/cluster/_colorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="scattermap.cluster", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermap@cluster@[email protected]_END.py
|
{
"filename": "gasBinning.py",
"repo_name": "GBTAmmoniaSurvey/GAS",
"repo_path": "GAS_extracted/GAS-master/GAS/gasBinning.py",
"type": "Python"
}
|
import numpy as np
from GAS.gridregion import channelShift
import scipy.ndimage as nd
import astropy.units as u
from spectral_cube import SpectralCube
from GAS import voronoi_2d_binning as v2d
def BinByMask(DataCube, Mask = None, CentroidMap = None,
CentroidAggregator = np.nanmean, x = None, y = None):
"""
Bin a data cube over a boolean mask, aligning the data to a common centroid. Returns the averaged spectrum and its offset velocity axis.
Parameters
----------
DataCube : SpectralCube
The original spectral cube with spatial dimensions Nx, Ny and spectral dimension Nv
Mask : 2D numpy.ndarray
A 2D map of boolean values, with True indicating where the spectra should be aggregated.
CentroidMap : 2D numpy.ndarray
A 2D map of the centroid velocities for the lines to stack of dimensions Nx, Ny.
Note that DataCube and Centroid map must have the same spectral units (e.g., km/s)
CentroidAggregator : numpy.ufunc
Operates on a vector of centroid data and returns the value summarizing that object.
x, y : 1D numpy.ndarray, optional
Pixel coordinates of the spectra to aggregate; used when Mask is not provided.
Returns
-------
Spectrum : np.array
Spectrum of average over mask.
"""
ChannelWidth = (np.median(DataCube.spectral_axis-
np.roll(DataCube.spectral_axis,1)))
RawData = DataCube.unmasked_data[:].value
    if Mask is not None:
        y, x = np.where(Mask)
# Trap y,x not set HERE!
if CentroidMap is None:
AccumSpec = np.nanmean(DataCube.filled_data[:,y,x].value,axis=1)
else:
CentroidValue = CentroidAggregator(CentroidMap[y,x])
DeltaV = CentroidValue-CentroidMap[y,x]
DeltaChan = DeltaV/ChannelWidth.value
AccumSpec = np.zeros(DataCube.spectral_axis.shape+y.shape)
for idx,(ThisX,ThisY) in enumerate(zip(x,y)):
# Note this assumes the units of the centroid map
# are in same units as the spectral axis of the cube.
AccumSpec[:,idx] = channelShift(DataCube[:,ThisY,ThisX].value,
-DeltaChan[idx])
AccumSpec = np.nanmean(AccumSpec,axis=1)
OffsetVelocity = DataCube.spectral_axis.value-CentroidValue
return AccumSpec,OffsetVelocity
def BinByLabel(DataCube, LabelMap, CentroidMap = None,
CentroidAggregator = np.nanmean, BackgroundLabels = [0]):
"""
Bin a data cube by a label mask, aligning the data to a common centroid.
Parameters
----------
DataCube : SpectralCube
The original spectral cube with spatial dimensions Nx, Ny and spectral dimension Nv
LabelMap : 2D numpy.ndarray
        A 2D map of integer labels assigning each pixel to one of the objects used for stacking.
CentroidMap : 2D numpy.ndarray
A 2D map of the centroid velocities for the lines to stack of dimensions Nx, Ny.
Note that DataCube and Centroid map must have the same spectral units (e.g., km/s)
CentroidAggregator : numpy.ufunc
Operates on a vector of centroid data and returns the value summarizing that object.
BackgroundLabels : list
List of values in the label map that correspond to background objects and should not
be processed with the stacking.
Returns
-------
OutputCube : SpectralCube
A SpectralCube instance matching the input but with spectra aligned
in velocity and averaged.
"""
UniqLabels = np.unique(LabelMap)
ChannelWidth = np.median(DataCube.spectral_axis-
np.roll(DataCube.spectral_axis,1))
RawData = DataCube.unmasked_data[:].value
for ThisLabel in UniqLabels:
if ThisLabel not in BackgroundLabels:
y,x= np.where(ThisLabel == LabelMap)
AccumSpec = np.zeros(DataCube.shape[0])
if CentroidMap is None:
for ThisX,ThisY in zip(x,y):
AccumSpec += DataCube[:,ThisY,ThisX].value
else:
CentroidValue = CentroidAggregator(CentroidMap[y,x])
for ThisX,ThisY in zip(x,y):
DeltaV = CentroidMap[ThisY,ThisX] - CentroidValue
# Note this assumes the units of the centroid map
# are in same units as the spectral axis of the cube.
DeltaChan = DeltaV/ChannelWidth.value
AccumSpec += channelShift(DataCube[:,ThisY,ThisX].value,
-DeltaChan)
AccumSpec /= x.size
AccumSpec.shape = AccumSpec.shape+ (1,)
RawData[:,y,x] = AccumSpec
return (SpectralCube(data = RawData,wcs = DataCube.wcs))
def VoronoiBin(IntegratedIntensity, NoiseMap, TargetValue= 5,
threshold = 0, mask = None, aggregator = v2d._sn_func):
"""
This routine wraps the 2D Binning approach of Cappellari and Copin with altered
functionality to make it more generally useful in arbitrary spectral line cases.
The output of the labeling process should be used in `BinByLabel`.
Parameters
----------
IntegratedIntensity : 2D numpy.ndarray
Map containing the data to be processed by the binning algorithm.
NoiseMap : 2D numpy.ndarray
Map containing the RMS at each point. This must be the same dimensions
as the IntegratedIntensity.
TargetValue : number
Value that the binning should try to achieve for the aggregator
function (see below). Since aggregation defaults to signal-to-noise,
the value would be the target S/N.
threshold : number
Positions with IntegratedIntensity less than the threshold are not binned.
    mask : 2D numpy.ndarray
        Boolean mask; positions where it is False are ignored in the aggregation.
    aggregator : function
        Function with call signature f(Signal, Noise, index) which returns a single number
        for the pixels selected by index.
    Returns
    -------
    labels : 2D numpy.ndarray
        Map of bin labels from the Voronoi binning, suitable for use in `BinByLabel`.
    SNmap : 2D numpy.ndarray
        Map of the aggregated signal-to-noise of each bin.
    """
SignalToNoise = IntegratedIntensity/NoiseMap
if mask is None:
mask = SignalToNoise > threshold
x, y = np.where(mask)
signal = IntegratedIntensity[x,y]
noise = NoiseMap[x,y]
labels = np.zeros(IntegratedIntensity.shape)
SNmap = np.zeros(IntegratedIntensity.shape)
voronoi_label, x0, y0, \
xbar, ybar, sn, npix, scale = v2d.voronoi_2d_binning(
x, y, signal, noise, TargetValue, plot = False, aggregator= aggregator)
for ThisLabel,SNval in enumerate(sn):
label_index = np.where(voronoi_label == ThisLabel)
if SNval > TargetValue:
labels[x[label_index],y[label_index]] = ThisLabel
SNmap[x[label_index],y[label_index]] = sn[ThisLabel]
return(labels,SNmap)
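# --- Editor's illustrative sketch (not part of the original GAS module) ---
# The routines above align each spectrum to a common centroid by shifting it
# by DeltaV / ChannelWidth channels before averaging.  A minimal, self-contained
# version of that shift-and-stack idea using plain numpy (np.interp standing in
# for channelShift) is sketched below; names and values are hypothetical.
def _demo_shift_and_stack():
    velocity = np.linspace(-10.0, 10.0, 201)            # common spectral axis (km/s)
    channel_width = velocity[1] - velocity[0]
    centroids = np.array([-2.0, 0.5, 3.0])              # per-spectrum line centroids
    spectra = np.array([np.exp(-0.5 * (velocity - v0) ** 2) for v0 in centroids])
    reference = np.nanmean(centroids)                   # plays the role of CentroidAggregator
    stacked = np.zeros_like(velocity)
    for spec, v0 in zip(spectra, centroids):
        shift = (reference - v0) / channel_width        # channels to shift, like DeltaChan
        # fractional channel shift via linear interpolation
        stacked += np.interp(np.arange(velocity.size) - shift,
                             np.arange(velocity.size), spec,
                             left=0.0, right=0.0)
    return stacked / len(spectra), velocity - reference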
|
GBTAmmoniaSurveyREPO_NAMEGASPATH_START.@GAS_extracted@GAS-master@[email protected]@.PATH_END.py
|
{
"filename": "common.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/misc/common.py",
"type": "Python"
}
|
"""
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from __future__ import division, print_function, absolute_import
from numpy import arange, newaxis, hstack, product, array, fromstring
__all__ = ['central_diff_weights', 'derivative', 'ascent', 'face']
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
    derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+ho*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Notes
-----
    Can be inaccurate for a large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
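# Example (editor's illustration, not in the original SciPy source): the
# classic 3-point central-difference weights for a first derivative come out as
#
#     >>> central_diff_weights(3, 1)
#     array([-0.5,  0. ,  0.5])
#
# so that f'(x) ~ (weights * [f(x-dx), f(x), f(x+dx)]).sum() / dx.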
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the `n`-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which `n`-th derivative is found.
dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
    Decreasing the step size too much can result in round-off error.
Examples
--------
>>> from scipy.misc import derivative
>>> def f(x):
... return x**3 + x**2
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def ascent():
"""
    Get an 8-bit grayscale, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
If True return 8-bit grey-scale image, otherwise return a color image
Returns
-------
face : ndarray
        image of a raccoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
255
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = fromstring(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@[email protected]@site-packages@scipy@[email protected]@.PATH_END.py
|
{
"filename": "chat_template.py",
"repo_name": "OpenAccess-AI-Collective/axolotl",
"repo_path": "axolotl_extracted/axolotl-main/src/axolotl/prompt_strategies/chat_template.py",
"type": "Python"
}
|
"""
HF Chat Templates prompt strategy
"""
import logging
from typing import Any, Dict, List, Optional
from transformers import ProcessorMixin
from axolotl.prompt_tokenizers import PromptTokenizingStrategy
from axolotl.prompters import IGNORE_TOKEN_ID, Prompter
from axolotl.utils.chat_templates import get_chat_template_from_config
# Configure the logger
LOG = logging.getLogger("axolotl")
LOG.setLevel(logging.INFO)
class ChatTemplatePrompter(Prompter):
"""Prompter for HF chat templates"""
def __init__(
self,
tokenizer,
processor=None,
chat_template=None,
max_length=2048,
message_field_role: str = "from",
message_field_content: str = "value",
message_field_training: Optional[str] = None,
message_field_training_detail: Optional[str] = None,
roles: Optional[Dict[str, List[str]]] = None,
drop_system_message: bool = False,
):
if roles:
self.roles = {s: t for t, sources in roles.items() for s in sources}
else:
self.roles = {
"human": "user",
"user": "user",
"assistant": "assistant",
"gpt": "assistant",
"system": "system",
}
self.message_field_role = message_field_role
self.message_field_content = message_field_content
self.message_field_training = message_field_training
self.message_field_training_detail = message_field_training_detail
self.tokenizer = tokenizer
self.processor: ProcessorMixin = processor
self.chat_template = chat_template
self.max_length = max_length
self.drop_system_message = drop_system_message
def build_prompt(self, conversation, add_generation_prompt=False, images=None):
if self.processor:
text = self.processor.apply_chat_template(
conversation,
chat_template=self.chat_template,
tokenize=False,
add_generation_prompt=add_generation_prompt,
)
batch = self.processor(
text=text,
images=images,
return_tensors="pt",
)
# workaround since processor works in batches instead of single examples
for k, val in batch.items():
if k in ["pixel_values"]:
batch[k] = val.tolist()
else:
batch[k] = val.squeeze().tolist()
return batch
return self.tokenizer.apply_chat_template(
conversation,
add_generation_prompt=add_generation_prompt,
chat_template=self.chat_template,
)
def get_offsets_for_train_detail(
self, text: str, train_details: List[Dict], mask_untrainable: bool = True
) -> List[int]:
tokenized_output = self.tokenizer(
text, return_offsets_mapping=True, add_special_tokens=False
)
tokens = tokenized_output.tokens()
token_offsets = tokenized_output["offset_mapping"]
LOG.debug(f"Tokenizing text: {text}")
LOG.debug(f"Tokens: {tokens}")
# Adjust the end offsets. For some reason by default they are set to the same value as the start offsets.
for i in range(len(token_offsets) - 1):
token_offsets[i] = (token_offsets[i][0], token_offsets[i + 1][0] - 1)
# Ensure the last token's end offset is set correctly
token_offsets[-1] = (token_offsets[-1][0], len(text) - 1)
LOG.debug(f"Token offsets: {token_offsets}")
# Initialize all offsets as IGNORE_TOKEN_ID (not trained)
result = [IGNORE_TOKEN_ID] * len(token_offsets)
# Adjust train_details to align with token boundaries
adjusted_train_details = self.adjust_train_details(train_details, token_offsets)
for idx, (start, end) in enumerate(token_offsets):
for detail in adjusted_train_details:
# Check if the token is completely within the detail's range
if start >= detail["begin_offset"] and end <= detail["end_offset"]:
if detail["train"] or not mask_untrainable:
result[idx] = start
LOG.debug(f"Token {idx} ({tokens[idx]}) marked for training")
else:
LOG.debug(
f"Token {idx} ({tokens[idx]}) marked as non-trainable"
)
elif start < detail["end_offset"] and end > detail["begin_offset"]:
# Token partially overlaps with detail, always mark as non-trainable
LOG.debug(
f"Token {idx} ({tokens[idx]}) partially overlaps detail, marked as non-trainable"
)
LOG.debug(f"Final result: {result}")
return result
def adjust_train_details(
self, train_details: List[Dict], token_offsets: List[tuple]
) -> List[Dict]:
adjusted_details = []
for detail in train_details:
begin_offset = detail["begin_offset"]
end_offset = detail["end_offset"]
# Find the first token that starts after or at the begin_offset
begin_token = next(
(
i
for i, (t_start, t_end) in enumerate(token_offsets)
if t_start >= begin_offset
),
len(token_offsets),
)
if begin_token > 0 and token_offsets[begin_token - 1][1] > begin_offset:
begin_token -= 1
# Find the last token that ends before or at the end_offset
end_token = next(
(
i
for i in range(len(token_offsets) - 1, -1, -1)
if token_offsets[i][1] <= end_offset
),
-1,
)
if (
end_token < len(token_offsets) - 1
and token_offsets[end_token + 1][0] < end_offset
):
end_token += 1
if begin_token <= end_token:
adjusted_begin = token_offsets[begin_token][0]
adjusted_end = token_offsets[end_token][1]
if adjusted_begin != begin_offset or adjusted_end != end_offset:
LOG.warning(
f"Adjusting detail offsets: ({begin_offset}, {end_offset}) -> ({adjusted_begin}, {adjusted_end})"
)
adjusted_details.append(
{
"begin_offset": adjusted_begin,
"end_offset": adjusted_end,
"train": detail["train"],
}
)
else:
LOG.warning(
f"Could not adjust detail offsets: ({begin_offset}, {end_offset}). Skipping this detail."
)
return adjusted_details
class ChatTemplateStrategy(PromptTokenizingStrategy):
"""
Tokenizing strategy for instruction-based prompts.
"""
_messages = "conversations"
def __init__(
self,
prompter,
tokenizer,
train_on_inputs,
sequence_len,
roles_to_train=None,
train_on_eos=None,
):
super().__init__(prompter, tokenizer, train_on_inputs, sequence_len)
self.roles_to_train = []
if roles_to_train:
# map roles if exist in prompter.roles else use the role as is
self.roles_to_train = [
prompter.roles.get(role, role) for role in roles_to_train
]
self.train_on_eos = train_on_eos
self.images = "images"
@property
def messages(self):
return self._messages
@messages.setter
def messages(self, messages):
self._messages = messages
def tokenize_prompt(self, prompt):
# Old simple legacy behavior that works reliably.
if (
not self.roles_to_train
and not self.train_on_eos
and not self.prompter.message_field_training
and not self.prompter.message_field_training_detail
):
turns = self.get_conversation_thread(prompt)
images = self.get_images(prompt)
prompt_ids = self.prompter.build_prompt(
turns[:-1],
add_generation_prompt=True,
images=images,
)
tokenized_res = self.prompter.build_prompt(turns, images=images)
tokenized_prompt = {}
if isinstance(tokenized_res, list):
input_ids = prompt_ids + tokenized_res[len(prompt_ids) :]
tokenized_prompt["input_ids"] = input_ids
tokenized_prompt["attention_mask"] = [1] * len(input_ids)
else:
input_ids = tokenized_res["input_ids"]
tokenized_prompt = tokenized_res
if not self.train_on_inputs:
user_prompt_len = len(prompt_ids)
labels = [-100] * user_prompt_len + input_ids[user_prompt_len:]
else:
labels = input_ids
tokenized_prompt["labels"] = labels
return tokenized_prompt
turns = self.get_conversation_thread(prompt)
input_ids = self.prompter.build_prompt(turns)
labels = [IGNORE_TOKEN_ID] * len(input_ids)
last_eos_idx = -1
for index, turn in enumerate(turns):
role = turn.get("role")
content = turn.get("content")
train_turn = turn.get("training")
train_detail = turn.get("training_detail")
LOG.debug(
f"Processing turn {index}: role={role}, content={content}, train_turn={train_turn}, train_detail={train_detail}"
)
should_train = None
if train_turn is not None:
should_train = train_turn
elif train_detail is not None:
should_train = bool(train_detail)
else:
should_train = self.train_on_inputs or role in self.roles_to_train
LOG.debug(f"Should train: {should_train}")
turn_start_idx, turn_end_idx = self.find_turn(
conversation_ids=input_ids, turn=index, turn_content=turn
)
if turn_start_idx == -1 or turn_end_idx == -1:
LOG.warning(f"Failed to find boundaries for turn {index}")
LOG.debug(f"Turn indices: start={turn_start_idx}, end={turn_end_idx}")
if should_train and turn_start_idx != -1 and turn_end_idx != -1:
if train_detail:
token_offsets = self.prompter.get_offsets_for_train_detail(
content, train_detail
)
LOG.debug(f"Token offsets: {token_offsets}")
for i, offset in enumerate(token_offsets):
if offset != IGNORE_TOKEN_ID and turn_start_idx + i < len(
input_ids
):
labels[turn_start_idx + i] = input_ids[turn_start_idx + i]
LOG.debug(
f"Label set at index {turn_start_idx + i}: {input_ids[turn_start_idx + i]}"
)
else:
labels[turn_start_idx:turn_end_idx] = input_ids[
turn_start_idx:turn_end_idx
]
LOG.debug(
f"Set labels for training from {turn_start_idx} to {turn_end_idx}"
)
LOG.debug(f"Labels after processing turn {index}: {labels}")
# Handle EOS token
eos_idx = self.find_eos_token(input_ids, turn_end_idx)
if eos_idx == turn_end_idx:
last_eos_idx = eos_idx
if self.train_on_eos == "all" or (
self.train_on_eos == "turn" and should_train
):
labels[eos_idx] = input_ids[eos_idx]
LOG.debug(f"EOS token set for training at index {eos_idx}")
else:
LOG.debug(
f"EOS token missing after turn {turn}. eos_idx: {eos_idx}, turn_end_idx: {turn_end_idx}"
)
# Handle 'last' option for train_on_eos
if self.train_on_eos == "last" and last_eos_idx != -1:
labels[last_eos_idx] = input_ids[last_eos_idx]
LOG.debug(f"Last EOS token set for training at index {last_eos_idx}")
LOG.debug(f"Final labels: {labels}")
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": [1] * len(input_ids),
}
def find_eos_token(self, input_ids, start_idx):
eos_token_id = self.tokenizer.eos_token_id
for i in range(start_idx, len(input_ids)):
if input_ids[i] == eos_token_id:
return i
return -1
def find_turn(self, conversation_ids: list[int], turn: int, turn_content: dict):
"""
Locate the starting and ending indices of the specified turn in a conversation.
"""
content = turn_content.get("content")
content_ids = self.tokenizer.encode(content, add_special_tokens=False)
LOG.debug(f"content_ids (length {len(content_ids)}): {content_ids}")
if not content_ids:
LOG.warning(f"Empty content for turn {turn}")
return -1, -1
# For first turn, start from beginning
if turn == 0:
start_search_idx = 0
else:
# For subsequent turns, find the previous EOS token
eos_token_id = self.tokenizer.eos_token_id
eos_count = 0
start_search_idx = 0
for i, token_id in enumerate(conversation_ids):
if token_id == eos_token_id:
eos_count += 1
if eos_count == turn: # Find the nth EOS token where n = turn
start_search_idx = i + 1
break
# we can optimize this to only search for a few tokens from start_search_idx
# but it would risk missing the content if it's not found within the first few tokens or
# if start_search_idx cannot be found above.
last_index = len(conversation_ids) - len(content_ids) + 1
if last_index < start_search_idx:
LOG.warning(
f"last_index to search is less than start_search_idx for turn {turn}"
)
return -1, -1
# Search for content starting from start_search_idx
first_elem = content_ids[0]
for i in range(start_search_idx, last_index):
# Quick check of first element before doing full comparison
if conversation_ids[i] == first_elem:
# Check if the rest of the content matches
if conversation_ids[i : i + len(content_ids)] == content_ids:
LOG.debug(f"Found turn {turn} content at position {i}")
return i, i + len(content_ids)
return -1, -1
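    # Editor's note (illustrative, not part of the original source): with a toy
    # tokenizer where EOS == 2, a conversation such as
    #     conversation_ids = [5, 6, 7, 2, 8, 9, 2]
    # and a second turn (turn == 1) whose content tokenizes to
    #     content_ids = [8, 9]
    # is located by first skipping past the 1st EOS (start_search_idx == 4) and
    # then matching the content_ids subsequence, so find_turn returns (4, 6):
    # the half-open token span [4, 6) covering the turn's content.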
def get_conversation_thread(self, prompt):
turns = [
{
"role": self.prompter.roles[t[self.prompter.message_field_role]],
"content": t[self.prompter.message_field_content],
"training": t.get(self.prompter.message_field_training),
"training_detail": t.get(self.prompter.message_field_training_detail),
}
for t in prompt[self.messages]
]
if self.prompter.drop_system_message and turns[0]["role"] == "system":
turns = turns[1:]
return turns
def get_images(self, prompt):
return prompt.get(self.images, None)
def load(tokenizer, cfg, ds_cfg: Optional[Dict[str, Any]] = None, processor=None):
# pylint: disable=duplicate-code
ds_cfg = ds_cfg or {}
chat_template_string = get_chat_template_from_config(
cfg=cfg, ds_cfg=ds_cfg, tokenizer=tokenizer
)
LOG.info(f"Using chat template:\n---\n{chat_template_string!s}\n---")
prompter_params = {
"tokenizer": tokenizer,
"chat_template": chat_template_string,
"message_field_role": ds_cfg.get("message_field_role", "role"),
"message_field_content": ds_cfg.get("message_field_content", "content"),
"message_field_training": ds_cfg.get("message_field_training", None),
"message_field_training_detail": ds_cfg.get(
"message_field_training_detail",
None,
),
"roles": ds_cfg.get("roles"),
"drop_system_message": ds_cfg.get("drop_system_message", False),
        # we need to add one for detecting sequences exceeding the `sequence_len` limit.
"max_length": cfg.sequence_len + 1,
"processor": processor,
}
strategy_params = {
"train_on_inputs": cfg.train_on_inputs,
"sequence_len": cfg.sequence_len,
"roles_to_train": ds_cfg.get("roles_to_train", []),
"train_on_eos": ds_cfg.get("train_on_eos", None),
}
strategy = ChatTemplateStrategy(
ChatTemplatePrompter(**prompter_params), tokenizer=tokenizer, **strategy_params
)
if "field_messages" in ds_cfg and hasattr(strategy, "messages"):
strategy.messages = ds_cfg["field_messages"]
return strategy
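# Editor's note (illustrative, not part of the original source): the dataset
# config consumed by `load` above is a plain dict; based on the keys read in
# this module, a minimal example could look like
#
#     ds_cfg = {
#         "field_messages": "conversations",
#         "message_field_role": "role",
#         "message_field_content": "content",
#         "roles_to_train": ["assistant"],
#         "train_on_eos": "turn",
#     }
#
# (any chat-template selection is resolved separately via
# get_chat_template_from_config, so it is not shown here).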
|
OpenAccess-AI-CollectiveREPO_NAMEaxolotlPATH_START.@axolotl_extracted@axolotl-main@src@axolotl@prompt_strategies@[email protected]_END.py
|
{
"filename": "loss.py",
"repo_name": "Herculens/herculens",
"repo_path": "herculens_extracted/herculens-main/herculens/Inference/legacy/loss.py",
"type": "Python"
}
|
# Defines the full loss function, from likelihood, prior and regularization terms
#
# Copyright (c) 2021, herculens developers and contributors
__author__ = 'aymgal', 'austinpeel'
import warnings
import numpy as np
import jax.numpy as jnp
from jax import jit
from herculens.Inference.legacy.base_differentiable import Differentiable
from herculens.Util.jax_util import WaveletTransform
from herculens.Util import model_util
__all__ = ['Loss']
class Loss(Differentiable):
# TODO: creates subclasses Likelihood, Regularization and Prior to abstract out some of the methods here
"""
Class that manages the (auto-differentiable) loss function, defined as:
L = - log(likelihood) - log(prior) - log(regularization)
Note that gradient, hessian, etc. are computed in the InferenceBase class.
Supported options are:
- likelihood_type [default: 'chi2']: single choice among
'chi2', 'reduced_chi2', 'l2_norm'
- regularization_terms [default: None]: a list containing choices among
- for a 'PIXELATED' source or lens light: 'l1_starlet_source', 'l1_battle_source', 'positivity_source'
- for a 'PIXELATED' lens potential: 'l1_starlet_potential', 'l1_battle_potential', 'positivity_potential'
- prior_terms [default: None]: a list containing choices among
'uniform', 'gaussian'
"""
_supported_ll = ('chi2', 'reduced_chi2', 'l2_norm')
_supported_regul_source = (
'l1_starlet_source',
'l1_battle_source',
'positivity_source'
)
_supported_regul_lens_mass = (
'l1_starlet_potential',
'l1_battle_potential',
'positivity_potential',
'negativity_potential',
'positivity_convergence',
'analytical_potential', # TEST: by default, regularize the penultimate (index -2) lens profile with the last one (index -1)
)
_supported_regul_lens_light = ('l1_starlet_lens_light', 'l1_battle_lens_light', 'positivity_lens_light')
_supported_prior = ('uniform', 'gaussian')
def __init__(self, data, image_class, param_class,
likelihood_type='chi2', likelihood_mask=None,
regularization_terms=None, regularization_strengths=None,
regularization_weights=None, regularization_masks=None,
prior_terms=None, starlet_second_gen=False, index_analytical_potential=None):
self._data = data
self._image = image_class
self._param = param_class
self._check_choices(likelihood_type, prior_terms,
regularization_terms, regularization_strengths,
regularization_weights, regularization_masks)
self._init_likelihood(likelihood_type, likelihood_mask)
self._init_regularizations(regularization_terms,
regularization_strengths,
regularization_weights,
regularization_masks,
starlet_second_gen, index_analytical_potential)
self._init_priors(prior_terms)
def _func(self, args):
"""negative log(likelihood*prior*regularization)"""
kwargs = self._param.args2kwargs(args)
model = self._image.model(**kwargs, k_lens=self._regul_k_lens)
neg_log_ll = - self.log_likelihood(model)
neg_log_reg = - self.log_regularization(kwargs)
neg_log_p = - self.log_prior(args)
neg_log = neg_log_ll + neg_log_reg + neg_log_p
neg_log /= self._global_norm # to keep loss magnitude in acceptable range
return jnp.nan_to_num(neg_log, nan=1e15, posinf=1e15, neginf=1e15)
@property
def likelihood_num_data_points(self):
return self._ll_num_data_points
@property
def likelihood_mask(self):
return self._ll_mask
@property
def data(self):
return self._data
def _check_choices(self, likelihood_type, prior_terms,
regularization_terms, regularization_strengths,
regularization_weights, regularization_masks):
if likelihood_type not in self._supported_ll:
raise ValueError(f"Likelihood term '{likelihood_type}' is not supported")
if prior_terms is not None:
for term in prior_terms:
if term not in self._supported_prior:
raise ValueError(f"Prior term '{term}' is not supported")
if regularization_terms is not None:
            if len(regularization_terms) != len(regularization_strengths):
                raise ValueError("There should be exactly one regularization "
                                 "strength per regularization term.")
            if likelihood_type in ['chi2', 'reduced_chi2']:
                warnings.warn(f"Likelihood type is '{likelihood_type}', which might "
                              "cause issues with some regularization choices")
for term in regularization_terms:
if term not in (self._supported_regul_source +
self._supported_regul_lens_mass +
self._supported_regul_lens_light):
raise ValueError(f"Regularization term '{term}' is not supported")
# TODO: if any regularization terms are not dependent on PIXELATED profiles
# need to update these checks below
if (term in self._supported_regul_source and
'PIXELATED' not in self._image.SourceModel.profile_type_list):
raise ValueError(f"Regularization term '{term}' is only "
"compatible with a 'PIXELATED' source light profile")
if (term in self._supported_regul_lens_mass and
'PIXELATED' not in self._image.MassModel.profile_type_list):
raise ValueError(f"Regularization term '{term}' is only "
"compatible with a 'PIXELATED' lens profile")
if (term in self._supported_regul_lens_light and
'PIXELATED' not in self._image.LensLightModel.profile_type_list):
raise ValueError(f"Regularization term '{term}' is only "
"compatible with a 'PIXELATED' lens profile")
def _init_likelihood(self, likelihood_type, likelihood_mask):
if likelihood_mask is None:
self._ll_mask = np.ones_like(self._data)
else:
self._ll_mask = likelihood_mask.astype(float)
self._ll_num_data_points = np.count_nonzero(self._ll_mask)
if likelihood_type == 'chi2':
self.log_likelihood = self.log_likelihood_chi2
self._global_norm = 1.
elif likelihood_type == 'reduced_chi2':
self.log_likelihood = self.log_likelihood_chi2
self._global_norm = 0.5 * self.likelihood_num_data_points
elif likelihood_type == 'l2_norm':
self.log_likelihood = self.log_likelihood_l2
# here the global norm is such that l2_norm has same order of magnitude as a chi2
self._global_norm = 1.0 # 0.5 * self._image.Grid.num_pixel * np.mean(self._image.Noise.C_D)
def _init_regularizations(self, regularization_terms, regularization_strengths,
regularization_weights, regularization_masks,
starlet_second_gen, index_analytical_potential):
self._regul_k_lens = None # TEMPORARY
if regularization_terms is None:
self.log_regularization = lambda kwargs: 0. # no regularization
return
if regularization_masks is None:
regularization_masks = [None]*len(regularization_terms)
# TODO: implement regularization_weights for source regularization as well (for now it's only potential)
i = 0
regularization_weights_fix = []
for term in regularization_terms:
if 'potential' in term:
regularization_weights_fix.append(regularization_weights[i])
i += 1
else:
# TEMPORARY: just to populate weights for regularization terms other than potential
# waiting for the source and lens light weights to be handled as well.
regularization_weights_fix.append(None)
self._idx_pix_src = self._image.SourceModel.pixelated_index
self._idx_pix_pot = self._image.MassModel.pixelated_index
self._idx_pix_ll = self._image.LensLightModel.pixelated_index
regul_func_list = []
for term, strength, weights, mask in zip(regularization_terms,
regularization_strengths,
regularization_weights_fix,
regularization_masks):
# add the log-regularization function to the list
regul_func_list.append(getattr(self, '_log_regul_'+term))
if term == 'l1_starlet_source':
n_pix_src = min(*self._image.SourceModel.pixelated_shape)
n_scales = int(np.log2(n_pix_src)) # maximum allowed number of scales
self._starlet_src = WaveletTransform(n_scales, wavelet_type='starlet',
second_gen=starlet_second_gen)
wavelet_norms = self._starlet_src.scale_norms[:-1] # ignore coarsest scale
self._st_src_norms = jnp.expand_dims(wavelet_norms, (1, 2))
if isinstance(strength, (int, float)):
self._st_src_lambda = self._st_src_lambda_hf = float(strength)
elif isinstance(strength, (tuple, list)):
if len(strength) > 2:
raise ValueError("You can only specify two starlet regularization "
"strength values at maximum")
self._st_src_lambda_hf = float(strength[0])
self._st_src_lambda = float(strength[1])
elif term == 'l1_starlet_lens_light':
n_pix_ll = min(*self._image.LensLightModel.pixelated_shape)
n_scales = int(np.log2(n_pix_ll)) # maximum allowed number of scales
self._starlet_ll = WaveletTransform(n_scales, wavelet_type='starlet',
second_gen=starlet_second_gen)
wavelet_norms = self._starlet_ll.scale_norms[:-1] # ignore coarsest scale
self._st_ll_norms = jnp.expand_dims(wavelet_norms, (1, 2))
if isinstance(strength, (int, float)):
self._st_ll_lambda = float(strength)
self._st_ll_lambda_hf = float(strength)
elif isinstance(strength, (tuple, list)):
if len(strength) > 2:
raise ValueError("You can only specify two starlet regularization "
"strength values at maximum")
self._st_ll_lambda_hf = float(strength[0])
self._st_ll_lambda = float(strength[1])
elif term == 'l1_battle_source':
n_scales = 1 # maximum allowed number of scales
self._battle_src = WaveletTransform(n_scales, wavelet_type='battle-lemarie-3')
self._bl_src_norm = self._battle_src.scale_norms[0] # consider only first scale
if isinstance(strength, (tuple, list)):
raise ValueError("You can only specify one regularization "
"strength for Battle-Lemarie regularization")
self._bl_src_lambda = float(strength)
elif term == 'l1_battle_lens_light':
n_scales = 1 # maximum allowed number of scales
self._battle_ll = WaveletTransform(n_scales, wavelet_type='battle-lemarie-3')
self._bl_ll_norm = self._battle_ll.scale_norms[0] # consider only first scale
if isinstance(strength, (tuple, list)):
raise ValueError("You can only specify one regularization "
"strength for Battle-Lemarie regularization")
self._bl_ll_lambda = float(strength)
elif term == 'positivity_source':
if isinstance(strength, (tuple, list)):
raise ValueError("You can only specify one regularization "
"strength for positivity constraint")
self._pos_src_lambda = float(strength)
elif term == 'positivity_lens_light':
if isinstance(strength, (tuple, list)):
raise ValueError("You can only specify one regularization "
"strength for positivity constraint")
self._pos_ll_lambda = float(strength)
elif term == 'l1_starlet_potential':
n_pix_pot = min(*self._image.MassModel.pixelated_shape)
n_scales = int(np.log2(n_pix_pot)) # maximum allowed number of scales
self._starlet_pot = WaveletTransform(n_scales, wavelet_type='starlet',
second_gen=starlet_second_gen)
wavelet_norms = self._starlet_pot.scale_norms[:-1] # ignore coarsest scale
# self._st_pot_norms = jnp.expand_dims(wavelet_norms, (1, 2))
if weights.shape[0] != n_scales+1:
raise ValueError(f"The weights do not contain enough wavelet scales"
f" (should be {n_scales+1} inc. coarsest).")
self._st_pot_weigths = weights
if isinstance(strength, (int, float)):
self._st_pot_lambda = float(strength)
self._st_pot_lambda_hf = float(strength)
elif isinstance(strength, (tuple, list)):
if len(strength) > 2:
raise ValueError("You can only specify two starlet regularization "
"strength values at maximum")
self._st_pot_lambda_hf = float(strength[0])
self._st_pot_lambda = float(strength[1])
elif term == 'l1_battle_potential':
n_scales = 1 # maximum allowed number of scales
self._battle_pot = WaveletTransform(n_scales, wavelet_type='battle-lemarie-3')
# self._bl_pot_norm = self._battle_pot.scale_norms[0] # consider only first scale
if weights.shape[0] != n_scales+1:
raise ValueError(f"The weights do not contain enogh wavelet scales"
f" (should be {n_scales+1} inc. coarsest).")
self._bl_pot_weigths = weights
if isinstance(strength, (tuple, list)):
raise ValueError("You can only specify one regularization "
"strength for Battle-Lemarie regularization")
self._bl_pot_lambda = float(strength)
elif term == 'positivity_potential':
if isinstance(strength, (tuple, list)):
raise ValueError("You can only specify one regularization "
"strength for positivity constraint")
self._pos_pot_lambda = float(strength)
elif term == 'negativity_potential':
                if isinstance(strength, (tuple, list)):
                    raise ValueError("You can only specify one regularization "
                                     "strength for negativity constraint")
self._neg_pot_lambda = float(strength)
elif term == 'positivity_convergence':
if isinstance(strength, (tuple, list)):
raise ValueError("You can only specify one regularization "
"strength for positivity constraint")
self._pos_conv_lambda = float(strength)
self._x_lens, self._y_lens = self._image.Grid.model_pixel_coordinates('lens')
elif term == 'analytical_potential':
if index_analytical_potential is None:
raise ValueError("For analytical potential regularization, a `index_analytical_potential` is required.")
self._idx_ana_pot = index_analytical_potential
self._regul_k_lens = tuple([True if i != self._idx_ana_pot else False for i in range(len(self._image.MassModel.profile_type_list))])
self._weigths = weights
self._lambda = float(strength)
self._mask = mask
self._x_lens, self._y_lens = self._image.Grid.model_pixel_coordinates('lens')
# build the composite function (sum of regularization terms)
self.log_regularization = lambda kw: sum([func(kw) for func in regul_func_list])
def _init_priors(self, prior_terms):
if prior_terms is None:
self.log_prior = lambda args: 0.
return
if prior_terms == ['uniform']:
self.log_prior = self._param.log_prior_uniform
elif prior_terms == ['gaussian']:
self.log_prior = self._param.log_prior_gaussian
elif 'gaussian' in prior_terms and 'uniform' in prior_terms:
self.log_prior = self._param.log_prior
# def log_likelihood_gaussian(self, model):
# C_D = self._image.Noise.C_D_model(model)
# det_C_D = jnp.prod(noise_var) # knowing that C_D is diagonal
# #print("det_C_D", det_C_D)
# Z_D = np.sqrt( (2*np.pi)**self.likelihood_num_data_points * det_C_D ) # Eq. 24 from Vegetti & Koopmans 2009
# chi2 = - 0.5 * jnp.sum( (self._data - model)**2 * self.likelihood_mask / C_D )
# return jnp.log(Z_D) + chi2
def log_likelihood_chi2(self, model):
noise_var = self._image.Noise.C_D_model(model)
# noise_var = self._image.Noise.C_D
residuals = (self._data - model) * self.likelihood_mask
return - 0.5 * jnp.sum(residuals**2 / noise_var)
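    # Editor's note: the term above is the Gaussian log-likelihood up to a
    # constant, log L = -0.5 * sum_pix [ mask * (data - model)^2 / C_D ],
    # with C_D the per-pixel noise variance returned by Noise.C_D_model.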
def log_likelihood_l2(self, model):
# TODO: check that mask here does not mess up with the balance between l2-norm and wavelet regularization
residuals = (self._data - model) * self.likelihood_mask
return - 0.5 * jnp.sum(residuals**2)
def _log_regul_l1_starlet_source(self, kwargs):
model = self._image.model(**kwargs)
noise_map = jnp.sqrt(self._image.Noise.C_D_model(model)) # TODO: do not take into account shot noise from lens light
noise_level = jnp.mean(noise_map[self.likelihood_mask == 1])
source_model = kwargs['kwargs_source'][self._idx_pix_src]['pixels']
st = self._starlet_src.decompose(source_model)[:-1] # ignore coarsest scale
st_weighted_l1_hf = jnp.sum(self._st_src_norms[0] * noise_level * jnp.abs(st[0])) # first scale (i.e. high frequencies)
st_weighted_l1 = jnp.sum(self._st_src_norms[1:] * noise_level * jnp.abs(st[1:])) # other scales
return - (self._st_src_lambda_hf * st_weighted_l1_hf + self._st_src_lambda * st_weighted_l1)
def _log_regul_l1_starlet_lens_light(self, kwargs):
# TODO: generalise this for Poisson noise! but then the noise needs to be properly propagated to source plane
noise_map = np.sqrt(self._image.Noise.C_D)
# TEST reweight the noise map based on lensed source model
#lensed_source_model = self._image.source_surface_brightness(kwargs['kwargs_source'],
# kwargs_lens=kwargs['kwargs_lens'],
# de_lensed=False, unconvolved=True)
#noise_level = noise_map # + lensed_source_model**3
noise_level = np.mean(noise_map[self.likelihood_mask == 1])
# end TEST
model = kwargs['kwargs_lens_light'][self._idx_pix_ll]['pixels']
st = self._starlet_ll.decompose(model)[:-1] # ignore coarsest scale
st_weighted_l1_hf = jnp.sum(self._st_ll_norms[0] * noise_level * jnp.abs(st[0])) # first scale (i.e. high frequencies)
st_weighted_l1 = jnp.sum(self._st_ll_norms[1:] * noise_level * jnp.abs(st[1:])) # other scales
return - (self._st_ll_lambda_hf * st_weighted_l1_hf + self._st_ll_lambda * st_weighted_l1)
def _log_regul_l1_starlet_potential(self, kwargs):
weights = self._st_pot_weigths
psi_model = kwargs['kwargs_lens'][self._idx_pix_pot]['pixels']
st = self._starlet_pot.decompose(psi_model)
st_weighted_l1_hf = jnp.sum(jnp.abs(weights[0] * st[0])) # first scale (i.e. high frequencies)
st_weighted_l1 = jnp.sum(jnp.abs(weights[1:-1] * st[1:-1])) # other scales (except coarsest)
return - (self._st_pot_lambda_hf * st_weighted_l1_hf + self._st_pot_lambda * st_weighted_l1)
def _log_regul_l1_battle_source(self, kwargs):
model = self._image.model(**kwargs)
noise_map = jnp.sqrt(self._image.Noise.C_D_model(model)) # TODO: do not take into account shot noise from lens light
noise_level = jnp.mean(noise_map[self.likelihood_mask == 1])
source_model = kwargs['kwargs_source'][self._idx_pix_src]['pixels']
bl = self._battle_src.decompose(source_model)[0] # consider only first scale
bl_weighted_l1 = jnp.sum(self._bl_src_norm * noise_level * jnp.abs(bl))
return - self._bl_src_lambda * bl_weighted_l1
def _log_regul_l1_battle_lens_light(self, kwargs):
# TODO: generalise this for Poisson noise! but then the noise needs to be properly propagated to source plane
noise_map = np.sqrt(self._image.Noise.C_D)
noise_level = np.mean(noise_map[self.likelihood_mask == 1])
#noise_level = noise_map
model = kwargs['kwargs_lens_light'][self._idx_pix_ll]['pixels']
bl = self._battle_ll.decompose(model)[0] # consider only first scale
bl_weighted_l1 = jnp.sum(self._bl_ll_norm * noise_level * jnp.abs(bl))
return - self._bl_ll_lambda * bl_weighted_l1
def _log_regul_l1_battle_potential(self, kwargs):
weights = self._bl_pot_weigths
psi_model = kwargs['kwargs_lens'][self._idx_pix_pot]['pixels']
bl = self._battle_pot.decompose(psi_model)
bl_weighted_l1 = jnp.sum(jnp.abs(weights[0] * bl[0])) # only first BL scale
return - self._bl_pot_lambda * bl_weighted_l1
def _log_regul_positivity_source(self, kwargs):
source_model = kwargs['kwargs_source'][self._idx_pix_src]['pixels']
return - self._pos_src_lambda * jnp.abs(jnp.sum(jnp.minimum(0., source_model)))
def _log_regul_positivity_lens_light(self, kwargs):
model = kwargs['kwargs_lens_light'][self._idx_pix_ll]['pixels']
return - self._pos_ll_lambda * jnp.abs(jnp.sum(jnp.minimum(0., model)))
def _log_regul_positivity_potential(self, kwargs):
psi_model = kwargs['kwargs_lens'][self._idx_pix_pot]['pixels']
return - self._pos_pot_lambda * jnp.abs(jnp.sum(jnp.minimum(0., psi_model)))
def _log_regul_negativity_potential(self, kwargs):
psi_model = kwargs['kwargs_lens'][self._idx_pix_pot]['pixels']
return - self._neg_pot_lambda * jnp.abs(jnp.sum(jnp.maximum(0., psi_model)))
def _log_regul_positivity_convergence(self, kwargs):
kappa_model = self._image.MassModel.kappa(self._x_lens,
self._y_lens,
kwargs['kwargs_lens'],
k=self._idx_pix_pot)
return - self._pos_conv_lambda * jnp.abs(jnp.sum(jnp.minimum(0., kappa_model)))
def _log_regul_analytical_potential(self, kwargs):
psi_model = kwargs['kwargs_lens'][self._idx_pix_pot]['pixels']
target_model = self._image.MassModel.potential(self._x_lens, self._y_lens,
kwargs['kwargs_lens'],
k=self._idx_ana_pot)
return - self._lambda * jnp.sum(self._mask * self._weigths * (psi_model - target_model)**2)
# or similar to Tagore & Keeton 2014 (does not seem to work tho)
#return - self._lambda * (jnp.sum(self._mask * self._weigths * psi_model / target_model) - jnp.sum(self._mask) * jnp.mean(psi_model / target_model))
|
HerculensREPO_NAMEherculensPATH_START.@herculens_extracted@herculens-main@herculens@Inference@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/tsa/arima/tests/__init__.py",
"type": "Python"
}
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@tsa@arima@tests@[email protected]_END.py
|
|
{
"filename": "_minor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/yaxis/_minor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MinorValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="minor", parent_name="layout.yaxis", **kwargs):
super(MinorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Minor"),
data_docs=kwargs.pop(
"data_docs",
"""
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
showgrid
Determines whether or not grid lines are drawn.
If True, the grid lines are drawn at every tick
mark.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickcolor
Sets the tick color.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for `tickvals`.
tickwidth
Sets the tick width (in px).
""",
),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@layout@yaxis@[email protected]_END.py
|
{
"filename": "anis_coefficients.py",
"repo_name": "nanograv/enterprise",
"repo_path": "enterprise_extracted/enterprise-master/enterprise/signals/anis_coefficients.py",
"type": "Python"
}
|
# anis_coefficients.py
import healpy as hp
import numpy as np
import scipy.special as ss
"""
Script to compute the correlation basis-functions for various anisotropic
configurations of the GW background energy-density
-- Rutger van Haasteren (June 2014)
-- Stephen Taylor (modifications, February 2016)
"""
def real_sph_harm(mm, ll, phi, theta):
"""
The real-valued spherical harmonics.
"""
if mm > 0:
ans = (1.0 / np.sqrt(2)) * (ss.sph_harm(mm, ll, phi, theta) + ((-1) ** mm) * ss.sph_harm(-mm, ll, phi, theta))
elif mm == 0:
ans = ss.sph_harm(0, ll, phi, theta)
elif mm < 0:
ans = (1.0 / (np.sqrt(2) * complex(0.0, 1))) * (
ss.sph_harm(-mm, ll, phi, theta) - ((-1) ** mm) * ss.sph_harm(mm, ll, phi, theta)
)
return ans.real
def signalResponse_fast(ptheta_a, pphi_a, gwtheta_a, gwphi_a):
"""
Create the signal response matrix FAST
"""
# Create a meshgrid for both phi and theta directions
gwphi, pphi = np.meshgrid(gwphi_a, pphi_a)
gwtheta, ptheta = np.meshgrid(gwtheta_a, ptheta_a)
return createSignalResponse(pphi, ptheta, gwphi, gwtheta)
def createSignalResponse(pphi, ptheta, gwphi, gwtheta):
"""
Create the signal response matrix. All parameters are assumed to be of the
same dimensionality.
@param pphi: Phi of the pulsars
@param ptheta: Theta of the pulsars
@param gwphi: Phi of GW propagation direction
@param gwtheta: Theta of GW propagation direction
@return: Signal response matrix of Earth-term
"""
Fp = createSignalResponse_pol(pphi, ptheta, gwphi, gwtheta, plus=True)
Fc = createSignalResponse_pol(pphi, ptheta, gwphi, gwtheta, plus=False)
# Pixel maps are lumped together, polarization pixels are neighbours
F = np.zeros((Fp.shape[0], 2 * Fp.shape[1]))
F[:, 0::2] = Fp
F[:, 1::2] = Fc
return F
def createSignalResponse_pol(pphi, ptheta, gwphi, gwtheta, plus=True, norm=True):
"""
Create the signal response matrix. All parameters are assumed to be of the
same dimensionality.
@param pphi: Phi of the pulsars
@param ptheta: Theta of the pulsars
@param gwphi: Phi of GW propagation direction
@param gwtheta: Theta of GW propagation direction
@param plus: Whether or not this is the plus-polarization
    @param norm: Normalise the correlations to equal Jenet et al. (2005)
@return: Signal response matrix of Earth-term
"""
# Create the unit-direction vectors. First dimension
# will be collapsed later. Sign convention of Gair et al. (2014)
Omega = np.array([-np.sin(gwtheta) * np.cos(gwphi), -np.sin(gwtheta) * np.sin(gwphi), -np.cos(gwtheta)])
mhat = np.array([-np.sin(gwphi), np.cos(gwphi), np.zeros(gwphi.shape)])
nhat = np.array([-np.cos(gwphi) * np.cos(gwtheta), -np.cos(gwtheta) * np.sin(gwphi), np.sin(gwtheta)])
p = np.array([np.cos(pphi) * np.sin(ptheta), np.sin(pphi) * np.sin(ptheta), np.cos(ptheta)])
# There is a factor of 3/2 difference between the Hellings & Downs
# integral, and the one presented in Jenet et al. (2005; also used by Gair
# et al. 2014). This factor 'normalises' the correlation matrix.
npixels = Omega.shape[2]
if norm:
# Add extra factor of 3/2
c = np.sqrt(1.5) / np.sqrt(npixels)
else:
c = 1.0 / np.sqrt(npixels)
# Calculate the Fplus or Fcross antenna pattern. Definitions as in Gair et
# al. (2014), with right-handed coordinate system
if plus:
# The sum over axis=0 represents an inner-product
Fsig = (
0.5 * c * (np.sum(nhat * p, axis=0) ** 2 - np.sum(mhat * p, axis=0) ** 2) / (1 - np.sum(Omega * p, axis=0))
)
else:
# The sum over axis=0 represents an inner-product
Fsig = c * np.sum(mhat * p, axis=0) * np.sum(nhat * p, axis=0) / (1 - np.sum(Omega * p, axis=0))
return Fsig
def almFromClm(clm):
"""
    Given an array of clm values, return an array of complex alm values.
Note: There is a bug in healpy for the negative m values. This function
just takes the imaginary part of the abs(m) alm index.
"""
maxl = int(np.sqrt(len(clm))) - 1
nalm = hp.Alm.getsize(maxl)
alm = np.zeros((nalm), dtype=np.complex128)
clmindex = 0
for ll in range(0, maxl + 1):
for mm in range(-ll, ll + 1):
almindex = hp.Alm.getidx(maxl, ll, abs(mm))
if mm == 0:
alm[almindex] += clm[clmindex]
elif mm < 0:
alm[almindex] -= 1j * clm[clmindex] / np.sqrt(2)
elif mm > 0:
alm[almindex] += clm[clmindex] / np.sqrt(2)
clmindex += 1
return alm
def clmFromAlm(alm):
"""
    Given an array of complex alm values, return an array of real clm values.
Note: There is a bug in healpy for the negative m values. This function
just takes the imaginary part of the abs(m) alm index.
"""
nalm = len(alm)
maxl = int(np.sqrt(9.0 - 4.0 * (2.0 - 2.0 * nalm)) * 0.5 - 1.5) # Really?
nclm = (maxl + 1) ** 2
# Check the solution. Went wrong one time..
if nalm != int(0.5 * (maxl + 1) * (maxl + 2)):
raise ValueError("Check numerical precision. This should not happen")
clm = np.zeros(nclm)
clmindex = 0
for ll in range(0, maxl + 1):
for mm in range(-ll, ll + 1):
almindex = hp.Alm.getidx(maxl, ll, abs(mm))
if mm == 0:
clm[clmindex] = alm[almindex].real
elif mm < 0:
clm[clmindex] = -alm[almindex].imag * np.sqrt(2)
elif mm > 0:
clm[clmindex] = alm[almindex].real * np.sqrt(2)
clmindex += 1
return clm
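# Editor's note (illustrative): both conversions above use the same flat
# ordering of the real coefficients, looping l = 0..maxl and m = -l..l, so a
# given (l, m) pair lives at index l**2 + l + m of the clm array (e.g. the
# monopole C_{00} is clm[0] and C_{1,-1} is clm[1]).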
def mapFromClm_fast(clm, nside):
"""
Given an array of C_{lm} values, produce a pixel-power-map (non-Nested) for
healpix pixelation with nside
@param clm: Array of C_{lm} values (inc. 0,0 element)
@param nside: Nside of the healpix pixelation
return: Healpix pixels
Use Healpix spherical harmonics for computational efficiency
"""
maxl = int(np.sqrt(len(clm))) - 1
alm = almFromClm(clm)
h = hp.alm2map(alm, nside, maxl, verbose=False)
return h
def mapFromClm(clm, nside):
"""
Given an array of C_{lm} values, produce a pixel-power-map (non-Nested) for
healpix pixelation with nside
@param clm: Array of C_{lm} values (inc. 0,0 element)
@param nside: Nside of the healpix pixelation
return: Healpix pixels
Use real_sph_harm for the map
"""
npixels = hp.nside2npix(nside)
pixels = hp.pix2ang(nside, np.arange(npixels), nest=False)
h = np.zeros(npixels)
ind = 0
maxl = int(np.sqrt(len(clm))) - 1
for ll in range(maxl + 1):
for mm in range(-ll, ll + 1):
h += clm[ind] * real_sph_harm(mm, ll, pixels[1], pixels[0])
ind += 1
return h
def clmFromMap_fast(h, lmax):
"""
Given a pixel map, and a maximum l-value, return the corresponding C_{lm}
values.
@param h: Sky power map
@param lmax: Up to which order we'll be expanding
return: clm values
Use Healpix spherical harmonics for computational efficiency
"""
alm = hp.sphtfunc.map2alm(h, lmax=lmax)
alm[0] = np.sum(h) * np.sqrt(4 * np.pi) / len(h)
return clmFromAlm(alm)
def clmFromMap(h, lmax):
"""
Given a pixel map, and a maximum l-value, return the corresponding C_{lm}
values.
@param h: Sky power map
@param lmax: Up to which order we'll be expanding
return: clm values
Use real_sph_harm for the map
"""
npixels = len(h)
nside = hp.npix2nside(npixels)
pixels = hp.pix2ang(nside, np.arange(npixels), nest=False)
clm = np.zeros((lmax + 1) ** 2)
ind = 0
for ll in range(lmax + 1):
for mm in range(-ll, ll + 1):
clm[ind] += np.sum(h * real_sph_harm(mm, ll, pixels[1], pixels[0]))
ind += 1
return clm * 4 * np.pi / npixels
def getCov(clm, nside, F_e):
"""
Given a vector of clm values, construct the covariance matrix
@param clm: Array with Clm values
@param nside: Healpix nside resolution
@param F_e: Signal response matrix
@return: Cross-pulsar correlation for this array of clm values
"""
# Create a sky-map (power)
# Use mapFromClm to compare to real_sph_harm. Fast uses Healpix
# sh00 = mapFromClm(clm, nside)
sh00 = mapFromClm_fast(clm, nside)
# Double the power (one for each polarization)
sh = np.array([sh00, sh00]).T.flatten()
# Create the cross-pulsar covariance
hdcov_F = np.dot(F_e * sh, F_e.T)
# The pulsar term is added (only diagonals: uncorrelated)
return hdcov_F + np.diag(np.diag(hdcov_F))
def anis_basis(psr_locs, lmax, nside=32):
"""
Calculate the correlation basis matrices using the pixel-space
    transformations
@param psr_locs: Location of the pulsars [phi, theta]
@param lmax: Maximum l to go up to
@param nside: What nside to use in the pixelation [32]
Note: GW directions are in direction of GW propagation
"""
pphi = psr_locs[:, 0]
ptheta = psr_locs[:, 1]
# Create the pixels
npixels = hp.nside2npix(nside)
pixels = hp.pix2ang(nside, np.arange(npixels), nest=False)
gwtheta = pixels[0]
gwphi = pixels[1]
# Create the signal response matrix
F_e = signalResponse_fast(ptheta, pphi, gwtheta, gwphi)
# Loop over all (l,m)
basis = []
nclm = (lmax + 1) ** 2
clmindex = 0
for ll in range(0, lmax + 1):
for mm in range(-ll, ll + 1):
clm = np.zeros(nclm)
clm[clmindex] = 1.0
basis.append(getCov(clm, nside, F_e))
clmindex += 1
return np.array(basis)
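# Editor's note (illustrative, not part of the original module): for pulsar sky
# positions psr_locs of shape (Npsr, 2) holding [phi, theta],
#
#     basis = anis_basis(psr_locs, lmax=2, nside=32)
#
# returns an array of shape ((lmax + 1)**2, Npsr, Npsr): one cross-pulsar
# correlation matrix per real spherical-harmonic coefficient C_{lm}, ordered as
# (l, m) = (0,0), (1,-1), (1,0), (1,1), (2,-2), ...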
def orfFromMap_fast(psr_locs, usermap, response=None):
"""
Calculate an ORF from a user-defined sky map.
@param psr_locs: Location of the pulsars [phi, theta]
@param usermap: Provide a healpix map for GW power
Note: GW directions are in direction of GW propagation
"""
if response is None:
pphi = psr_locs[:, 0]
ptheta = psr_locs[:, 1]
# Create the pixels
nside = hp.npix2nside(len(usermap))
npixels = hp.nside2npix(nside)
pixels = hp.pix2ang(nside, np.arange(npixels), nest=False)
gwtheta = pixels[0]
gwphi = pixels[1]
# Create the signal response matrix
F_e = signalResponse_fast(ptheta, pphi, gwtheta, gwphi)
elif response is not None:
F_e = response
# Double the power (one for each polarization)
sh = np.array([usermap, usermap]).T.flatten()
# Create the cross-pulsar covariance
hdcov_F = np.dot(F_e * sh, F_e.T)
# The pulsar term is added (only diagonals: uncorrelated)
return hdcov_F + np.diag(np.diag(hdcov_F))
|
nanogravREPO_NAMEenterprisePATH_START.@enterprise_extracted@enterprise-master@enterprise@signals@[email protected]_END.py
|
{
"filename": "draw_contri_CO2.py",
"repo_name": "fjdu/rac-2d",
"repo_path": "rac-2d_extracted/rac-2d-master/utils_python/draw/draw_contri_CO2.py",
"type": "Python"
}
|
from matplotlib import *
use('Agg')
from matplotlib.pyplot import *
from numpy import *
from os.path import join as opj
from glob import glob
from parse_ana import *
from long_function_definitions import *
from my_script import *
import drawing
mycm = make_my_colormap(c_list=[(0.0, (0.5, 0.0, 0.5)),
(0.2, (0.0, 0.0, 1.0)),
(0.4, (0.0, 0.8, 1.0)),
(0.6, (0.0, 0.8, 0.0)),
(0.8, (1.0, 0.8, 0.0)),
(1.0, (1.0, 0.0, 0.0))])
from cycler import cycler
rcParams['axes.prop_cycle'] = cycler(color=mycolors)
from scipy.interpolate import griddata
seterr(divide='ignore')
def plot_contribution_function(c, fig_fname,
nn=200, xmin=0, xmax=20, ymin=-10, ymax=10, nlev=100,
thr=1e-5, normalize=True,
return_data = False,
xscale='linear', yscale='linear',
figsize=(8,8)):
#
r = sqrt(c[:,0]**2 + c[:,1]**2)
#
xi = linspace(xmin, xmax, nn)
yi = linspace(ymin, ymax, nn)
#
xi, yi = np.meshgrid(xi, yi)
maxval = c[:,3].max()
c[c[:,3] < maxval*thr, 3] = maxval*thr #0.0
#
zi = griddata((r, c[:,2]), log10(c[:,3]), (xi, yi), method='linear')
if normalize:
zi = zi - np.nanmax(zi)
#
##xi = 1.15 * xi
#
f = figure(figsize=figsize)
pos = (0.15, 0.15, 0.8, 0.4)
ax = f.add_axes(pos,
xlabel='r (AU)',
ylabel='z (AU)',
autoscalex_on=False, autoscaley_on=False,
xscale=xscale, yscale=yscale,
xlim=(xmin, xmax), ylim=(ymin, ymax))
#
C = ax.contourf(xi, yi, zi, nlev, cmap=mycm, extend='neither')
for _ in C.collections:
_.set_rasterized(True)
#colorbar(C)
#set_axis_format(ax, graygrid=True, majorgridon=True, minorgridon=False)
pos = (0.15, 0.6, 0.8, 0.03)
cax = f.add_axes(pos)
drawing.add_colorbar(cax, np.nanmin(zi), np.nanmax(zi), mycm,
label='', title='log$_{10}$ (Relative contributions)',
orientation='horizontal', scale='linear')
#
flx = xi * 10**zi
idx = logical_not(isfinite(zi))
flx[idx] = 0.0
flx = sum(flx, axis=0)
flx_t = sum(flx)
flx = flx / flx_t
pos = (0.15, 0.68, 0.8, 0.33)
#maxval = flx.max()
#maxtik = ceil(maxval/0.02)*0.02
#yticks = linspace(0,maxtik,num=int(maxtik/0.02)+1)
ax = f.add_axes(pos,
xlabel='', ylabel='',
xticklabels=[],
#yticks=yticks,
autoscalex_on=False, autoscaley_on=False,
xscale=xscale, yscale=yscale,
xlim=(xmin, xmax), ylim=(-0.1,1.1))
x_, y_ = loglin_interpol(xi[0,:], flx, num=100, method='linear',
dosmooth=True, winwidth=3)
y_ = y_ / np.nanmax(y_)
ax.plot(x_, y_, lw=2, color='red', label='Contrib.')
fsum = cumsum(y_)
fsum = fsum/np.nanmax(fsum)
#set_axis_format(ax, majorgridon=False, minorgridon=False)
#
ax.plot(x_, fsum, lw=1, color='blue', label='Accum. Contrib.')
ax.legend(loc="center right", fontsize=15)
#
savefig(fig_fname, bbox_inches='tight')
#
if return_data:
return xi, yi, zi
else:
return
def plot_contribution_together(c_s, labels, fig_fname,
show_scaling=False,
nn=200, xmin=0, xmax=20, ymin=-10, ymax=10, nlev=100,
thr=1e-5, normalize=True,
return_data = False,
xscale='linear', yscale='linear',
figsize=(8,5)):
#
f = figure(figsize=figsize)
pos = (0.15, 0.15, 0.8, 0.8)
#
for icount, c in enumerate(c_s):
r = sqrt(c[:,0]**2 + c[:,1]**2)
#
xi = linspace(xmin, xmax, nn)
yi = linspace(ymin, ymax, nn)
#
xi, yi = np.meshgrid(xi, yi)
maxval = c[:,3].max()
c[c[:,3] < maxval*thr, 3] = 0.0
if normalize:
c[:,3] = c[:,3] / maxval
#
zi = griddata((r, c[:,2]), log10(c[:,3]), (xi, yi), method='linear')
#
idx = logical_not(isfinite(zi))
zi[idx] = -99.0
flx = sum(xi * 10**zi, axis=0)
flx_t = sum(flx)
flx = flx / flx_t
fsum = cumsum(flx)
#maxval = flx.max()
#maxtik = ceil(maxval/0.1)*0.1
#yticks = linspace(0,maxtik,num=int(maxtik/0.1)+1)
if icount == 0:
ax = f.add_axes(pos,
xlabel='r (AU)',
ylabel='Fraction (within r)',
autoscalex_on=False, autoscaley_on=False,
xscale=xscale, yscale=yscale,
xlim=(xmin, xmax), ylim=(0, 1.05))
ax.plot(xi[0], fsum, lw=2, label=labels[icount])
if show_scaling:
coeff = fsum[-1] / (xi[0][-1])**2
ax.plot(xi[0], coeff * xi[0]**2, lw=2, linestyle='--', label='$r^2$ scaling')
#
legend(loc="upper left", fontsize=20)
set_axis_format(ax, majorgridon=False, minorgridon=False)
#
savefig(fig_fname, bbox_inches='tight')
#
return
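# Usage sketch (illustrative only; the data file names are placeholders):
#   c1 = loadtxt('line_A_contri.dat')
#   c2 = loadtxt('line_B_contri.dat')
#   plot_contribution_together([c1, c2], ['Line A', 'Line B'], 'compare_contri.pdf',
#                              xmin=0, xmax=100, ymin=-100, ymax=100,
#                              show_scaling=True)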
if __name__ == '__main__':
fig_dir = '/Users/fjdu/work_local/upgrade20180626/figures/'
#
#c = loadtxt('/Users/fjdu/work_local/upgrade20180626/storage/cjmerch/20150307_dep_p1/CO2_0deg/images/line_00001_10941_00001_1.93049E+13_0.00.fits_contri.dat')
#c = loadtxt('/Users/fjdu/work_local/upgrade20180626/storage/cjmerch/20150307_dep_p1/CO2/images/line_00001_10941_00001_1.93049E+13_7.00.fits_contri.dat')
#c = loadtxt('/Users/fjdu/work_local/upgrade20180626/storage/cjmerch/20150307_dep_p1/C2H/images/line_00001_00018_00001_2.62004E+11_7.00.fits_contri.dat')
c = loadtxt('/Users/fjdu/work_local/upgrade20180626/storage/cjmerch/20150307_dep_p1/CO2_14.97/images/line_00010_13905_00001_2.00310E+13_7.00.fits_contri.dat')
fig_fname = opj(fig_dir, 'contri_CO2_14.97.pdf')
plot_contribution_function(c, fig_fname, xmin=-2, xmax=100, ymin=-100, ymax=100, thr=1e-10)
|
|
{
"filename": "README.md",
"repo_name": "djones1040/PythonPhot",
"repo_path": "PythonPhot_extracted/PythonPhot-master/README.md",
"type": "Markdown"
}
|
PythonPhot PSF Fitting Photometry Tutorial
=========
getpsf.py : Generates a point-spread function (PSF) from observed stars at
specified locations. Uses the family of "peak fit" modules
(pkfit, pkfit_noise, pkfit_norecent, etc) to fit a gaussian to each
star and define an array of non-gaussian psf residuals.
Returns a 5-element vector defining the gaussian, a 2-d array of
psf residuals, and the magnitude of the psf. Also writes out the
psf model to a fits file with the gaussian parameters in the header
and the residuals in the data array.
rdpsf.py : Read the .fits file created by getpsf.py that contains the
psf model gaussian parameters and 2-d array of residuals.
pkfit.py : fit a psf model to an isolated point source
pkfit_noise : fitting with an input noise image
pkfit_norecent : forced photometry (fitting a peak without recentering)
pkfit_norecent_noise : forced photometry with an input noise image
-----------
# EXAMPLE A : Make a psf model
import getpsf
import aper
import numpy as np
import pyfits
# load FITS image and specify PSF star coordinates
image = pyfits.getdata(fits_filename)
xpos,ypos = np.array([1450,1400]),np.array([1550,1600])
# run aper to get mags and sky values for specified coords
mag,magerr,flux,fluxerr,sky,skyerr,badflag,outstr = \
aper.aper(image,xpos,ypos,phpadu=1,apr=5,zeropoint=25,
skyrad=[40,50],badpix=[-12000,60000],exact=True)
# use the stars at those coords to generate a PSF model
gauss,psf,psfmag = \
getpsf.getpsf(image,xpos,ypos,
mag,sky,1,1,np.arange(len(xpos)),
5,'output_psf.fits')
------------
# EXAMPLE B : fit a psf to isolated stars
import pyfits
import numpy as np
from PythonPhot import aper, pkfit
# read in the fits images containing the target sources
image = pyfits.getdata(fits_filename)
noiseim = pyfits.getdata(fits_noise_filename)
maskim = pyfits.getdata(fits_mask_filename)
# read in the fits image containing the PSF (gaussian model
# parameters and 2-d residuals array).
psf = pyfits.getdata(psf_filename)
hpsf = pyfits.getheader(psf_filename)
gauss = [hpsf['GAUSS1'],hpsf['GAUSS2'],hpsf['GAUSS3'],hpsf['GAUSS4'],hpsf['GAUSS5']]
# x and y points for PSF fitting
xpos,ypos = np.array([1450,1400]),np.array([1550,1600])
# run 'aper' on x,y coords to get sky values
mag,magerr,flux,fluxerr,sky,skyerr,badflag,outstr = \
aper.aper(image,xpos,ypos,phpadu=1,apr=5,zeropoint=25,
skyrad=[40,50],badpix=[-12000,60000],exact=True)
# load the pkfit class
pk = pkfit.pkfit_class(image,gauss,psf,1,1,noiseim,maskim)
# do the PSF fitting
for x,y,s in zip(xpos,ypos,sky):
errmag,chi,sharp,niter,scale = \
pk.pkfit_norecent_noise(1,x,y,s,5)
flux = scale*10**(0.4*(25.-hpsf['PSFMAG']))
dflux = errmag*10**(0.4*(25.-hpsf['PSFMAG']))
print('PSF fit to coords %.2f,%.2f gives flux %s +/- %s'%(x,y,flux,dflux))
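------------
# NOTE : converting the fitted fluxes to magnitudes
# (not part of PythonPhot itself -- standard error propagation, assuming the
#  zeropoint of 25 used in the examples above)
mag = 25. - 2.5*np.log10(flux)
magerr = 1.0857*dflux/flux   # 2.5/ln(10) * dflux/flux
print('PSF fit gives mag %.3f +/- %.3f'%(mag,magerr))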
|
|
{
"filename": "GoodnessOfFitSingleResult.py",
"repo_name": "freelunchtheorem/Conditional_Density_Estimation",
"repo_path": "Conditional_Density_Estimation_extracted/Conditional_Density_Estimation-master/cde/model_fitting/GoodnessOfFitSingleResult.py",
"type": "Python"
}
|
from collections import OrderedDict
import types
class GoodnessOfFitSingleResult:
def __init__(self, estimator_params, probabilistic_model_params, x_cond=None):
self.cond_values = x_cond
self.time_to_fit = None
self.time_to_predict = None
self.ndim_x = estimator_params["ndim_x"]
self.ndim_y = estimator_params["ndim_y"]
    # remove built-in functions so that GoodnessOfFitSingleResult remains picklable
if 'X_ph' in estimator_params:
del estimator_params['X_ph']
self.estimator_params = estimator_params
self.probabilistic_model_params = probabilistic_model_params
self.kl_divergence = None
self.hellinger_distance = None
self.wasserstein_distance = None
self.js_divergence = None
self.n_observations = None
self.x_cond = x_cond
self.n_x_cond = len(x_cond) if x_cond is not None else None
self.result_df = None
self.random_seed = probabilistic_model_params['random_seed']
def report_dict(self, keys_of_interest=None):
full_dict = self.__dict__
if keys_of_interest is not None:
report_dict = OrderedDict()
for key in keys_of_interest:
if key in full_dict:
value = full_dict[key]
elif key in self.estimator_params:
value = self.estimator_params[key]
elif key in self.probabilistic_model_params:
value = self.probabilistic_model_params[key]
else:
value = None
if (isinstance(value, list) or isinstance(value, tuple)) and len(value) > 1:
value = str(value)
if callable(value):
value = str(value)
report_dict[key] = value
return report_dict
else:
return full_dict
def __len__(self):
return 1
def __str__(self):
return "KL divergence: %.4f, Hellinger distance: %.4f, Jason-Shannon divergence: %.4f"%(self.kl_divergence, self.hellinger_distance, self.js_divergence)
|
|
{
"filename": "_xhoverformat.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/_xhoverformat.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XhoverformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="xhoverformat", parent_name="scatter3d", **kwargs):
super(XhoverformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
|
{
"filename": "_nticks.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/parcoords/line/colorbar/_nticks.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="nticks", parent_name="parcoords.line.colorbar", **kwargs
):
super(NticksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
|
{
"filename": "__init__.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/extern/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains Python packages that are bundled with Astropy but are
external to Astropy, and hence are developed in a separate source tree. Note
that this package is distinct from the /cextern directory of the source code
distribution, as that directory only contains C extension code.
See the README.rst in this directory of the Astropy source repository for more
details.
"""
|
|
{
"filename": "_steps.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/slider/_steps.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StepsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(self, plotly_name="steps", parent_name="layout.slider", **kwargs):
super(StepsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Step"),
data_docs=kwargs.pop(
"data_docs",
"""
args
Sets the arguments values to be passed to the
Plotly method set in `method` on slide.
execute
When true, the API method is executed. When
false, all other behaviors are the same and
command execution is skipped. This may be
useful when hooking into, for example, the
`plotly_sliderchange` method and executing the
API command manually without losing the benefit
of the slider automatically binding to the
state of the plot through the specification of
`method` and `args`.
label
Sets the text label to appear on the slider
method
Sets the Plotly method to be called when the
slider value is changed. If the `skip` method
is used, the API slider will function as normal
but will perform no API calls and will not bind
automatically to state updates. This may be
used to create a component interface and attach
to slider events manually via JavaScript.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
Sets the value of the slider step, used to
refer to the step programatically. Defaults to
the slider label if not provided.
visible
Determines whether or not this step is included
in the slider.
""",
),
**kwargs
)
|
|
{
"filename": "types.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/third_party/xla/xla/mlir_hlo/tests/python/types.py",
"type": "Python"
}
|
# Copyright 2021 The OpenXLA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for Python APIs accessing MHLO types."""
# pylint: disable=wildcard-import,undefined-variable,missing-function-docstring
from mlir import ir
from mlir.dialects import mhlo
def run(f):
with ir.Context() as context:
mhlo.register_mhlo_dialect(context)
f()
return f
@run
def test_token_type():
token_type = mhlo.TokenType.get()
assert token_type is not None
assert str(token_type) == "!mhlo.token"
|
|
{
"filename": "E12_not_first.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/parso/py3/tests/normalizer_issue_files/E12_not_first.py",
"type": "Python"
}
|
# The issue numbers described in this file are part of the pycodestyle tracker
# and not of parso.
# Originally there were no issues in here, I (dave) added the ones that were
# necessary and IMO useful.
if (
x == (
3
) or
y == 4):
pass
y = x == 2 \
or x == 3
#: E129+1:4
if x == 2 \
or y > 1 \
or x == 3:
pass
if x == 2 \
or y > 1 \
or x == 3:
pass
if (foo == bar and
baz == frop):
pass
#: E129+1:4 E129+2:4 E123+3
if (
foo == bar and
baz == frop
):
pass
if (
foo == bar and
baz == frop
#: E129:4
):
pass
a = (
)
a = (123,
)
if start[1] > end_col and not (
over_indent == 4 and indent_next):
assert (0, "E121 continuation line over-"
"indented for visual indent")
abc = "OK", ("visual",
"indent")
abc = "Okay", ("visual",
"indent_three"
)
abc = "a-ok", (
"there",
"dude",
)
abc = "hello", (
"there",
"dude")
abc = "hello", (
"there",
# "john",
"dude")
abc = "hello", (
"there", "dude")
abc = "hello", (
"there", "dude",
)
# Aligned with opening delimiter
foo = long_function_name(var_one, var_two,
var_three, var_four)
# Extra indentation is not necessary.
foo = long_function_name(
var_one, var_two,
var_three, var_four)
arm = 'AAA' \
'BBB' \
'CCC'
bbb = 'AAA' \
'BBB' \
'CCC'
cc = ('AAA'
'BBB'
'CCC')
cc = {'text': 'AAA'
'BBB'
'CCC'}
cc = dict(text='AAA'
'BBB')
sat = 'AAA' \
'BBB' \
'iii' \
'CCC'
abricot = (3 +
4 +
5 + 6)
#: E122+1:4
abricot = 3 + \
4 + \
5 + 6
part = [-1, 2, 3,
4, 5, 6]
#: E128+1:8
part = [-1, (2, 3,
4, 5, 6), 7,
8, 9, 0]
fnct(1, 2, 3,
4, 5, 6)
fnct(1, 2, 3,
4, 5, 6,
7, 8, 9,
10, 11)
def long_function_name(
var_one, var_two, var_three,
var_four):
hello(var_one)
if ((row < 0 or self.moduleCount <= row or
col < 0 or self.moduleCount <= col)):
raise Exception("%s,%s - %s" % (row, col, self.moduleCount))
result = {
'foo': [
'bar', {
'baz': 'frop',
}
]
}
foo = my.func({
"foo": "bar",
}, "baz")
fooff(aaaa,
cca(
vvv,
dadd
), fff,
ggg)
fooff(aaaa,
abbb,
cca(
vvv,
aaa,
dadd),
"visual indentation is not a multiple of four",)
if bar:
assert (
start, 'E121 lines starting with a '
'closing bracket should be indented '
"to match that of the opening "
"bracket's line"
)
# you want vertical alignment, so use a parens
if ((foo.bar("baz") and
foo.bar("frop")
)):
hello("yes")
# also ok, but starting to look like LISP
if ((foo.bar("baz") and
foo.bar("frop"))):
hello("yes")
#: E129+1:4 E127+2:9
if (a == 2 or
b == "abc def ghi"
"jkl mno"):
assert True
#: E129+1:4
if (a == 2 or
b == """abc def ghi
jkl mno"""):
assert True
if length > options.max_line_length:
assert options.max_line_length, \
"E501 line too long (%d characters)" % length
# blub
asd = 'l.{line}\t{pos}\t{name}\t{text}'.format(
line=token[2][0],
pos=pos,
name=tokenize.tok_name[token[0]],
text=repr(token[1]),
)
#: E121+1:6 E121+2:6
hello('%-7d %s per second (%d total)' % (
options.counters[key] / elapsed, key,
options.counters[key]))
if os.path.exists(os.path.join(path, PEP8_BIN)):
cmd = ([os.path.join(path, PEP8_BIN)] +
self._pep8_options(targetfile))
fixed = (re.sub(r'\t+', ' ', target[c::-1], 1)[::-1] +
target[c + 1:])
fixed = (
re.sub(r'\t+', ' ', target[c::-1], 1)[::-1] +
target[c + 1:]
)
if foo is None and bar is "frop" and \
blah == 'yeah':
blah = 'yeahnah'
"""This is a multi-line
docstring."""
if blah:
# is this actually readable? :)
multiline_literal = """
while True:
if True:
1
""".lstrip()
multiline_literal = (
"""
while True:
if True:
1
""".lstrip()
)
multiline_literal = (
"""
while True:
if True:
1
"""
.lstrip()
)
if blah:
multiline_visual = ("""
while True:
if True:
1
"""
.lstrip())
rv = {'aaa': 42}
rv.update(dict.fromkeys((
#: E121:4 E121+1:4
'qualif_nr', 'reasonComment_en', 'reasonComment_fr',
'reasonComment_de', 'reasonComment_it'), '?'))
rv.update(dict.fromkeys(('qualif_nr', 'reasonComment_en',
'reasonComment_fr', 'reasonComment_de',
'reasonComment_it'), '?'))
#: E128+1:10
rv.update(dict.fromkeys(('qualif_nr', 'reasonComment_en', 'reasonComment_fr',
'reasonComment_de', 'reasonComment_it'), '?'))
rv.update(dict.fromkeys(
('qualif_nr', 'reasonComment_en', 'reasonComment_fr',
'reasonComment_de', 'reasonComment_it'), '?'
), "foo", context={
'alpha': 4, 'beta': 53242234, 'gamma': 17,
})
rv.update(
dict.fromkeys((
'qualif_nr', 'reasonComment_en', 'reasonComment_fr',
'reasonComment_de', 'reasonComment_it'), '?'),
"foo",
context={
'alpha': 4, 'beta': 53242234, 'gamma': 17,
},
)
event_obj.write(cursor, user_id, {
'user': user,
'summary': text,
'data': data,
})
event_obj.write(cursor, user_id, {
'user': user,
'summary': text,
'data': {'aaa': 1, 'bbb': 2},
})
event_obj.write(cursor, user_id, {
'user': user,
'summary': text,
'data': {
'aaa': 1,
'bbb': 2},
})
event_obj.write(cursor, user_id, {
'user': user,
'summary': text,
'data': {'timestamp': now, 'content': {
'aaa': 1,
'bbb': 2
}},
})
|
|
{
"filename": "asciiheader.py",
"repo_name": "spacetelescope/stsdas_stripped",
"repo_path": "stsdas_stripped_extracted/stsdas_stripped-master/stsdas/pkg/analysis/slitless/axe/axe_asciidata/asciiheader.py",
"type": "Python"
}
|
"""
Various header classes to be part of the asciidata class
@author: Martin Kuemmel, Jonas Haase
@organization: Space Telescope - European Coordinating Facility (ST-ECF)
@license: Gnu Public Licence
@contact: [email protected]
@since: 2005/09/13
$LastChangedBy: mkuemmel $
$LastChangedDate: 2008-07-03 10:27:47 +0200 (Thu, 03 Jul 2008) $
$HeadURL: http://astropy.scipy.org/svn/astrolib/trunk/asciidata/Lib/asciiheader.py $
"""
from __future__ import absolute_import
__version__ = "Version 1.1 $LastChangedRevision: 503 $"
import re
from .asciiutils import *
class Header(object):
"""
The header object
This object offers the possibility to store additional
information such as change comments or column information.
This additional information may just be present at the
beginning of the data file or later be added.
"""
def __init__(self, filename=None, comment_char=None):
"""
Constructor for the Header class
@param filename: the data file
@type filename: string
@param comment_char: the comment_char string
@type comment_char: string
"""
# store the comment_char
self._comment_char = comment_char
# Fullhdata contains the full nonparsed header - probably
        # superfluous now
self.Fullhdata = []
        # CollInfo is a list of column names extracted from the header.
        # Note that it is only current at read-in time and is currently
        # not updated when columns are changed.
self.CollInfo = []
# SexVectorColls are the known sextractor output parameters which
# come as vectors
self.SexVectorColls = ('MAG_APER','MAGERR_APER','FLUX_RADIUS','FLUX_APER','FLUXERR_APER','VECTOR_SOMFIT','VECTOR_ASSOC','FLUX_GROWTH','VIGNET','VIGNET_SHIFT')
# SExtarctorFlag marks whether sextractorlike header information
# was parsed
self.SExtractorFlag = False
# retrieve the comment from the data file
# hdata is the header minus the column info lines
# in case the header column info is invalid at loading hdata defaults to Fullhdata
        if filename is None:
self.hdata = []
else:
self.hdata = self._load_header(filename, comment_char)
# set the number of elements
self._nentry = len(self.hdata)
def __getitem__(self, index):
"""
Defines the list operator for indexing
The method returns the indexed header entry,
if it exists. An error is raised otherwise
@param index: the index of the header entry to be returned
@type index: integer
@return: a header line
@rtype: string
"""
if index+1 > self._nentry:
err_msg = 'Index: '+str(index)+' does not exist! The header contains '\
+ str(self._nentry) + ' items!'
raise Exception(err_msg)
# return the desired header entry
return self.hdata[index]
def __setitem__(self, index, hentry):
"""
        Defines the list operator for indexed assignment
        @param index: the index of the header entry to be set
        @type index: integer
        @param hentry: the header line to assign to that index
        @type hentry: string
"""
# check whether the target index exists;
# raise error if not
if index+1 > self._nentry:
err_msg = 'Index: '+str(index)+' does not exist! The header contains '\
+ str(self._nentry) + ' items!'
raise Exception(err_msg)
# split the string to lines
hitems = hentry.strip().split('\n')
# check whether more than one line
# wants to be added
if len(hitems) > 1:
raise Exception('Only one line can be set!')
# replace the header entry,
# add a newline if necessary
if hentry[-1] != '\n':
self.hdata[index] = hentry + '\n'
else:
self.hdata[index] = hentry
def __delitem__(self, index):
"""
Deletes an index.
@param index: the index of the header item to be deleted
@type index: integer
"""
# check whether the target index exists;
# raise error if not
if index+1 > self._nentry:
err_msg = 'Index: '+str(index)+' does not exist! The header contains '\
+ str(self._nentry) + ' items!'
raise Exception(err_msg)
# delete the column
del self.hdata[index]
# adjust the number of entries
self._nentry -= 1
def __str__(self):
"""
Defines a string method for the object
@return: the string representation
@rtype: string
"""
# start the string
hstring = ''
# add the different items
for line in self.hdata:
if len(line) > 0:
hstring += self._comment_char + line
else:
hstring += self._comment_char + '\n'
# return the string
return hstring
def __iter__(self):
"""
Provide an iterator object.
The function provides and returns an interator object
for the AstroAsciiData class. Due to this iterator object
sequences like:
for column in ascii_data_object:
<do something with column>
are possible.
"""
return AsciiLenGetIter(self)
def __len__(self):
"""
The length operator
        @return: the number of header entries
        @rtype: integer
"""
        # that's rather trivial
length = self._nentry
# return the length
return length
def append(self, hlist):
"""
Append something to the header data
@param hlist: the string to append
@type hlist: string
"""
# split the string to lines
hitems = hlist.split('\n')
# for each line
for item in hitems:
# append the new content
# to the header content
self.hdata.append(item+'\n')
self._nentry += 1
def _load_header(self, filename, comment_char):
"""
Loads the header from the data file
@param filename: the data file
@type filename: string
@param comment_char: the comment_char string
@type comment_char: string
"""
# start the item list
data = []
lastcoll,currcoll =0,0
lastname =''
# Define patterns for some common header formats
commentpattern = re.compile(comment_char)
sextractor_header = re.compile('^#\s*(\d+)\s+([+*-/()\w]+)([^\[]*)(\[\w+\])?(.*)\n')
# open the data file and go over its rows
        for line in open(filename, 'r'):
if commentpattern.match(line):
#append everything after the comment_char separator to Fullhdata
line_with_comment_char_stripped_off = commentpattern.sub('',line,count=1)
self.Fullhdata.append(line_with_comment_char_stripped_off)
SEmatch = sextractor_header.match(line)
if SEmatch: #sextractor_header.match(line):
# seems we have a SExtractorheader
if not self.SExtractorFlag:
self.SExtractorFlag = True
groups = SEmatch.groups()
currcoll = int(groups[0])
name = groups[1]
if currcoll <= lastcoll:
                        #ignore duplicates and definitions out of order
continue
if currcoll > (lastcoll +1):
# print currcoll,lastcoll
# we jumped some lines, pad CollInfo
vcounter = 1
while (lastcoll +1) < currcoll:
if lastname in self.SexVectorColls:
self.CollInfo.append({'NAME':lastname+str(vcounter)})
vcounter +=1
else:
self.CollInfo.append(None)
lastcoll +=1
self.CollInfo.append({'NAME':name})
lastcoll = currcoll
lastname = name
if groups[3]:
# a unit was extracted
self.CollInfo[-1]['UNIT'] = str(groups[3].strip('[]'))
if groups[2] or groups[4]:
self.CollInfo[-1]['COMMENT'] =''
self.CollInfo[-1]['COMMENT'] += groups[2].strip()
if groups[2] and groups[4]:
self.CollInfo[-1]['COMMENT'] += ' '
self.CollInfo[-1]['COMMENT'] += groups[4].strip()
else:
data.append(line_with_comment_char_stripped_off)
else:
# leave the file at the first
# non-comment line
break
return data
def reset(self):
"""
Reset the header
"""
self.hdata = []
self._nentry = 0
def set_comment_char(self, comment_char):
"""
Set the comment_char string
@param comment_char: the new comment_char string
@type comment_char: string
"""
self._comment_char = comment_char
def getCollInfo(self,index):
"""
Robustly return column info from header
returns (columnname,unit,comment)
@param index: The column index
@type index: int
"""
#default values
name = 'column' + str(index+1)
unit = None
comment = None
if index < len(self.CollInfo):
if self.CollInfo[index]:
if 'NAME' in self.CollInfo[index]:
name = str(self.CollInfo[index]['NAME'])
if 'UNIT' in self.CollInfo[index]:
unit = str(self.CollInfo[index]['UNIT'])
if 'COMMENT' in self.CollInfo[index]:
comment = str(self.CollInfo[index]['COMMENT'])
else:
# is the very last column in the list a known vector?
if self.CollInfo[-1]['NAME'] in self.SexVectorColls:
name = self.CollInfo[-1]['NAME']+str(index-len(self.CollInfo)+1)
# return name, unit, comment of the column
return name, unit, comment
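# Usage sketch (illustrative; 'catalog.cat' is a placeholder file name):
#
#   hdr = Header('catalog.cat', comment_char='#')
#   print(len(hdr))                            # number of non-column header lines
#   for line in hdr:                           # iterate over the stored header lines
#       print(line.rstrip())
#   name, unit, comment = hdr.getCollInfo(0)   # info on the first column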
|
|
{
"filename": "benchmarks.py",
"repo_name": "tardis-sn/tardis",
"repo_path": "tardis_extracted/tardis-main/asv/benchmarks/benchmarks.py",
"type": "Python"
}
|
# Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
import numpy as np
from tardis.tests import montecarlo_test_wrappers as montecarlo
LINE_SIZE = 10000000
class TimeSuite:
"""
An example benchmark that times the performance of various kinds
of iterating over dictionaries in Python.
"""
def setup(self):
self.line = np.arange(LINE_SIZE, 1, -1).astype(np.float64)
def time_binarysearch(self):
for _ in range(LINE_SIZE):
montecarlo.binary_search_wrapper(
self.line, np.random.random() * LINE_SIZE, 0, LINE_SIZE - 1
)
def time_compute_distance2outer(self):
for _ in range(1000000):
montecarlo.compute_distance2outer_wrapper(0.0, 0.5, 1.0)
montecarlo.compute_distance2outer_wrapper(1.0, 0.5, 1.0)
montecarlo.compute_distance2outer_wrapper(0.3, 1.0, 1.0)
montecarlo.compute_distance2outer_wrapper(0.3, -1.0, 1.0)
montecarlo.compute_distance2outer_wrapper(0.5, 0.0, 1.0)
def time_compute_distance2inner(self):
for _ in range(1000000):
montecarlo.compute_distance2inner_wrapper(1.5, -1.0, 1.0)
montecarlo.compute_distance2inner_wrapper(0.0, 0.0, 0.0)
montecarlo.compute_distance2inner_wrapper(1.2, -0.7, 1.0)
def time_compute_distance2line(self):
for _ in range(1000000):
montecarlo.compute_distance2line_wrapper(
2.20866912e15,
-0.251699059004,
1.05581082105e15,
1.06020910733e15,
1693440.0,
5.90513983371e-07,
1.0602263591e15,
1.06011723237e15,
2,
)
montecarlo.compute_distance2line_wrapper(
2.23434667994e15,
-0.291130548401,
1.05581082105e15,
1.06733618121e15,
1693440.0,
5.90513983371e-07,
1.06738407486e15,
1.06732933961e15,
3,
)
def time_compute_distance2electron(self):
for _ in range(1000000):
montecarlo.compute_distance2electron_wrapper(0.0, 0.0, 2.0, 2.0)
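# Quick manual check (illustrative; asv normally discovers and times these
# methods itself):
#
#   import timeit
#   suite = TimeSuite()
#   suite.setup()
#   print(timeit.timeit(suite.time_compute_distance2electron, number=1))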
|
|
{
"filename": "renderer.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/contourpy/contourpy/util/renderer.py",
"type": "Python"
}
|
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
import numpy as np
if TYPE_CHECKING:
import io
from numpy.typing import ArrayLike
from contourpy._contourpy import CoordinateArray, FillReturn, FillType, LineReturn, LineType
class Renderer(ABC):
"""Abstract base class for renderers."""
def _grid_as_2d(self, x: ArrayLike, y: ArrayLike) -> tuple[CoordinateArray, CoordinateArray]:
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1:
x, y = np.meshgrid(x, y)
return x, y
@abstractmethod
def filled(
self,
filled: FillReturn,
fill_type: FillType | str,
ax: Any = 0,
color: str = "C0",
alpha: float = 0.7,
) -> None:
pass
@abstractmethod
def grid(
self,
x: ArrayLike,
y: ArrayLike,
ax: Any = 0,
color: str = "black",
alpha: float = 0.1,
point_color: str | None = None,
quad_as_tri_alpha: float = 0,
) -> None:
pass
@abstractmethod
def lines(
self,
lines: LineReturn,
line_type: LineType | str,
ax: Any = 0,
color: str = "C0",
alpha: float = 1.0,
linewidth: float = 1,
) -> None:
pass
@abstractmethod
def mask(
self,
x: ArrayLike,
y: ArrayLike,
z: ArrayLike | np.ma.MaskedArray[Any, Any],
ax: Any = 0,
color: str = "black",
) -> None:
pass
def multi_filled(
self,
multi_filled: list[FillReturn],
fill_type: FillType | str,
ax: Any = 0,
color: str | None = None,
**kwargs: Any,
) -> None:
"""Plot multiple sets of filled contours on a single axes.
Args:
multi_filled (list of filled contour arrays): Multiple filled contour sets as returned
by :meth:`.ContourGenerator.multi_filled`.
fill_type (FillType or str): Type of filled data as returned by
:attr:`~.ContourGenerator.fill_type`, or string equivalent.
ax (int or Renderer-specific axes or figure object, optional): Which axes to plot on,
default ``0``.
color (str or None, optional): If a string color then this same color is used for all
filled contours. If ``None``, the default, then the filled contour sets use colors
from the ``tab10`` colormap in order, wrapping around to the beginning if more than
10 sets of filled contours are rendered.
            kwargs: All other keyword arguments are passed on to
:meth:`.Renderer.filled` unchanged.
.. versionadded:: 1.3.0
"""
if color is not None:
kwargs["color"] = color
for i, filled in enumerate(multi_filled):
if color is None:
kwargs["color"] = f"C{i % 10}"
self.filled(filled, fill_type, ax, **kwargs)
def multi_lines(
self,
multi_lines: list[LineReturn],
line_type: LineType | str,
ax: Any = 0,
color: str | None = None,
**kwargs: Any,
) -> None:
"""Plot multiple sets of contour lines on a single axes.
Args:
multi_lines (list of contour line arrays): Multiple contour line sets as returned by
:meth:`.ContourGenerator.multi_lines`.
line_type (LineType or str): Type of line data as returned by
:attr:`~.ContourGenerator.line_type`, or string equivalent.
ax (int or Renderer-specific axes or figure object, optional): Which axes to plot on,
default ``0``.
color (str or None, optional): If a string color then this same color is used for all
lines. If ``None``, the default, then the line sets use colors from the ``tab10``
colormap in order, wrapping around to the beginning if more than 10 sets of lines
are rendered.
            kwargs: All other keyword arguments are passed on to
:meth:`Renderer.lines` unchanged.
.. versionadded:: 1.3.0
"""
if color is not None:
kwargs["color"] = color
for i, lines in enumerate(multi_lines):
if color is None:
kwargs["color"] = f"C{i % 10}"
self.lines(lines, line_type, ax, **kwargs)
@abstractmethod
def save(self, filename: str, transparent: bool = False) -> None:
pass
@abstractmethod
def save_to_buffer(self) -> io.BytesIO:
pass
@abstractmethod
def show(self) -> None:
pass
@abstractmethod
def title(self, title: str, ax: Any = 0, color: str | None = None) -> None:
pass
@abstractmethod
def z_values(
self,
x: ArrayLike,
y: ArrayLike,
z: ArrayLike,
ax: Any = 0,
color: str = "green",
fmt: str = ".1f",
quad_as_tri: bool = False,
) -> None:
pass
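# Minimal sketch of a concrete subclass (illustrative only; the shipped
# implementations live in contourpy.util.mpl_renderer and bokeh_renderer).
# Every abstract method above must be overridden, e.g. a do-nothing renderer:
#
#   import io
#
#   class NullRenderer(Renderer):
#       def filled(self, filled, fill_type, ax=0, color="C0", alpha=0.7): pass
#       def grid(self, x, y, ax=0, color="black", alpha=0.1,
#                point_color=None, quad_as_tri_alpha=0): pass
#       def lines(self, lines, line_type, ax=0, color="C0", alpha=1.0, linewidth=1): pass
#       def mask(self, x, y, z, ax=0, color="black"): pass
#       def save(self, filename, transparent=False): pass
#       def save_to_buffer(self): return io.BytesIO()
#       def show(self): pass
#       def title(self, title, ax=0, color=None): pass
#       def z_values(self, x, y, z, ax=0, color="green", fmt=".1f", quad_as_tri=False): pass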
|
|
{
"filename": "pk_desi_mock_challenge_handshake.py",
"repo_name": "Samreay/Barry",
"repo_path": "Barry_extracted/Barry-master/config/desi_mock_challenge/pk_desi_mock_challenge_handshake.py",
"type": "Python"
}
|
import sys
import os
import pandas as pd
from scipy.interpolate import interp1d
from scipy.stats import norm
import numpy as np
sys.path.append("..")
from barry.datasets.dataset_power_spectrum import PowerSpectrum_DESIMockChallenge_Handshake
from barry.cosmology.camb_generator import getCambGenerator
from barry.config import setup
from barry.models import PowerSpectrumFit, PowerSeo2016, PowerBeutler2017, PowerDing2018, PowerNoda2019
from barry.samplers import DynestySampler
from barry.fitter import Fitter
if __name__ == "__main__":
pfn, dir_name, file = setup(__file__)
sampler = DynestySampler(temp_dir=dir_name, nlive=1000)
fitter = Fitter(dir_name)
cs = ["#262232", "#116A71", "#48AB75", "#D1E05B"]
d = PowerSpectrum_DESIMockChallenge_Handshake(min_k=0.005, max_k=0.3, isotropic=False, realisation="data", fit_poles=[0, 2])
fitter.add_model_and_dataset(PowerBeutler2017(isotropic=False), d, name=f"Beutler 2017 Prerecon", color=cs[0])
fitter.add_model_and_dataset(PowerSeo2016(isotropic=False), d, name=f"Seo 2016 Prerecon", color=cs[1])
fitter.add_model_and_dataset(PowerDing2018(isotropic=False), d, name=f"Ding 2018 Prerecon", color=cs[2])
fitter.add_model_and_dataset(PowerNoda2019(isotropic=False), d, name=f"Noda 2019 Prerecon", color=cs[3])
fitter.set_sampler(sampler)
fitter.set_num_walkers(10)
fitter.fit(file)
if fitter.should_plot():
import logging
logging.info("Creating plots")
res = fitter.load()
from chainconsumer import ChainConsumer
import copy
c = ChainConsumer()
for posterior, weight, chain, evidence, model, data, extra in res:
chain_conv = copy.deepcopy(chain)
chain_conv[:, 0], chain_conv[:, 2] = model.get_alphas(chain[:, 0], chain[:, 2])
parameters = model.get_labels()
parameters[0] = r"$\alpha_{par}$"
parameters[2] = r"$\alpha_{perp}$"
c.add_chain(chain_conv, weights=weight, parameters=parameters, **extra)
max_post = posterior.argmax()
ps = chain_conv[max_post, :]
if extra["name"] == "Beutler 2017 Prerecon":
for l, p in zip(parameters, ps):
print(l, p)
c.configure(shade=True, bins=20, legend_artists=True, max_ticks=4, legend_kwargs={"fontsize": 8})
truth = {"$\\alpha_{par}$": 1.0, "$\\alpha_{perp}$": 1.0}
c.plotter.plot_summary(filename=[pfn + "_summary.png", pfn + "_summary.pdf"], errorbar=True, truth=truth)
c.plotter.plot(
filename=[pfn + "_contour.png", pfn + "_contour.pdf"], truth=truth, parameters={"$\\alpha_{par}$", "$\\alpha_{perp}$"}
)
c.plotter.plot(filename=[pfn + "_contour2.png", pfn + "_contour2.pdf"], truth=truth)
c.plotter.plot_walks(filename=pfn + "_walks.png", truth=truth)
c.analysis.get_latex_table(filename=pfn + "_params.txt")
# Plots the measurements and the best-fit models from each of the models tested.
# We'll also plot the ratio for everything against the smooth model.
if True:
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
plt.rc("text", usetex=True)
plt.rc("font", family="serif")
fig1, axes1 = plt.subplots(figsize=(5, 8), nrows=len(res), sharex=True, gridspec_kw={"hspace": 0.08})
fig2, axes2 = plt.subplots(figsize=(5, 8), nrows=len(res), sharex=True, gridspec_kw={"hspace": 0.08})
labels = [
r"$k \times P(k)\,(h^{-2}\,\mathrm{Mpc^{2}})$",
r"$k \times (P(k) - P_{\mathrm{smooth}}(k))\,(h^{-2}\,\mathrm{Mpc^{2}})$",
]
for fig, label in zip([fig1, fig2], labels):
ax = fig.add_subplot(111, frameon=False)
ax.set_ylabel(label)
ax.set_xlabel(r"$k\,(h\,\mathrm{Mpc^{-1}})$")
ax.tick_params(labelcolor="none", top=False, bottom=False, left=False, right=False)
counter = 0
for posterior, weight, chain, evidence, model, data, extra in res:
ks = data[0]["ks"]
err = np.sqrt(np.diag(data[0]["cov"]))
model.set_data(data)
p = model.get_param_dict(chain[np.argmax(posterior)])
mod = model.get_model(p, data[0])
smooth = model.get_model(p, data[0], smooth=True)
"""if extra["name"] == "Beutler 2017 Prerecon":
print(ks, mod[: len(ks)], mod[len(ks) :])
np.savetxt(
"Barry_bestfit_model.txt",
np.c_[ks, mod[: len(ks)], mod[len(ks) : 2 * len(ks)], mod[2 * len(ks) :]],
fmt="%g %g %g %g",
header="k P0 P2 P4",
)
from barry.utils import break_vector_and_get_blocks
pk_model_fit = break_vector_and_get_blocks(mod, len(data[0]["poles"]), data[0]["fit_pole_indices"])
diff = data[0]["pk"] - pk_model_fit
chi2 = diff.T @ data[0]["icov"] @ diff
print(chi2, (data[0]["num_mocks"] / 2) * np.log(1 + chi2 / (data[0]["num_mocks"] - 1)))
exit()"""
# Split up the different multipoles if we have them
if len(err) > len(ks):
assert len(err) % len(ks) == 0, f"Cannot split your data - have {len(err)} points and {len(ks)} modes"
errs = [col for col in err.reshape((-1, len(ks)))]
mods = [col for col in mod.reshape((-1, len(ks)))]
smooths = [col for col in smooth.reshape((-1, len(ks)))]
names = [f"pk{n}" for n in model.data[0]["fit_poles"]]
ax1 = fig1.add_subplot(axes1[counter])
axes = fig2.add_subplot(axes2[counter])
axes.spines["top"].set_color("none")
axes.spines["bottom"].set_color("none")
axes.spines["left"].set_color("none")
axes.spines["right"].set_color("none")
axes.tick_params(axis="both", which="both", labelcolor="none", top=False, bottom=False, left=False, right=False)
mfcs = ["#666666", "w"]
lines = ["-", "--"]
inner = gridspec.GridSpecFromSubplotSpec(1, len(names), subplot_spec=axes2[counter], wspace=0.08)
for i, (inn, err, mod, smooth, name, line, mfc) in enumerate(zip(inner, errs, mods, smooths, names, lines, mfcs)):
ax1.errorbar(ks, ks * data[0][name], yerr=ks * err, fmt="o", ms=4, c="#666666", mfc=mfc)
ax1.plot(ks, ks * mod, c=extra["color"], ls=line)
if counter != (len(res) - 1):
ax1.tick_params(axis="x", which="both", labelcolor="none", bottom=False, labelbottom=False)
ax1.annotate(extra["name"], xy=(0.98, 0.95), xycoords="axes fraction", ha="right", va="top")
ax2 = fig2.add_subplot(inn)
ax2.errorbar(ks, ks * (data[0][name] - smooth), yerr=ks * err, fmt="o", ms=4, c="#666666")
ax2.plot(ks, ks * (mod - smooth), c=extra["color"])
ax2.set_ylim(-80.0, 80.0)
if counter == 0:
if i == 0:
ax2.set_title(r"$P_{0}(k)$")
elif i == 1:
ax2.set_title(r"$P_{2}(k)$")
if counter != (len(res) - 1):
ax2.tick_params(axis="x", which="both", labelcolor="none", bottom=False, labelbottom=False)
if i != 0:
ax2.tick_params(axis="y", which="both", labelcolor="none", bottom=False, labelbottom=False)
ax2.annotate(extra["name"], xy=(0.98, 0.95), xycoords="axes fraction", ha="right", va="top")
counter += 1
fig1.savefig(pfn + "_bestfits.pdf", bbox_inches="tight", dpi=300, transparent=True)
fig1.savefig(pfn + "_bestfits.png", bbox_inches="tight", dpi=300, transparent=True)
fig2.savefig(pfn + "_bestfits_2.pdf", bbox_inches="tight", dpi=300, transparent=True)
fig2.savefig(pfn + "_bestfits_2.png", bbox_inches="tight", dpi=300, transparent=True)
|
|
{
"filename": "ac_calc_ind.py",
"repo_name": "gomesdasilva/ACTIN",
"repo_path": "ACTIN_extracted/ACTIN-master/actin/actin_files/ac_calc_ind.py",
"type": "Python"
}
|
#!/usr/bin/env python
# compatibility with python 2/3:
from __future__ import print_function
from __future__ import division
import os, sys
import numpy as np
# ACTIN FILES
import ac_settings as ac_set
import ac_get_win
def check_lines(wave, sel_lines, verb=False):
"""
Tests if the selected lines from config file fit inside the spectrograph
wavelength range and fit inside any spectral orders for the case of 2d
spectrum.
"""
print("\nCHECKING LINES FOR WAVELENGTH RANGE AND SP. ORDERS")
print("--------------------------------------------------")
if type(wave[0]) is np.ndarray: # 2d spec
min_spec_wave = wave[0][0]
max_spec_wave = wave[-1][-1]
spec_type = '2d'
    if type(wave[0]) in [float, np.float64]: # 1d spec
min_spec_wave = wave[0]
max_spec_wave = wave[-1]
spec_type = '1d'
# For each row (sp. line) in the config table calculate the min and max values of bandwidth
rows = len(sel_lines['ln_id'])
for k in range(rows):
ln_id = sel_lines['ln_id'][k]
ln_ctr = sel_lines['ln_ctr'][k]
ln_win = sel_lines['ln_win'][k]
if ln_win <= 0:
sys.exit("*** ERROR: line {} bandwidth is not positive.".format(ln_id))
min_wave = ln_ctr - ln_win/2.
max_wave = ln_ctr + ln_win/2.
if verb:
print("min_wave:", min_wave)
print("max_wave:", max_wave)
print("min_spec_wave:", min_spec_wave)
print("max_spec_wave:", max_spec_wave)
# Check if line fits inside spectral range
if min_wave < min_spec_wave or max_wave > max_spec_wave:
print("*** ERROR: Line {} bandwidth outside spectral range.".format(ln_id))
return False
else:
print("Line {} inside spectral range".format(ln_id))
# If wave is 2d check if line fits inside sp. order
if spec_type == '2d':
order = None
ln_ctr_orders = []
order = []
for i in range(len(wave)):
if min_wave > wave[i][0] and max_wave < wave[i][-1]:
order.append(i)
                # used to show potential orders with wavelength range
if ln_ctr > wave[i][0] and ln_ctr < wave[i][-1]:
ln_ctr_orders.append(i)
            if not order:
print("*** ERROR: Could not determine sp. order for {}".format(ln_id))
print("\tmin_wave = {:.2f}".format(min_wave))
print("\tmax_wave = {:.2f}".format(max_wave))
print("\tThe closest orders are:")
for k in range(len(ln_ctr_orders)):
closest_ord = ln_ctr_orders[k]
print("\tOrder {}: {:.2f}-{:.2f}".format(closest_ord, wave[closest_ord][0], wave[closest_ord][-1]))
sys.exit()
else:
for i in range(len(order)):
print("Line {} inside spectral order {}".format(ln_id, order[i]))
return True
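# Example of the 'sel_lines' structure expected below (values are illustrative;
# in ACTIN this dictionary is read from the config_lines file). The Ca II H & K
# wavelengths are standard, the rest is a sketch:
#
#   sel_lines = {'ind_id'  : ['I_CaII', 'I_CaII', 'I_CaII'],
#                'ind_var' : ['L1', 'L2', 'R1'],
#                'ln_id'   : ['CaIIK', 'CaIIH', 'Rcont'],
#                'ln_c'    : [1.0, 1.0, 1.0],
#                'ln_ctr'  : [3933.664, 3968.470, 4001.07],
#                'ln_win'  : [1.09, 1.09, 20.0],
#                'bandtype': ['tri', 'tri', 'sq']}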
def calc_flux_lines(data, sel_lines, ln_plts=False, frac=True):
"""
Calculates the sum of the flux and associated errors for all spectral lines required to calculate the selected indices.
Parameters:
-----------
data : dict
Dictionary with data returned from fits files.
The required keys are:
========== ========================================================
keys Description
---------- --------------------------------------------------------
flux list of lists : Flux per pixel per order.
wave list of lists : Wavelength calibrated for BERV and RV
(at rest frame) per pixel per order [angstroms].
blaze list of lists : Blaze function.
snr list : SNR at each spectral order.
obj str : Object identification.
        obs_date   str : Date of observation in the fits file format.
========== ========================================================
sel_lines : dict
Dictionary containing the identification of the indices selected and
the parameters of the spectral lines required for each index.
The required keys are:
========== ========================================================
keys Description
---------- --------------------------------------------------------
ind_id str : Index identification.
ind_var str : Variable assigned to a given line to be used in
the index equation. Ex: 'L1', 'L2', etc, for the core
lines, and 'R1', 'R2', etc, for reference lines.
ln_id str : Spectral line identification.
ln_ctr float : Wavelength of the line centre [angstroms].
ln_win float : Bandpass around the line centre to be used in
the flux integration [angstroms].
bandtype str : Type of bandpass used to integrate flux.
========== ========================================================
ln_plts : {str, False} (optional)
Path for the saved line plots. If False, the plots are not saved
(default).
frac : bool (optional)
Use fractional pixels if 'True' (default), use integral pixels if 'False'.
Returns:
--------
sel_lines : dict
Dictionary containing the identification of the indices selected and
the parameters of the spectral lines required for each index.
Included in the output are also the keys used as input (see above).
The returned keys are:
========== ========================================================
keys Description
---------- --------------------------------------------------------
flux list : Integrated flux for each line.
error list : Errors on integrated flux for each line.
snr list : SNR for each line.
flg list : Flag for each line, 'negFlux' if negative flux
found inside any bandpass.
frac_neg list : Fraction of pixels with negative values of flux
for each line.
npixels float : Number of pixels inside the bandpass.
========== ========================================================
"""
print()
print("CALCULATING FLUX IN SELECTED LINES")
print("----------------------------------")
wave = data['wave']
flux = data['flux']
blaze = data['blaze']
obj = data['obj']
obs_date = data['obs_date']
snr = data['snr']
sel_lines['flux'] = []
sel_lines['error'] = []
sel_lines['snr'] = []
sel_lines['flg'] = []
sel_lines['frac_neg'] = []
ln_ctr = sel_lines['ln_ctr']
ln_win = sel_lines['ln_win']
ln_id = sel_lines['ln_id']
ind_var = sel_lines['ind_var']
bandtype = sel_lines['bandtype']
if 'error_pixel' in list(data):
# case where pixel errors are given in rdb file as "error_pixel"
print("Using pixel errors from input rdb file.")
err = data['error_pixel']
else: err = None
for k in range(len(ln_id)):
print()
print("Computing flux in line {}".format(ln_id[k]))
print("-----------------------{}".format('-'*len(ln_id[k])))
print("Using {} bandpass".format(bandtype[k]))
win = ac_get_win.get_win(data, ln_id[k], ln_ctr[k], ln_win[k], bandtype[k], blaze=blaze, err=err, frac=frac, ln_plts=ln_plts)
sel_lines['flux'].append(win['sum'])
sel_lines['error'].append(win['sum_err'])
sel_lines['snr'].append(win['snr'])
sel_lines['flg'].append(win['flg'])
sel_lines['frac_neg'].append(win['frac_neg'])
return sel_lines
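# Typical call sequence (sketch; 'data' is normally assembled by the ACTIN
# fits-reading modules, with the keys described in the docstring above plus
# 'obs_date'):
#
#   if check_lines(data['wave'], sel_lines):
#       sel_lines = calc_flux_lines(data, sel_lines, ln_plts=False)
#       index = calc_ind(sel_lines)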
def calc_ind(sel_lines):
"""
Calculates the indices identified in sel_lines as 'ind_id'.
sel_lines : dict
Dictionary containing the identification of the indices selected and
the parameters of the spectral lines required for each index.
The required keys are:
========== ========================================================
keys Description
---------- --------------------------------------------------------
ind_id str : Index identification.
ind_var str : Variable assigned to a given line to be used in
the index equation. Ex: 'L1', 'L2', etc, for the core
lines, and 'R1', 'R2', etc, for reference lines.
ln_id str : Spectral line identification.
flux list : Integrated flux for each line.
error list : Errors on integrated flux for each line.
snr list : SNR for each line.
flg list : Flag for each line, 'negFlux' if negative flux
found inside any bandpass.
frac_neg list : Fraction of pixels with negative values of flux
for each line.
npixels float : Number of pixels inside the bandpass.
========== ========================================================
Returns:
--------
index : dict
Dictionary containing the index values, errors and related
information.
The returned keys are:
========== ========================================================
keys Description
---------- --------------------------------------------------------
index str : Index identification.
value float : Index value.
error float : Index error.
flg {str, None} : Index flag, 'negFlux' if negative flux
found inside any bandpass.
mfrac_neg float : Maximum fraction of pixels with negative values
of flux taking into account all lines used to compute
index.
snr float : Mean SNR of all lines used to compute index.
npixels float : Number of pixels inside the bandpass.
========== ========================================================
"""
print()
print("CALCULATING INDICES")
print("-------------------")
# remove duplicates of ind_id and gives a list of selected indices
sel_ind = list(set(sel_lines['ind_id']))
sel_ind = np.asarray(sel_ind)
index = {}
index['index'] = []
index['value'] = []
index['error'] = []
index['flg'] = []
index['mfrac_neg'] = []
index['snr'] = []
print("index\tvalue\terror\t\tsnr\tflag\tmfrac_neg")
print("-----\t-----\t-----\t\t---\t----\t---------")
ind_ids = np.asarray(sel_lines['ind_id'])
rows = len(sel_lines['ln_id'])
for i in range(len(sel_ind)): # each index
var = [sel_lines['ind_var'][k] for k in range(rows) \
if ind_ids[k] == sel_ind[i]]
flux = [sel_lines['flux'][k] for k in range(rows) \
if ind_ids[k] == sel_ind[i]]
err = [sel_lines['error'][k] for k in range(rows) \
if ind_ids[k] == sel_ind[i]]
flg = [sel_lines['flg'][k] for k in range(rows) \
if ind_ids[k] == sel_ind[i]]
frac_neg = [sel_lines['frac_neg'][k] for k in range(rows) \
if ind_ids[k] == sel_ind[i]]
snr = [sel_lines['snr'][k] for k in range(rows) \
if ind_ids[k] == sel_ind[i]]
ln_c = [sel_lines['ln_c'][k] for k in range(rows) \
if ind_ids[k] == sel_ind[i]]
# Maximum fraction of flux with negative values of all lines in index
mfrac_neg = max(frac_neg)
if "negFlux" in flg: flg_ind = 'negFlux'
else: flg_ind = None
# Median snr of index bandpasses:
if snr is None or snr[0] is None:
snr_ind = None
else:
snr_ind = np.median(snr)
for k in range(len(var)):
if 'L' not in var[k] and 'R' not in var[k]:
msg="*** ERROR: 'ind_var' variable (in config file config_lines.txt) must start with either an 'L' for core line or 'R' for reference line. Value given was '{}'".format(var[k])
sys.exit(msg)
# Add line variables for numerator or denominator:
num = [ln_c[k]*flux[k] for k in range(len(var)) if 'L' in var[k]]
num_err = [ln_c[k]*err[k] for k in range(len(var)) if 'L' in var[k]]
denom = [ln_c[k]*flux[k] for k in range(len(var)) if 'R' in var[k]]
denom_err = [ln_c[k]*err[k] for k in range(len(var)) if 'R' in var[k]]
num = np.asarray(num)
denom = np.asarray(denom)
num_err = np.asarray(num_err)
denom_err = np.asarray(denom_err)
ind = sum(num) / sum(denom)
# Error using propagation of errors for lines and ref lines
ind_err = np.sqrt(sum(num_err**2) + ind**2 * sum(denom_err**2)) /sum(denom)
if snr_ind: snr_ind = round(snr_ind, 2)
index['index'].append(sel_ind[i])
index['value'].append(ind)
index['error'].append(ind_err)
index['flg'].append(flg_ind)
index['mfrac_neg'].append(mfrac_neg)
index['snr'].append(snr_ind)
print("{}\t{:.4f}\t{:.6f}\t{}\t{}\t{:.4f}".format(index['index'][i], index['value'][i], index['error'][i], index['snr'][i], index['flg'][i], index['mfrac_neg'][i]))
return index
|
|
{
"filename": "Main.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/cython/Cython/Compiler/Main.py",
"type": "Python"
}
|
#
# Cython Top Level
#
from __future__ import absolute_import
import os
import re
import sys
import io
if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[:2] < (3, 3):
sys.stderr.write("Sorry, Cython requires Python 2.6+ or 3.3+, found %d.%d\n" % tuple(sys.version_info[:2]))
sys.exit(1)
try:
from __builtin__ import basestring
except ImportError:
basestring = str
# Do not import Parsing here, import it when needed, because Parsing imports
# Nodes, which globally needs debug command line options initialized to set a
# conditional metaclass. These options are processed by CmdLine called from
# main() in this file.
# import Parsing
from . import Errors
from .StringEncoding import EncodedString
from .Scanning import PyrexScanner, FileSourceDescriptor
from .Errors import PyrexError, CompileError, error, warning
from .Symtab import ModuleScope
from .. import Utils
from . import Options
from . import Version # legacy import needed by old PyTables versions
version = Version.version # legacy attribute - use "Cython.__version__" instead
module_name_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$")
verbose = 0
standard_include_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir, 'Includes'))
class CompilationData(object):
# Bundles the information that is passed from transform to transform.
# (For now, this is only)
# While Context contains every pxd ever loaded, path information etc.,
# this only contains the data related to a single compilation pass
#
# pyx ModuleNode Main code tree of this compilation.
# pxds {string : ModuleNode} Trees for the pxds used in the pyx.
# codewriter CCodeWriter Where to output final code.
# options CompilationOptions
# result CompilationResult
pass
class Context(object):
# This class encapsulates the context needed for compiling
# one or more Cython implementation files along with their
# associated and imported declaration files. It includes
# the root of the module import namespace and the list
# of directories to search for include files.
#
# modules {string : ModuleScope}
# include_directories [string]
# future_directives [object]
# language_level int currently 2 or 3 for Python 2/3
cython_scope = None
language_level = None # warn when not set but default to Py2
def __init__(self, include_directories, compiler_directives, cpp=False,
language_level=None, options=None):
# cython_scope is a hack, set to False by subclasses, in order to break
# an infinite loop.
# Better code organization would fix it.
from . import Builtin, CythonScope
self.modules = {"__builtin__" : Builtin.builtin_scope}
self.cython_scope = CythonScope.create_cython_scope(self)
self.modules["cython"] = self.cython_scope
self.include_directories = include_directories
self.future_directives = set()
self.compiler_directives = compiler_directives
self.cpp = cpp
self.options = options
self.pxds = {} # full name -> node tree
self._interned = {} # (type(value), value, *key_args) -> interned_value
if language_level is not None:
self.set_language_level(language_level)
self.gdb_debug_outputwriter = None
def set_language_level(self, level):
from .Future import print_function, unicode_literals, absolute_import, division
future_directives = set()
if level == '3str':
level = 3
else:
level = int(level)
if level >= 3:
future_directives.add(unicode_literals)
if level >= 3:
future_directives.update([print_function, absolute_import, division])
self.language_level = level
self.future_directives = future_directives
if level >= 3:
self.modules['builtins'] = self.modules['__builtin__']
def intern_ustring(self, value, encoding=None):
key = (EncodedString, value, encoding)
try:
return self._interned[key]
except KeyError:
pass
value = EncodedString(value)
if encoding:
value.encoding = encoding
self._interned[key] = value
return value
def intern_value(self, value, *key):
key = (type(value), value) + key
try:
return self._interned[key]
except KeyError:
pass
self._interned[key] = value
return value
# pipeline creation functions can now be found in Pipeline.py
def process_pxd(self, source_desc, scope, module_name):
from . import Pipeline
if isinstance(source_desc, FileSourceDescriptor) and source_desc._file_type == 'pyx':
source = CompilationSource(source_desc, module_name, os.getcwd())
result_sink = create_default_resultobj(source, self.options)
pipeline = Pipeline.create_pyx_as_pxd_pipeline(self, result_sink)
result = Pipeline.run_pipeline(pipeline, source)
else:
pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name)
result = Pipeline.run_pipeline(pipeline, source_desc)
return result
def nonfatal_error(self, exc):
return Errors.report_error(exc)
def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1,
absolute_fallback=True):
# Finds and returns the module scope corresponding to
# the given relative or absolute module name. If this
# is the first time the module has been requested, finds
# the corresponding .pxd file and process it.
# If relative_to is not None, it must be a module scope,
# and the module will first be searched for relative to
# that module, provided its name is not a dotted name.
debug_find_module = 0
if debug_find_module:
print("Context.find_module: module_name = %s, relative_to = %s, pos = %s, need_pxd = %s" % (
module_name, relative_to, pos, need_pxd))
scope = None
pxd_pathname = None
if relative_to:
if module_name:
# from .module import ...
qualified_name = relative_to.qualify_name(module_name)
else:
# from . import ...
qualified_name = relative_to.qualified_name
scope = relative_to
relative_to = None
else:
qualified_name = module_name
if not module_name_pattern.match(qualified_name):
raise CompileError(pos or (module_name, 0, 0),
"'%s' is not a valid module name" % module_name)
if relative_to:
if debug_find_module:
print("...trying relative import")
scope = relative_to.lookup_submodule(module_name)
if not scope:
pxd_pathname = self.find_pxd_file(qualified_name, pos)
if pxd_pathname:
scope = relative_to.find_submodule(module_name)
if not scope:
if debug_find_module:
print("...trying absolute import")
if absolute_fallback:
qualified_name = module_name
scope = self
for name in qualified_name.split("."):
scope = scope.find_submodule(name)
if debug_find_module:
print("...scope = %s" % scope)
if not scope.pxd_file_loaded:
if debug_find_module:
print("...pxd not loaded")
if not pxd_pathname:
if debug_find_module:
print("...looking for pxd file")
pxd_pathname = self.find_pxd_file(qualified_name, pos)
if debug_find_module:
print("......found %s" % pxd_pathname)
if not pxd_pathname and need_pxd:
# Set pxd_file_loaded such that we don't need to
# look for the non-existing pxd file next time.
scope.pxd_file_loaded = True
package_pathname = self.search_include_directories(qualified_name, ".py", pos)
if package_pathname and package_pathname.endswith('__init__.py'):
pass
else:
error(pos, "'%s.pxd' not found" % qualified_name.replace('.', os.sep))
if pxd_pathname:
scope.pxd_file_loaded = True
try:
if debug_find_module:
print("Context.find_module: Parsing %s" % pxd_pathname)
rel_path = module_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1]
if not pxd_pathname.endswith(rel_path):
rel_path = pxd_pathname # safety measure to prevent printing incorrect paths
if Options.source_root:
rel_path = os.path.relpath(pxd_pathname, Options.source_root)
source_desc = FileSourceDescriptor(pxd_pathname, rel_path)
err, result = self.process_pxd(source_desc, scope, qualified_name)
if err:
raise err
(pxd_codenodes, pxd_scope) = result
self.pxds[module_name] = (pxd_codenodes, pxd_scope)
except CompileError:
pass
return scope
def find_pxd_file(self, qualified_name, pos, sys_path=False):
# Search include path (and sys.path if sys_path is True) for
# the .pxd file corresponding to the given fully-qualified
# module name.
# Will find either a dotted filename or a file in a
# package directory. If a source file position is given,
# the directory containing the source file is searched first
# for a dotted filename, and its containing package root
# directory is searched first for a non-dotted filename.
pxd = self.search_include_directories(qualified_name, ".pxd", pos, sys_path=sys_path)
if pxd is None: # XXX Keep this until Includes/Deprecated is removed
if (qualified_name.startswith('python') or
qualified_name in ('stdlib', 'stdio', 'stl')):
standard_include_path = os.path.abspath(os.path.normpath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
deprecated_include_path = os.path.join(standard_include_path, 'Deprecated')
self.include_directories.append(deprecated_include_path)
try:
pxd = self.search_include_directories(qualified_name, ".pxd", pos)
finally:
self.include_directories.pop()
if pxd:
name = qualified_name
if name.startswith('python'):
warning(pos, "'%s' is deprecated, use 'cpython'" % name, 1)
elif name in ('stdlib', 'stdio'):
warning(pos, "'%s' is deprecated, use 'libc.%s'" % (name, name), 1)
                    elif name == 'stl':
warning(pos, "'%s' is deprecated, use 'libcpp.*.*'" % name, 1)
if pxd is None and Options.cimport_from_pyx:
return self.find_pyx_file(qualified_name, pos)
return pxd
def find_pyx_file(self, qualified_name, pos):
# Search include path for the .pyx file corresponding to the
# given fully-qualified module name, as for find_pxd_file().
return self.search_include_directories(qualified_name, ".pyx", pos)
def find_include_file(self, filename, pos):
# Search list of include directories for filename.
# Reports an error and returns None if not found.
path = self.search_include_directories(filename, "", pos,
include=True)
if not path:
error(pos, "'%s' not found" % filename)
return path
def search_include_directories(self, qualified_name, suffix, pos,
include=False, sys_path=False):
include_dirs = self.include_directories
if sys_path:
include_dirs = include_dirs + sys.path
# include_dirs must be hashable for caching in @cached_function
include_dirs = tuple(include_dirs + [standard_include_path])
return search_include_directories(include_dirs, qualified_name,
suffix, pos, include)
def find_root_package_dir(self, file_path):
return Utils.find_root_package_dir(file_path)
def check_package_dir(self, dir, package_names):
return Utils.check_package_dir(dir, tuple(package_names))
def c_file_out_of_date(self, source_path, output_path):
if not os.path.exists(output_path):
return 1
c_time = Utils.modification_time(output_path)
if Utils.file_newer_than(source_path, c_time):
return 1
pos = [source_path]
pxd_path = Utils.replace_suffix(source_path, ".pxd")
if os.path.exists(pxd_path) and Utils.file_newer_than(pxd_path, c_time):
return 1
for kind, name in self.read_dependency_file(source_path):
if kind == "cimport":
dep_path = self.find_pxd_file(name, pos)
elif kind == "include":
                dep_path = self.search_include_directories(name, "", pos)
else:
continue
if dep_path and Utils.file_newer_than(dep_path, c_time):
return 1
return 0
def find_cimported_module_names(self, source_path):
return [ name for kind, name in self.read_dependency_file(source_path)
if kind == "cimport" ]
def is_package_dir(self, dir_path):
return Utils.is_package_dir(dir_path)
def read_dependency_file(self, source_path):
dep_path = Utils.replace_suffix(source_path, ".dep")
if os.path.exists(dep_path):
f = open(dep_path, "rU")
chunks = [ line.strip().split(" ", 1)
for line in f.readlines()
if " " in line.strip() ]
f.close()
return chunks
else:
return ()
def lookup_submodule(self, name):
# Look up a top-level module. Returns None if not found.
return self.modules.get(name, None)
def find_submodule(self, name):
# Find a top-level module, creating a new one if needed.
scope = self.lookup_submodule(name)
if not scope:
scope = ModuleScope(name,
parent_module = None, context = self)
self.modules[name] = scope
return scope
def parse(self, source_desc, scope, pxd, full_module_name):
if not isinstance(source_desc, FileSourceDescriptor):
raise RuntimeError("Only file sources for code supported")
source_filename = source_desc.filename
scope.cpp = self.cpp
# Parse the given source file and return a parse tree.
num_errors = Errors.num_errors
try:
with Utils.open_source_file(source_filename) as f:
from . import Parsing
s = PyrexScanner(f, source_desc, source_encoding = f.encoding,
scope = scope, context = self)
tree = Parsing.p_module(s, pxd, full_module_name)
if self.options.formal_grammar:
try:
from ..Parser import ConcreteSyntaxTree
except ImportError:
raise RuntimeError(
"Formal grammar can only be used with compiled Cython with an available pgen.")
ConcreteSyntaxTree.p_module(source_filename)
except UnicodeDecodeError as e:
#import traceback
#traceback.print_exc()
raise self._report_decode_error(source_desc, e)
if Errors.num_errors > num_errors:
raise CompileError()
return tree
def _report_decode_error(self, source_desc, exc):
msg = exc.args[-1]
position = exc.args[2]
encoding = exc.args[0]
line = 1
column = idx = 0
with io.open(source_desc.filename, "r", encoding='iso8859-1', newline='') as f:
for line, data in enumerate(f, 1):
idx += len(data)
if idx >= position:
column = position - (idx - len(data)) + 1
break
return error((source_desc, line, column),
"Decoding error, missing or incorrect coding=<encoding-name> "
"at top of source (cannot decode with encoding %r: %s)" % (encoding, msg))
def extract_module_name(self, path, options):
# Find fully_qualified module name from the full pathname
# of a source file.
dir, filename = os.path.split(path)
module_name, _ = os.path.splitext(filename)
if "." in module_name:
return module_name
names = [module_name]
while self.is_package_dir(dir):
parent, package_name = os.path.split(dir)
if parent == dir:
break
names.append(package_name)
dir = parent
names.reverse()
return ".".join(names)
def setup_errors(self, options, result):
Errors.reset() # clear any remaining error state
if options.use_listing_file:
path = result.listing_file = Utils.replace_suffix(result.main_source_file, ".lis")
else:
path = None
Errors.open_listing_file(path=path,
echo_to_stderr=options.errors_to_stderr)
def teardown_errors(self, err, options, result):
source_desc = result.compilation_source.source_desc
if not isinstance(source_desc, FileSourceDescriptor):
raise RuntimeError("Only file sources for code supported")
Errors.close_listing_file()
result.num_errors = Errors.num_errors
if result.num_errors > 0:
err = True
if err and result.c_file:
try:
Utils.castrate_file(result.c_file, os.stat(source_desc.filename))
except EnvironmentError:
pass
result.c_file = None
def get_output_filename(source_filename, cwd, options):
if options.cplus:
c_suffix = ".cpp"
else:
c_suffix = ".c"
suggested_file_name = Utils.replace_suffix(source_filename, c_suffix)
if options.output_file:
out_path = os.path.join(cwd, options.output_file)
if os.path.isdir(out_path):
return os.path.join(out_path, os.path.basename(suggested_file_name))
else:
return out_path
else:
return suggested_file_name
def create_default_resultobj(compilation_source, options):
result = CompilationResult()
result.main_source_file = compilation_source.source_desc.filename
result.compilation_source = compilation_source
source_desc = compilation_source.source_desc
result.c_file = get_output_filename(source_desc.filename,
compilation_source.cwd, options)
result.embedded_metadata = options.embedded_metadata
return result
def run_pipeline(source, options, full_module_name=None, context=None):
from . import Pipeline
source_ext = os.path.splitext(source)[1]
options.configure_language_defaults(source_ext[1:]) # py/pyx
if context is None:
context = options.create_context()
# Set up source object
cwd = os.getcwd()
abs_path = os.path.abspath(source)
full_module_name = full_module_name or options.module_name or context.extract_module_name(source, options)
Utils.raise_error_if_module_name_forbidden(full_module_name)
if options.relative_path_in_code_position_comments:
rel_path = full_module_name.replace('.', os.sep) + source_ext
if not abs_path.endswith(rel_path):
rel_path = source # safety measure to prevent printing incorrect paths
else:
rel_path = abs_path
if Options.source_root:
rel_path = os.path.relpath(abs_path, Options.source_root)
source_desc = FileSourceDescriptor(abs_path, rel_path)
source = CompilationSource(source_desc, full_module_name, cwd)
# Set up result object
result = create_default_resultobj(source, options)
if options.annotate is None:
# By default, decide based on whether an html file already exists.
html_filename = os.path.splitext(result.c_file)[0] + ".html"
if os.path.exists(html_filename):
with io.open(html_filename, "r", encoding="UTF-8") as html_file:
if u'<!-- Generated by Cython' in html_file.read(100):
options.annotate = True
# Get pipeline
if source_ext.lower() == '.py' or not source_ext:
pipeline = Pipeline.create_py_pipeline(context, options, result)
else:
pipeline = Pipeline.create_pyx_pipeline(context, options, result)
context.setup_errors(options, result)
err, enddata = Pipeline.run_pipeline(pipeline, source)
context.teardown_errors(err, options, result)
if err is None and options.depfile:
from ..Build.Dependencies import create_dependency_tree
dependencies = create_dependency_tree(context).all_dependencies(result.main_source_file)
Utils.write_depfile(result.c_file, result.main_source_file, dependencies)
return result
# ------------------------------------------------------------------------
#
# Main Python entry points
#
# ------------------------------------------------------------------------
class CompilationSource(object):
"""
Contains the data necessary to start up a compilation pipeline for
a single compilation unit.
"""
def __init__(self, source_desc, full_module_name, cwd):
self.source_desc = source_desc
self.full_module_name = full_module_name
self.cwd = cwd
class CompilationOptions(object):
r"""
See default_options at the end of this module for a list of all possible
options and CmdLine.usage and CmdLine.parse_command_line() for their
meaning.
"""
def __init__(self, defaults=None, **kw):
self.include_path = []
if defaults:
if isinstance(defaults, CompilationOptions):
defaults = defaults.__dict__
else:
defaults = default_options
options = dict(defaults)
options.update(kw)
# let's assume 'default_options' contains a value for most known compiler options
# and validate against them
unknown_options = set(options) - set(default_options)
# ignore valid options that are not in the defaults
unknown_options.difference_update(['include_path'])
if unknown_options:
message = "got unknown compilation option%s, please remove: %s" % (
's' if len(unknown_options) > 1 else '',
', '.join(unknown_options))
raise ValueError(message)
directive_defaults = Options.get_directive_defaults()
directives = dict(options['compiler_directives']) # copy mutable field
# check for invalid directives
unknown_directives = set(directives) - set(directive_defaults)
if unknown_directives:
message = "got unknown compiler directive%s: %s" % (
's' if len(unknown_directives) > 1 else '',
', '.join(unknown_directives))
raise ValueError(message)
options['compiler_directives'] = directives
if directives.get('np_pythran', False) and not options['cplus']:
import warnings
warnings.warn("C++ mode forced when in Pythran mode!")
options['cplus'] = True
if 'language_level' in directives and 'language_level' not in kw:
options['language_level'] = directives['language_level']
elif not options.get('language_level'):
options['language_level'] = directive_defaults.get('language_level')
if 'formal_grammar' in directives and 'formal_grammar' not in kw:
options['formal_grammar'] = directives['formal_grammar']
if options['cache'] is True:
options['cache'] = os.path.join(Utils.get_cython_cache_dir(), 'compiler')
self.__dict__.update(options)
def configure_language_defaults(self, source_extension):
if source_extension == 'py':
if self.compiler_directives.get('binding') is None:
self.compiler_directives['binding'] = True
def create_context(self):
return Context(self.include_path, self.compiler_directives,
self.cplus, self.language_level, options=self)
def get_fingerprint(self):
r"""
Return a string that contains all the options that are relevant for cache invalidation.
"""
# Collect only the data that can affect the generated file(s).
data = {}
for key, value in self.__dict__.items():
if key in ['show_version', 'errors_to_stderr', 'verbose', 'quiet']:
# verbosity flags have no influence on the compilation result
continue
elif key in ['output_file', 'output_dir']:
# ignore the exact name of the output file
continue
elif key in ['timestamps']:
# the cache cares about the content of files, not about the timestamps of sources
continue
elif key in ['cache']:
# hopefully caching has no influence on the compilation result
continue
elif key in ['compiler_directives']:
# directives passed on to the C compiler do not influence the generated C code
continue
elif key in ['include_path']:
# this path changes which headers are tracked as dependencies,
# it has no influence on the generated C code
continue
elif key in ['working_path']:
# this path changes where modules and pxd files are found;
# their content is part of the fingerprint anyway, their
# absolute path does not matter
continue
elif key in ['create_extension']:
# create_extension() has already mangled the options, e.g.,
# embedded_metadata, when the fingerprint is computed so we
# ignore it here.
continue
elif key in ['build_dir']:
# the (temporary) directory where we collect dependencies
# has no influence on the C output
continue
elif key in ['use_listing_file', 'generate_pxi', 'annotate', 'annotate_coverage_xml']:
# all output files are contained in the cache so the types of
# files generated must be part of the fingerprint
data[key] = value
elif key in ['formal_grammar', 'evaluate_tree_assertions']:
# these bits can change whether compilation to C passes/fails
data[key] = value
elif key in ['embedded_metadata', 'emit_linenums', 'c_line_in_traceback', 'gdb_debug', 'relative_path_in_code_position_comments']:
# the generated code contains additional bits when these are set
data[key] = value
elif key in ['cplus', 'language_level', 'compile_time_env', 'np_pythran']:
# assorted bits that, e.g., influence the parser
data[key] = value
            elif key in ['capi_reexport_cincludes']:
if self.capi_reexport_cincludes:
# our caching implementation does not yet include fingerprints of all the header files
raise NotImplementedError('capi_reexport_cincludes is not compatible with Cython caching')
            elif key in ['common_utility_include_dir']:
if self.common_utility_include_dir:
raise NotImplementedError('common_utility_include_dir is not compatible with Cython caching yet')
else:
# any unexpected option should go into the fingerprint; it's better
# to recompile than to return incorrect results from the cache.
data[key] = value
def to_fingerprint(item):
r"""
Recursively turn item into a string, turning dicts into lists with
deterministic ordering.
"""
if isinstance(item, dict):
item = sorted([(repr(key), to_fingerprint(value)) for key, value in item.items()])
return repr(item)
return to_fingerprint(data)
class CompilationResult(object):
"""
Results from the Cython compiler:
c_file string or None The generated C source file
h_file string or None The generated C header file
i_file string or None The generated .pxi file
api_file string or None The generated C API .h file
listing_file string or None File of error messages
object_file string or None Result of compiling the C file
extension_file string or None Result of linking the object file
num_errors integer Number of compilation errors
compilation_source CompilationSource
"""
def __init__(self):
self.c_file = None
self.h_file = None
self.i_file = None
self.api_file = None
self.listing_file = None
self.object_file = None
self.extension_file = None
self.main_source_file = None
class CompilationResultSet(dict):
"""
Results from compiling multiple Pyrex source files. A mapping
from source file paths to CompilationResult instances. Also
has the following attributes:
num_errors integer Total number of compilation errors
"""
num_errors = 0
def add(self, source, result):
self[source] = result
self.num_errors += result.num_errors
def compile_single(source, options, full_module_name = None):
"""
compile_single(source, options, full_module_name)
Compile the given Pyrex implementation file and return a CompilationResult.
Always compiles a single file; does not perform timestamp checking or
recursion.
"""
return run_pipeline(source, options, full_module_name)
def compile_multiple(sources, options):
"""
compile_multiple(sources, options)
Compiles the given sequence of Pyrex implementation files and returns
a CompilationResultSet. Performs timestamp checking and/or recursion
if these are specified in the options.
"""
if options.module_name and len(sources) > 1:
raise RuntimeError('Full module name can only be set '
'for single source compilation')
# run_pipeline creates the context
# context = options.create_context()
sources = [os.path.abspath(source) for source in sources]
processed = set()
results = CompilationResultSet()
timestamps = options.timestamps
verbose = options.verbose
context = None
cwd = os.getcwd()
for source in sources:
if source not in processed:
if context is None:
context = options.create_context()
output_filename = get_output_filename(source, cwd, options)
out_of_date = context.c_file_out_of_date(source, output_filename)
if (not timestamps) or out_of_date:
if verbose:
sys.stderr.write("Compiling %s\n" % source)
result = run_pipeline(source, options,
full_module_name=options.module_name,
context=context)
results.add(source, result)
# Compiling multiple sources in one context doesn't quite
# work properly yet.
context = None
processed.add(source)
return results
def compile(source, options = None, full_module_name = None, **kwds):
"""
compile(source [, options], [, <option> = <value>]...)
Compile one or more Pyrex implementation files, with optional timestamp
checking and recursing on dependencies. The source argument may be a string
or a sequence of strings. If it is a string and no recursion or timestamp
checking is requested, a CompilationResult is returned, otherwise a
CompilationResultSet is returned.
"""
options = CompilationOptions(defaults = options, **kwds)
if isinstance(source, basestring) and not options.timestamps:
return compile_single(source, options, full_module_name)
else:
return compile_multiple(source, options)
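# Example usage (an illustrative sketch only, not part of the original module;
# it assumes a local "example.pyx" file exists):
#
#     from Cython.Compiler.Main import compile, CompilationOptions, default_options
#     options = CompilationOptions(default_options, language_level=3)
#     result = compile("example.pyx", options)
#     print(result.c_file, result.num_errors)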
@Utils.cached_function
def search_include_directories(dirs, qualified_name, suffix, pos, include=False):
"""
Search the list of include directories for the given file name.
If a source file position is given, first searches the directory
containing that file. Returns None if not found, but does not
report an error.
The 'include' option will disable package dereferencing.
"""
if pos:
file_desc = pos[0]
if not isinstance(file_desc, FileSourceDescriptor):
raise RuntimeError("Only file sources for code supported")
if include:
dirs = (os.path.dirname(file_desc.filename),) + dirs
else:
dirs = (Utils.find_root_package_dir(file_desc.filename),) + dirs
dotted_filename = qualified_name
if suffix:
dotted_filename += suffix
if not include:
names = qualified_name.split('.')
package_names = tuple(names[:-1])
module_name = names[-1]
module_filename = module_name + suffix
package_filename = "__init__" + suffix
for dirname in dirs:
path = os.path.join(dirname, dotted_filename)
if os.path.exists(path):
return path
# Arcadia-specific lookup: search for packages in include paths,
# ignoring existence of __init__.py files as packages markers
# (they are not required by Arcadia build system)
if not include:
package_dir = os.path.join(dirname, *package_names)
path = os.path.join(package_dir, module_filename)
if os.path.exists(path):
return path
path = os.path.join(dirname, package_dir, module_name,
package_filename)
if os.path.exists(path):
return path
return None
# ------------------------------------------------------------------------
#
# Main command-line entry point
#
# ------------------------------------------------------------------------
def setuptools_main():
return main(command_line = 1)
def main(command_line = 0):
args = sys.argv[1:]
any_failures = 0
if command_line:
from .CmdLine import parse_command_line
options, sources = parse_command_line(args)
else:
options = CompilationOptions(default_options)
sources = args
if options.show_version:
sys.stderr.write("Cython version %s\n" % version)
    if options.working_path != "":
os.chdir(options.working_path)
try:
result = compile(sources, options)
if result.num_errors > 0:
any_failures = 1
except (EnvironmentError, PyrexError) as e:
sys.stderr.write(str(e) + '\n')
any_failures = 1
if any_failures:
sys.exit(1)
# ------------------------------------------------------------------------
#
# Set the default options depending on the platform
#
# ------------------------------------------------------------------------
default_options = dict(
show_version = 0,
use_listing_file = 0,
errors_to_stderr = 1,
cplus = 0,
output_file = None,
depfile = None,
annotate = None,
annotate_coverage_xml = None,
generate_pxi = 0,
capi_reexport_cincludes = 0,
working_path = "",
timestamps = None,
verbose = 0,
quiet = 0,
compiler_directives = {},
embedded_metadata = {},
evaluate_tree_assertions = False,
emit_linenums = False,
relative_path_in_code_position_comments = True,
c_line_in_traceback = True,
language_level = None, # warn but default to 2
formal_grammar = False,
gdb_debug = False,
init_suffix = None,
compile_time_env = None,
common_utility_include_dir = None,
output_dir=None,
build_dir=None,
cache=None,
create_extension=None,
module_name=None,
np_pythran=False
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@cython@Cython@[email protected]@.PATH_END.py
|
{
"filename": "base.py",
"repo_name": "mj-will/nessai",
"repo_path": "nessai_extracted/nessai-main/src/nessai/reparameterisations/base.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Base reparameterisation
"""
import logging
import numpy as np
logger = logging.getLogger(__name__)
class Reparameterisation:
"""
Base object for reparameterisations.
Parameters
----------
parameters : str or list
Name of parameters to reparameterise.
prior_bounds : list, dict or None
Prior bounds for the parameter(s).
"""
_update = False
has_prior = False
has_prime_prior = False
requires_prime_prior = False
requires_bounded_prior = False
prior_bounds = None
prime_prior_bounds = None
one_to_one = True
def __init__(self, parameters=None, prior_bounds=None, rng=None):
if rng is None:
logger.debug("No rng specified, using the default rng.")
rng = np.random.default_rng()
self.rng = rng
if not isinstance(parameters, (str, list)):
raise TypeError("Parameters must be a str or list.")
self.parameters = (
[parameters] if isinstance(parameters, str) else parameters.copy()
)
if isinstance(prior_bounds, (list, tuple, np.ndarray)):
if len(prior_bounds) == 2:
prior_bounds = {self.parameters[0]: np.asarray(prior_bounds)}
else:
raise RuntimeError("Prior bounds got a list of len > 2")
elif prior_bounds is None:
if self.requires_bounded_prior:
raise RuntimeError(
f"Reparameterisation {self.name} requires prior bounds!"
)
else:
self.prior_bounds = None
elif not isinstance(prior_bounds, dict):
raise TypeError(
"Prior bounds must be a dict, tuple, list or numpy array"
" of len 2 or None."
)
if prior_bounds is not None:
if set(self.parameters) - set(prior_bounds.keys()):
raise RuntimeError(
"Mismatch between parameters and prior bounds: "
f"{set(self.parameters)}, {set(prior_bounds.keys())}"
)
self.prior_bounds = {
p: np.asarray(b) for p, b in prior_bounds.items()
}
else:
logger.debug(f"No prior bounds for {self.name}")
if self.requires_bounded_prior:
is_finite = np.isfinite(
[pb for pb in self.prior_bounds.values()]
).all()
if not is_finite:
raise RuntimeError(
f"Reparameterisation {self.name} requires finite prior "
f"bounds. Received: {self.prior_bounds}"
)
self.prime_parameters = [p + "_prime" for p in self.parameters]
self.requires = []
logger.debug(f"Initialised reparameterisation: {self.name}")
@property
def name(self):
"""Unique name of the reparameterisations"""
return (
self.__class__.__name__.lower() + "_" + "_".join(self.parameters)
)
def reparameterise(self, x, x_prime, log_j):
"""
Apply the reparameterisation to convert from x-space to x'-space.
Parameters
----------
x : structured array
Array of inputs
x_prime : structured array
            Array to be updated
log_j : array_like
Log jacobian to be updated
Returns
-------
x, x_prime : structured arrays
            Updated versions of the x and x_prime arrays
log_j : array_like
Updated log Jacobian determinant
"""
raise NotImplementedError
def inverse_reparameterise(self, x, x_prime, log_j):
"""
        Apply the inverse reparameterisation to convert from x'-space
        back to x-space.
        Parameters
        ----------
        x : structured array
            Array of inputs
        x_prime : structured array
            Array to be updated
        log_j : array_like
            Log jacobian to be updated
        Returns
        -------
        x, x_prime : structured arrays
            Updated versions of the x and x_prime arrays
log_j : array_like
Updated log Jacobian determinant
"""
raise NotImplementedError
def update(self, x):
"""Update the reparameterisation given some points.
Does nothing by default.
"""
pass
def reset(self):
"""Reset the reparameterisation.
Does nothing by default.
"""
pass
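# Minimal usage sketch (not part of nessai): a hypothetical subclass that
# rescales a single bounded parameter to [0, 1], illustrating how the
# reparameterise/inverse_reparameterise interface and the log-Jacobian
# bookkeeping are meant to be used.
class UnitRescaleExample(Reparameterisation):
    requires_bounded_prior = True
    def reparameterise(self, x, x_prime, log_j):
        for p, pp in zip(self.parameters, self.prime_parameters):
            lower, upper = self.prior_bounds[p]
            # forward map: x -> x' = (x - lower) / (upper - lower)
            x_prime[pp] = (x[p] - lower) / (upper - lower)
            log_j -= np.log(upper - lower)
        return x, x_prime, log_j
    def inverse_reparameterise(self, x, x_prime, log_j):
        for p, pp in zip(self.parameters, self.prime_parameters):
            lower, upper = self.prior_bounds[p]
            # inverse map: x' -> x = x' * (upper - lower) + lower
            x[p] = x_prime[pp] * (upper - lower) + lower
            log_j += np.log(upper - lower)
        return x, x_prime, log_j
# Example construction (illustrative values only):
#     reparam = UnitRescaleExample(parameters="x", prior_bounds=[0.0, 10.0])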
|
mj-willREPO_NAMEnessaiPATH_START.@nessai_extracted@nessai-main@src@nessai@[email protected]@.PATH_END.py
|
{
"filename": "test_Galactic.py",
"repo_name": "nanograv/PINT",
"repo_path": "PINT_extracted/PINT-master/tests/test_Galactic.py",
"type": "Python"
}
|
import logging
import os
import pytest
import astropy.units as u
import pint.models.model_builder as mb
from pinttestdata import datadir
from pint import utils
import astropy.coordinates
import astropy.time
class TestGalactic:
"""Test conversion from equatorial/ecliptic -> Galactic coordinates as astropy objects"""
@classmethod
def setup_class(cls):
# J0613 is in equatorial
cls.parfileJ0613 = os.path.join(
datadir, "J0613-0200_NANOGrav_dfg+12_TAI_FB90.par"
)
cls.modelJ0613 = mb.get_model(cls.parfileJ0613)
# B1855+09 is in ecliptic
cls.parfileB1855 = os.path.join(datadir, "B1855+09_NANOGrav_9yv1.gls.par")
cls.modelB1855 = mb.get_model(cls.parfileB1855)
cls.log = logging.getLogger("TestGalactic")
def test_proper_motion(self):
"""
use the PINT and astropy proper motion calculations and compare
"""
# make a test SkyCoord object
# make sure it has obstime and distance supplied
# to use it for conversions as well
J0613_icrs = self.modelJ0613.coords_as_ICRS()
J0613_icrs_now = utils.add_dummy_distance(J0613_icrs)
newepoch = self.modelJ0613.POSEPOCH.quantity.mjd + 100
# now do it for a future epoch
J0613_icrs = self.modelJ0613.coords_as_ICRS(epoch=newepoch)
# and use the coordinates now but use astropy's space motion
print(
J0613_icrs_now.apply_space_motion(
new_obstime=astropy.time.Time(newepoch, scale="tdb", format="mjd")
)
)
J0613_icrs_now_to_then = utils.remove_dummy_distance(
J0613_icrs_now.apply_space_motion(
new_obstime=astropy.time.Time(newepoch, scale="tdb", format="mjd")
)
)
sep = J0613_icrs.separation(J0613_icrs_now_to_then)
msg = (
"Applying proper motion for +100d failed with separation %.1e arcsec"
% sep.arcsec
)
assert sep < 1e-9 * u.arcsec, msg
# make sure it can support newepoch supplied as a Time object
newepoch = astropy.time.Time(newepoch, format="mjd")
J0613_icrs = self.modelJ0613.coords_as_ICRS(epoch=newepoch)
J0613_icrs_now_to_then = utils.remove_dummy_distance(
J0613_icrs_now.apply_space_motion(new_obstime=newepoch)
)
sep = J0613_icrs.separation(J0613_icrs_now_to_then)
msg = (
"Applying proper motion for +100d failed with separation %.1e arcsec"
% sep.arcsec
)
assert sep < 1e-9 * u.arcsec, msg
def test_proper_motion_identity(self):
# sanity check that evaluation at POSEPOCH returns something very close to 0
J0613_icrs = self.modelJ0613.coords_as_ICRS()
J0613_icrs_alt = self.modelJ0613.coords_as_ICRS(
epoch=self.modelJ0613.POSEPOCH.quantity.mjd
)
sep = J0613_icrs.separation(J0613_icrs_alt)
assert sep < 1e-11 * u.arcsec
def test_equatorial_to_galactic(self):
"""
start with a pulsar in equatorial coordinates
convert to Galactic and make sure the positions are consistent
then apply the space motion to the equatorial object & convert to Galactic
compare that to the Galactic object w/ space motion
"""
# make a test SkyCoord object
# make sure it has obstime and distance supplied
# to use it for conversions as well
J0613_icrs = self.modelJ0613.coords_as_ICRS()
J0613_icrs_now = utils.add_dummy_distance(J0613_icrs)
J0613_galactic = self.modelJ0613.coords_as_GAL()
J0613_galactic_now = utils.add_dummy_distance(J0613_galactic)
newepoch = self.modelJ0613.POSEPOCH.quantity.mjd + 100
# what I get converting within astropy
J0613_galactic_comparison = J0613_icrs_now.transform_to(
astropy.coordinates.Galactic
)
sep = J0613_galactic_now.separation(J0613_galactic_comparison)
msg = (
"Equatorial to Galactic conversion for now failed with separation %.1e arcsec"
% sep.arcsec
)
assert sep < 1e-9 * u.arcsec, msg
J0613_icrs = self.modelJ0613.coords_as_ICRS(epoch=newepoch)
# what I get converting within astropy
J0613_galactic_comparison = J0613_icrs.transform_to(
astropy.coordinates.Galactic
)
J0613_galactic_then = utils.remove_dummy_distance(
J0613_galactic_now.apply_space_motion(
new_obstime=astropy.time.Time(newepoch, scale="tdb", format="mjd")
)
)
sep = J0613_galactic_then.separation(J0613_galactic_comparison)
msg = (
"Equatorial to Galactic conversion for +100d failed with separation %.1e arcsec"
% sep.arcsec
)
assert sep < 1e-9 * u.arcsec, msg
def test_ecliptic_to_galactic(self):
"""
start with a pulsar in ecliptic coordinates
convert to Galactic and make sure the positions are consistent
then apply the space motion to the ecliptic object & convert to Galactic
compare that to the Galactic object w/ space motion
"""
# make a test SkyCoord object
# make sure it has obstime and distance supplied
# to use it for conversions as well
B1855_ECL = self.modelB1855.coords_as_ECL()
B1855_ECL_now = utils.add_dummy_distance(B1855_ECL)
B1855_galactic = self.modelB1855.coords_as_GAL()
B1855_galactic_now = utils.add_dummy_distance(B1855_galactic)
newepoch = self.modelB1855.POSEPOCH.quantity.mjd + 100
# what I get converting within astropy
B1855_galactic_comparison = B1855_ECL_now.transform_to(
astropy.coordinates.Galactic
)
sep = B1855_galactic_now.separation(B1855_galactic_comparison)
msg = (
"Ecliptic to Galactic conversion for now failed with separation %.1e arcsec"
% sep.arcsec
)
assert sep < 1e-9 * u.arcsec, msg
B1855_ECL = self.modelB1855.coords_as_ECL(epoch=newepoch)
# what I get converting within astropy
B1855_galactic_comparison = B1855_ECL.transform_to(astropy.coordinates.Galactic)
B1855_galactic_then = utils.remove_dummy_distance(
B1855_galactic_now.apply_space_motion(
new_obstime=astropy.time.Time(newepoch, scale="tdb", format="mjd")
)
)
sep = B1855_galactic_then.separation(B1855_galactic_comparison)
msg = (
"Ecliptic to Galactic conversion for +100d failed with separation %.1e arcsec"
% sep.arcsec
)
assert sep < 1e-9 * u.arcsec, msg
|
nanogravREPO_NAMEPINTPATH_START.@PINT_extracted@PINT-master@tests@[email protected]_END.py
|
{
"filename": "chunked_dataset.py",
"repo_name": "ML4GW/ml4gw",
"repo_path": "ml4gw_extracted/ml4gw-main/ml4gw/dataloading/chunked_dataset.py",
"type": "Python"
}
|
from collections.abc import Iterable
import torch
from ml4gw.types import WaveformTensor
class ChunkedTimeSeriesDataset(torch.utils.data.IterableDataset):
"""
Wrapper dataset that will loop through chunks of timeseries
data produced by another iterable and sample windows from
these chunks.
Args:
chunk_it:
Iterator which will produce chunks of timeseries
data to sample windows from. Should have shape
`(N, C, T)`, where `N` is the number of chunks
to sample from, `C` is the number of channels,
and `T` is the number of samples along the
time dimension for each chunk.
kernel_size:
Size of windows to be sampled from each chunk.
Should be less than the size of each chunk
along the time dimension.
batch_size:
Number of windows to sample at each iteration
batches_per_chunk:
Number of batches of windows to sample from
each chunk before moving on to the next one.
Sampling fewer batches from each chunk means
a lower likelihood of sampling duplicate windows,
but an increase in chunk-loading overhead.
coincident:
Whether the windows sampled from individual
channels in each batch element should be
sampled coincidentally, i.e. consisting of
the same timesteps, or whether each window
            should be sampled independently from the others.
device:
Which device chunks should be moved to upon loading.
"""
def __init__(
self,
chunk_it: Iterable,
kernel_size: float,
batch_size: int,
batches_per_chunk: int,
coincident: bool = True,
device: str = "cpu",
) -> None:
self.chunk_it = chunk_it
self.kernel_size = kernel_size
self.batch_size = batch_size
self.batches_per_chunk = batches_per_chunk
self.coincident = coincident
self.device = device
def __len__(self) -> int:
return len(self.chunk_it) * self.batches_per_chunk
def __iter__(self) -> WaveformTensor:
it = iter(self.chunk_it)
chunk = next(it)
num_chunks, num_channels, chunk_size = chunk.shape
# if we're sampling coincidentally, we only need
# to sample indices on a per-batch-element basis.
# Otherwise, we'll need indices for both each
# batch sample _and_ each channel with each sample
if self.coincident:
sample_size = (self.batch_size,)
else:
sample_size = (self.batch_size, num_channels)
# slice kernels out a flattened chunk tensor
# index-for-index. We'll account for batch/
# channel indices by introducing offsets later on
idx = torch.arange(self.kernel_size, device=self.device)
idx = idx.view(1, 1, -1)
idx = idx.repeat(self.batch_size, num_channels, 1)
# this will just be a set of aranged channel indices
# repeated to offset the kernel indices in the
# flattened chunk tensor
channel_idx = torch.arange(num_channels, device=self.device)
channel_idx = channel_idx.view(1, -1, 1)
channel_idx = channel_idx.repeat(self.batch_size, 1, self.kernel_size)
idx += channel_idx * chunk_size
while True:
# record the number of rows in the chunk, then
# flatten it to make it easier to slice
if chunk_size < self.kernel_size:
raise ValueError(
"Can't sample kernels of size {} from chunk "
"with size {}".format(self.kernel_size, chunk_size)
)
chunk = chunk.reshape(-1)
# generate batches from the current chunk
for _ in range(self.batches_per_chunk):
# first sample the indices of which chunk elements
# we're going to read batch elements from
chunk_idx = torch.randint(
0, num_chunks, size=sample_size, device=self.device
)
# account for the offset this batch element
# introduces in the flattened array
chunk_idx *= num_channels * chunk_size
chunk_idx = chunk_idx.view(self.batch_size, -1, 1)
chunk_idx = chunk_idx + idx
# now sample the start index within each chunk
# element we're going to grab our time windows from
time_idx = torch.randint(
0,
chunk_size - self.kernel_size,
size=sample_size,
device=self.device,
)
time_idx = time_idx.view(self.batch_size, -1, 1)
# there's no additional offset factor to account for here
chunk_idx += time_idx
# now slice this 3D tensor from our flattened chunk
yield chunk[chunk_idx]
try:
chunk = next(it)
except StopIteration:
break
num_chunks, num_channels, chunk_size = chunk.shape
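# Usage sketch (not part of the original module): sample windows from a
# small in-memory list of synthetic chunks. The chunk iterator, shapes and
# hyper-parameters below are illustrative assumptions only.
if __name__ == "__main__":
    chunks = [torch.randn(8, 2, 4096) for _ in range(3)]
    dataset = ChunkedTimeSeriesDataset(
        chunks,
        kernel_size=1024,
        batch_size=32,
        batches_per_chunk=4,
        coincident=True,
        device="cpu",
    )
    for batch in dataset:
        # each batch has shape (batch_size, num_channels, kernel_size)
        assert batch.shape == (32, 2, 1024)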
|
ML4GWREPO_NAMEml4gwPATH_START.@ml4gw_extracted@ml4gw-main@ml4gw@dataloading@[email protected]_END.py
|
{
"filename": "LRS2_reduction_README.md",
"repo_name": "BrianaLane/LRS2_reduction",
"repo_path": "LRS2_reduction_extracted/LRS2_reduction-master/LRS2_reduction_README.md",
"type": "Markdown"
}
|
==================================
# Instructions for LRS2 Reduction
==================================
- This file contains instructions for running CURE based reductions for LRS2 using the python script reduction_wrapper_lrs2.py
- For more information about CURE refer to the the CURE page on the HETDEX wiki:
https://luna.mpe.mpg.de/wikihetdex/index.php/Cure_-_the_data_analysis_system_for_HETDEX
- All LRS2 data can be found on TACC's supercomputer Maverick inside the following directory:
/work/03946/hetdex/maverick/
- It is recommended you run your reductions on TACC because CURE and python builds already exist.
- You will also not have to download and store data onto your computer.
- However instructions are also provided for setting up reduction on your own computer if you wish to do so.
- NOTE: whether you run reduction on TACC or your own computer you will need a TACC account to access your data.
=================================================
# Understanding your data and the data structure
=================================================
------------------------------
1) Data structure on Maverick:
------------------------------
- On Maverick, inside the data folder (/work/03946/hetdex/maverick/) you will see all of the date folders
- Each date folder contains all of the data taken that night.
- Inside each date folder are folders for the instruments used that night on the HET. Your data will be in 'lrs2'
- Inside the lrs2 folder are folders for all of the observations taken that night (cals and science targets) with lrs2
- Inside each observation folder there are folders for each exposure taken for that object (ex. exp01)
- Inside each exposure folder there is a folder called 'lrs2' and that contains the 4 or 8 fits files for that exposure
- An exposure with one LRS2 unit gives you 4 fits files. There are two detectors in each unit and each has 2 amplifiers.
- If you observed with both units (red and blue) you will have 8 fits files per exposure.
-------------------------------
2) Information from file names:
-------------------------------
- Here is an example filename: 20161202T074404.6_056LL_cmp.fits
- The part before the first underscore (20161202T074404.6) is the date and time the image was taken (the 'T' separates the date and time)
(format: yyyymmddThhmmss.s)
- The 3 digit number after the first underscore (056) tells you the IFU slot ID for the unit. This tells you which LRS2 unit this data is from
LRS2-Blue: 056
LRS2-Red : 066
- The next letter will either be 'L' or 'R' for left or right. This tells you if this is the left or right side of the unit or which channel.
LRS2-B left side : UV channel (056L)
LRS2-B right side: orange channel (056R)
LRS2-R left side : red channel (066L)
LRS2-R right side: far-red channel (066R)
- The letter after that will either be 'L' or 'U' for lower or upper. This tells you which amplifier of that detector it is.
* NOTE: basic reduction will orient U and L properly and combine them.
* So after reduction instead of LU, LL, RU, RL you will just have L and R images.
- The part after the second underscore tells you the image type. There are 6 image types:
zro - bias
twi - twilight flats
flt - flats (taken with either the LDLS or Qth lamps)
cmp - arc lamps or comps (taken with either Cd, Hg, FeAr, or Kr)
drk - darks
sci - science frames
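- As a quick check, the pieces of a filename can be pulled apart in Python (an illustrative snippet only, not part of the reduction scripts):
      >>> fname = '20161202T074404.6_056LL_cmp.fits'
      >>> timestamp, unit_amp, imgtype = fname.replace('.fits', '').split('_')
      >>> ifuslot, side, amp = unit_amp[:3], unit_amp[3], unit_amp[4]
- This gives timestamp '20161202T074404.6', IFU slot '056', side 'L', amplifier 'L', and image type 'cmp'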
================================================
# Running LRS2 reduction on TACC - RECOMMENDED
================================================
-------------------------------------------------------
1) Getting a TACC account and joining the HETDEX group:
-------------------------------------------------------
- If you don't have an account on TACC you need to set one up.
go to: https://portal.tacc.utexas.edu/
Then, click: Create a TACC account
- Send Karl ([email protected]) your username so he can add you to the HETDEX group.
- All data for LRS2 and VIRUS is stored on the supercomputer Maverick. Everything will be found and run on here.
>>> ssh [email protected]
- After ssh'ing into Maverick, you will be at your home page.
---------------------------------------------
2) Setting up TACC account to run reductions:
---------------------------------------------
- CURE and all python packages needed for reduction are already on TACC.
- You must first set up these paths in you "~/.bashrc" file in your account on Maverick.
- Open your "~/.bashrc" file in your prefered text editor and add the following lines at the end:
(NOTE: The last 3 are optional but are useful for running VIRUS reduction scripts or viewing fits files)
umask 022
module load intel/15.0.3 cxx11
export WORKON_HOME=$HOME/.virtualenvs
export PROJECT_HOME=$HOME/Devel
export PATH="/home/00115/gebhardt/anaconda2/bin:/work/03946/hetdex/maverick/bin:$PATH"
export CURELRS2="/home/04195/bindahl/curelrs2/cure/bin"
export CUREBIN="/home/03946/hetdex/curehome/cure/bin"
alias ds9=/work/02426/ngaffney/saods9/bin/ds9
alias QFitsView='/home/01821/papovich/qfitsview/QFitsView'
- Save the file and exit the text editor
- Source your "~/.bashrc" file to make the changes
>>> source ~/.bashrc
- Change permissions of your home directory (cd) for accessibility
>>> cd
>>> chmod a+rx ../username
------------------------------------------------
3) Obtaining reduction scripts and config files:
------------------------------------------------
- Inside your work directory (cdw) make a copy of the LRS2_reduction folder with the following commands:
>>> cdw
>>> cp -r /home/04195/bindahl/LRS2_reduction ./
* If you prefer you can also clone the directory straight from my git hub
>>> git clone https://github.com/BrianaLane/LRS2_reduction.git
- You should now have a folder in your work directory called LRS2_reduction. This folder contains the following files and folder:
1. __reduction_wrapper_lrs2.py__ - This is the script that runs the reductions. You should never have to edit this file.
2. __lrs2_config.py__ - This is the config file where the user defines the data and opts for their reduction
3. __cosmics.py__ - This is the script that runs L.A.comsic in the reduction (http://obswww.unige.ch/~tewes/cosmics_dot_py/)
4. __lrs2_config__ - This is a folder that contains all of the configuration files needed for LRS2 reduction
* __lines_files__ - These files define the pixel and wavelength to find the arc lines for building the wavelength solution
* __mapping_files__ - These files contain the mapping of the fibers onto the field for building data cubes
* __pixel_flats__ - These files are the pixels flats for each CCD that can be optionally divided during reduction
* __longExpCals__ - These are 1800sec FeAr exposures used for pinning down the wavelength solution for the far-red channel
* __short_OrgFlts__ - These replace longer exposure flats in early LRS2 data that were saturating the orange channel
--------------------------
4) Running LRS2 reduction:
--------------------------
- open your lrs2_config.py file in a text editor
- edit fields and paths according to the comments to define your data to be reduced:
* choose the steps of reduction to run, and opts for those steps
- save the changes to lrs2_config.py
- run reduction_wrapper_lrs2.py (if run outside of LRS2_reduction you must give the path to reduction_wrapper_lrs2.py)
>>> python reduction_wrapper_lrs2.py
- When the reduction script runs it will automatically save a copy of the config file used in the redux directory
- You can also choose to run the reduction script with a different config file
* For example you could make your own config file copies with settings you want to save
* or you can use the saved config files in one of your redux directories.
- To run the reduction script with a different config file:
* Run the script with the following command
>>> python reduction_wrapper_lrs2.py -config "path/name_of_config.py"
----------------------------
5) Updating LRS2 reduction:
----------------------------
- I recommend periodically making sure your version of the LRS2 reduction software is up to date
- Changes to the script can be tracked on my LRS2_reduction github: https://github.com/BrianaLane/LRS2_reduction
- If you copy the entire LRS2_reduction directory to your work folder you can pull changes made to your version
- To update your LRS2_reduction version in your work directory:
>>> cd LRS2_reduction
>>> git pull
- Edits you may have made to lrs2_config.py may be incompatible with the new version you are trying to pull
- You can disregard the changes to pull in the update with the following command inside your LRS2_reduction directory:
>>> git stash
>>> git pull
- Before you do a 'git stash' you might want to save a copy of your lrs2_config.py file to a different name.
- Any file with a name other than those listed in section 3 will be unaffected by a 'git stash' or 'git pull'
===============================================
# Running LRS2 reduction on your own computer
===============================================
-------------------------------------------
1) Obtaining and configuring CURE for LRS2:
-------------------------------------------
- If you don't already have CURE installed you have to do that first
- Instructions for installing CURE can be found in the CURE cookbook on the HETDEX wiki (link at top of page)
- Once CURE is installed you have to edit specconf.h to set up CURE for LRS2 data
- Open specconf.h in a text editor (file found inside cure/libcure/)
- Edit line 34 to define LRS2 instead of VIRUS_HET (#define LRS2). Save file and exit
- You must recompile CURE to make the change
- cd into your cure directory and run the following
>>> make clean
>>> make install
- Add this to your "~/.bashrc" file to set the path to your CURE bin:
export CURELRS2="/path_to_folder_containing_CURE/cure/bin"
------------------------------------
2) Python build and packages needed:
------------------------------------
- You need to have Python 2.7 or later installed
- You need the following Python packages: numpy, scipy, pyfits, glob
-------------------------------------
3) Obtaining LRS2 data from Maverick:
-------------------------------------
- You must copy your data off of Maverick from /work/03946/hetdex/maverick/ onto your computer
- It is important that you maintain the same folder structure which is:
date_folder/lrs2/lrs000####/exp##/lrs2/*.fits
- When running the reduction you will be defining the path to the date_folder in which to find your data
---------------------------------------------------------------
4) Obtaining reduction script/files and running LRS2 reduction:
---------------------------------------------------------------
- Refer to sections 3 and 4 under 'Running LRS2 reduction on TACC' above
- The one difference is you will copy the LRS2_reduction directory onto your computer instead of copying it into your TACC directory
=========================================
# Understanding reduction data products
=========================================
-------------------------------------------
1) Files + folders in your redux directory:
-------------------------------------------
- You will see a ton of files that are all of the master files for the arcs, flats, biases, and darks.
- There are also mastertrace files with .pmod, .fmod, .dist, etc. These are the data products from deformer needed to run later reduction steps
- If you choose CLEAN_AFTER_DONE to be True then the folders cmp, flt, zro, and drk will be empty
* otherwise they are filled with the intermediate files from reducing calibration data to build master files
- The folder sci contains all of your reduced science images, sky subtracted files, fiber extracted files, data cubes, and collapsed cubes
----------------------
2) Your science files:
----------------------
- Refer to 'Understanding your data and the data structure' to understand the filename convention
- You will see a bunch of versions of your science files with different prefixes appended to their names
- After each step of the reduction a prefix is added to the filename as follows:
pses - These are files that have been run through basic reduction
S - These are files that have also been sky subtracted
Fe - These are files that have been fiber extracted WITHOUT wavelength resampling
FeR - These are files that have been fiber extracted WITH wavelength resampling
Cu - These files are data cubes
Col - These files are collapsed data cubes
e. - These are the error files for all of these frames
- As an example for the file: CuFeRSpses20160731T094822.4_056_sci_R.fits
- The pses shows that it has been through basic reduction (pses), sky subtracted (S),
fiber extracted with wavelength resampling (FeR), and then built into a data cube (Cu)
- The error file for this frame would be: e.CuFeRSpses20160731T094822.4_056_sci_R.fits
-----------------------------------
2) Finding the Wavelength Solution:
-----------------------------------
- If you have run fiber extraction with wavelength resampling then a wavelength solution can be found in the headers of the FeR files
- Inside the header of your fiber extracted files you need the keywords 'CRVAL1' and 'CRDELT1'.
CRVAL1 - gives you the wavelength of the first pixel
CRDELT1 - gives you the angstroms per pixel
- These can be used to reconstruct the wavelength array for your spectra.
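- For example, the wavelength array can be rebuilt in Python roughly as follows (an illustrative snippet, not part of the reduction scripts; it assumes the FeR file from the example above and the standard NAXIS1 keyword for the number of pixels):
      >>> import numpy as np
      >>> import pyfits
      >>> hdr = pyfits.getheader('FeRSpses20160731T094822.4_056_sci_R.fits')
      >>> wave = hdr['CRVAL1'] + hdr['CRDELT1']*np.arange(hdr['NAXIS1'])
- The array wave then gives the wavelength of each pixel of the extracted spectra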
|
BrianaLaneREPO_NAMELRS2_reductionPATH_START.@LRS2_reduction_extracted@LRS2_reduction-master@[email protected]_END.py
|
{
"filename": "_openpyxl.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/io/excel/_openpyxl.py",
"type": "Python"
}
|
from __future__ import annotations
import mmap
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc
from pandas.core.shared_docs import _shared_docs
from pandas.io.excel._base import (
BaseExcelReader,
ExcelWriter,
)
from pandas.io.excel._util import (
combine_kwargs,
validate_freeze_panes,
)
if TYPE_CHECKING:
from openpyxl import Workbook
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.styles import Fill
from pandas._typing import (
ExcelWriterIfSheetExists,
FilePath,
ReadBuffer,
Scalar,
StorageOptions,
WriteExcelBuffer,
)
class OpenpyxlWriter(ExcelWriter):
_engine = "openpyxl"
_supported_extensions = (".xlsx", ".xlsm")
def __init__( # pyright: ignore[reportInconsistentConstructor]
self,
path: FilePath | WriteExcelBuffer | ExcelWriter,
engine: str | None = None,
date_format: str | None = None,
datetime_format: str | None = None,
mode: str = "w",
storage_options: StorageOptions | None = None,
if_sheet_exists: ExcelWriterIfSheetExists | None = None,
engine_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> None:
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
super().__init__(
path,
mode=mode,
storage_options=storage_options,
if_sheet_exists=if_sheet_exists,
engine_kwargs=engine_kwargs,
)
# ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from
# the file and later write to it
if "r+" in self._mode: # Load from existing workbook
from openpyxl import load_workbook
try:
self._book = load_workbook(self._handles.handle, **engine_kwargs)
except TypeError:
self._handles.handle.close()
raise
self._handles.handle.seek(0)
else:
# Create workbook object with default optimized_write=True.
try:
self._book = Workbook(**engine_kwargs)
except TypeError:
self._handles.handle.close()
raise
if self.book.worksheets:
self.book.remove(self.book.worksheets[0])
@property
def book(self) -> Workbook:
"""
Book instance of class openpyxl.workbook.Workbook.
This attribute can be used to access engine-specific features.
"""
return self._book
@property
def sheets(self) -> dict[str, Any]:
"""Mapping of sheet names to sheet objects."""
result = {name: self.book[name] for name in self.book.sheetnames}
return result
def _save(self) -> None:
"""
Save workbook to disk.
"""
self.book.save(self._handles.handle)
if "r+" in self._mode and not isinstance(self._handles.handle, mmap.mmap):
# truncate file to the written content
self._handles.handle.truncate()
@classmethod
def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]:
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object.
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {"borders": "border"}
style_kwargs: dict[str, Serialisable] = {}
for k, v in style_dict.items():
k = _style_key_map.get(k, k)
_conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object.
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object.
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
"sz": "size",
"b": "bold",
"i": "italic",
"u": "underline",
"strike": "strikethrough",
"vertalign": "vertAlign",
}
font_kwargs = {}
for k, v in font_dict.items():
k = _font_key_map.get(k, k)
if k == "color":
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict: dict[str, Any]) -> Fill:
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object.
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import (
GradientFill,
PatternFill,
)
_pattern_fill_key_map = {
"patternType": "fill_type",
"patterntype": "fill_type",
"fgColor": "start_color",
"fgcolor": "start_color",
"bgColor": "end_color",
"bgcolor": "end_color",
}
_gradient_fill_key_map = {"fill_type": "type"}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = _pattern_fill_key_map.get(k)
gk = _gradient_fill_key_map.get(k)
if pk in ["start_color", "end_color"]:
v = cls._convert_to_color(v)
if gk == "stop":
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object.
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {"border_style": "style"}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
k = _side_key_map.get(k, k)
if k == "color":
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object.
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"}
border_kwargs = {}
for k, v in border_dict.items():
k = _border_key_map.get(k, k)
if k == "color":
v = cls._convert_to_color(v)
if k in ["left", "right", "top", "bottom", "diagonal"]:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object.
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
return number_format_dict["format_code"]
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
Returns
-------
"""
from openpyxl.styles import Protection
return Protection(**protection_dict)
def _write_cells(
self,
cells,
sheet_name: str | None = None,
startrow: int = 0,
startcol: int = 0,
freeze_panes: tuple[int, int] | None = None,
) -> None:
# Write the frame cells using openpyxl.
sheet_name = self._get_sheet_name(sheet_name)
_style_cache: dict[str, dict[str, Serialisable]] = {}
if sheet_name in self.sheets and self._if_sheet_exists != "new":
if "r+" in self._mode:
if self._if_sheet_exists == "replace":
old_wks = self.sheets[sheet_name]
target_index = self.book.index(old_wks)
del self.book[sheet_name]
wks = self.book.create_sheet(sheet_name, target_index)
elif self._if_sheet_exists == "error":
raise ValueError(
f"Sheet '{sheet_name}' already exists and "
f"if_sheet_exists is set to 'error'."
)
elif self._if_sheet_exists == "overlay":
wks = self.sheets[sheet_name]
else:
raise ValueError(
f"'{self._if_sheet_exists}' is not valid for if_sheet_exists. "
"Valid options are 'error', 'new', 'replace' and 'overlay'."
)
else:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
if validate_freeze_panes(freeze_panes):
freeze_panes = cast(tuple[int, int], freeze_panes)
wks.freeze_panes = wks.cell(
row=freeze_panes[0] + 1, column=freeze_panes[1] + 1
)
for cell in cells:
xcell = wks.cell(
row=startrow + cell.row + 1, column=startcol + cell.col + 1
)
xcell.value, fmt = self._value_with_fmt(cell.val)
if fmt:
xcell.number_format = fmt
style_kwargs: dict[str, Serialisable] | None = {}
if cell.style:
key = str(cell.style)
style_kwargs = _style_cache.get(key)
if style_kwargs is None:
style_kwargs = self._convert_to_style_kwargs(cell.style)
_style_cache[key] = style_kwargs
if style_kwargs:
for k, v in style_kwargs.items():
setattr(xcell, k, v)
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_cells(
start_row=startrow + cell.row + 1,
start_column=startcol + cell.col + 1,
end_column=startcol + cell.mergeend + 1,
end_row=startrow + cell.mergestart + 1,
)
# When cells are merged only the top-left cell is preserved
# The behaviour of the other cells in a merged range is
# undefined
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
xcell = wks.cell(column=col, row=row)
for k, v in style_kwargs.items():
setattr(xcell, k, v)
class OpenpyxlReader(BaseExcelReader["Workbook"]):
@doc(storage_options=_shared_docs["storage_options"])
def __init__(
self,
filepath_or_buffer: FilePath | ReadBuffer[bytes],
storage_options: StorageOptions | None = None,
engine_kwargs: dict | None = None,
) -> None:
"""
Reader using openpyxl engine.
Parameters
----------
filepath_or_buffer : str, path object or Workbook
Object to be parsed.
{storage_options}
engine_kwargs : dict, optional
Arbitrary keyword arguments passed to excel engine.
"""
import_optional_dependency("openpyxl")
super().__init__(
filepath_or_buffer,
storage_options=storage_options,
engine_kwargs=engine_kwargs,
)
@property
def _workbook_class(self) -> type[Workbook]:
from openpyxl import Workbook
return Workbook
def load_workbook(
self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
) -> Workbook:
from openpyxl import load_workbook
default_kwargs = {"read_only": True, "data_only": True, "keep_links": False}
return load_workbook(
filepath_or_buffer,
**(default_kwargs | engine_kwargs),
)
@property
def sheet_names(self) -> list[str]:
return [sheet.title for sheet in self.book.worksheets]
def get_sheet_by_name(self, name: str):
self.raise_if_bad_sheet_by_name(name)
return self.book[name]
def get_sheet_by_index(self, index: int):
self.raise_if_bad_sheet_by_index(index)
return self.book.worksheets[index]
def _convert_cell(self, cell) -> Scalar:
from openpyxl.cell.cell import (
TYPE_ERROR,
TYPE_NUMERIC,
)
if cell.value is None:
return "" # compat with xlrd
elif cell.data_type == TYPE_ERROR:
return np.nan
elif cell.data_type == TYPE_NUMERIC:
val = int(cell.value)
if val == cell.value:
return val
return float(cell.value)
return cell.value
def get_sheet_data(
self, sheet, file_rows_needed: int | None = None
) -> list[list[Scalar]]:
if self.book.read_only:
sheet.reset_dimensions()
data: list[list[Scalar]] = []
last_row_with_data = -1
for row_number, row in enumerate(sheet.rows):
converted_row = [self._convert_cell(cell) for cell in row]
while converted_row and converted_row[-1] == "":
# trim trailing empty elements
converted_row.pop()
if converted_row:
last_row_with_data = row_number
data.append(converted_row)
if file_rows_needed is not None and len(data) >= file_rows_needed:
break
# Trim trailing empty rows
data = data[: last_row_with_data + 1]
if len(data) > 0:
# extend rows to max width
max_width = max(len(data_row) for data_row in data)
if min(len(data_row) for data_row in data) < max_width:
empty_cell: list[Scalar] = [""]
data = [
data_row + (max_width - len(data_row)) * empty_cell
for data_row in data
]
return data
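# --- Hypothetical usage sketch (not part of pandas itself) ---
# A minimal illustration of the style-conversion helpers defined on
# OpenpyxlWriter above. The style dict below is an assumed example chosen
# purely for demonstration; running it requires openpyxl to be installed.
if __name__ == "__main__":
    _demo_style = {
        "font": {"bold": True, "color": "FF0000FF"},
        "borders": {"bottom": "thin"},
        "alignment": {"horizontal": "center"},
    }
    # Keys are normalized (e.g. "borders" -> "border") and each value is
    # replaced by the corresponding native openpyxl style object.
    print(OpenpyxlWriter._convert_to_style_kwargs(_demo_style))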
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@io@excel@[email protected]_END.py
|
{
"filename": "plot_IT1.py",
"repo_name": "galtay/sphray",
"repo_path": "sphray_extracted/sphray-master/analysis/plot_IT1.py",
"type": "Python"
}
|
import sphray_io
import numpy as np
import matplotlib.pyplot as plt
#====================================================================
# define input parameters
#====================================================================
snapdir = "../../sphray_output/IT1_N64/r6"
snapbase = "snap"
snapnum = 1
snapnumstr = '{0:03}'.format( snapnum )
sfile = snapdir + "/" + snapbase + "_" + snapnumstr
pngfile = "T1x" + snapnumstr + ".png"
cmpfile= "CmpData/CmpT1_" + snapnumstr + "x.txt"
print
print "comparison project file:", cmpfile
print "sphray output file:", sfile
print "png file if doing file output:", pngfile
print
#====================================================================
# read sphray snapshot
#====================================================================
# create a file object and read data
#---------------------------------------------------
sf = sphray_io.SphrayFile()
shead = sf.read_header(sfile)
sdata = sf.read_data_1(sfile)
# sort particles by radius
#---------------------------------------------------
sdata['pos'] = sdata['pos'] - shead['boxlen'][0]/2
sdata['rad'] = np.sqrt( sdata['pos'][:,0]**2 +
sdata['pos'][:,1]**2 +
sdata['pos'][:,2]**2 )
print 'min/max pos: ', sdata['pos'].min(), sdata['pos'].max()
print 'min/max rad: ', sdata['rad'].min(), sdata['rad'].max()
sdata = sf.convert_data_to_structured_array(sdata)
sdata.sort(order='rad')
# bin particles radially with an equal number of
# particles per bin
#---------------------------------------------------
nbins = 1000
ngas = sdata['rad'].size
nperbin = ngas / nbins
class SphRadialAverage:
pass
rav = SphRadialAverage()
rav.xx = np.zeros( nbins )
rav.xHI_mean = np.zeros( nbins )
rav.xHI_median = np.zeros( nbins )
for i in xrange(nbins):
ii = i * nperbin
ff = ii + nperbin
if i==nbins-1:
ff = ngas
rads = sdata['rad'][ii:ff]
xHI = sdata['xHI'][ii:ff]
rav.xx[i] = np.mean(rads)
rav.xHI_mean[i] = np.mean( np.log10(xHI) )
rav.xHI_median[i] = np.median( np.log10(xHI) )
#====================================================================
# read comparison project data
#====================================================================
codes = ['xx', 'c2ray', 'otvet', 'crash', 'rsph', 'art', 'ftte',
'simplex', 'zeus', 'flash', 'ift']
Ncmpbins = 100
cdata_pre = np.loadtxt( cmpfile )
cdata={}
for i,c in enumerate(codes):
ii = i * Ncmpbins
ff = ii + 100
dat = cdata_pre[ii:ff]
cdata[c] = dat
#====================================================================
# make plot
#====================================================================
# define linestyles
#--------------------------------------
from itertools import cycle
lines = [
{'color':'blue', 'ls':'-'},
{'color':'green', 'ls':'-'},
{'color':'red', 'ls':'-'},
{'color':'gold', 'ls':'-'},
{'color':'cyan', 'ls':'-'},
{'color':'purple', 'ls':'-'},
{'color':'black', 'ls':'-'},
{'color':'red', 'ls':'--'},
{'color':'green', 'ls':'--'},
]
linecycler = cycle(lines)
# set up figure
#--------------------------------------
fig = plt.figure( figsize=(10,10) )
ax = fig.add_subplot(111)
# plot SPHRAY results
#--------------------------------------
ax.scatter( sdata['rad']/(shead['boxlen'][0]/2),
np.log10( sdata['xHI'] ), s=30,
facecolors='grey', edgecolors='grey', alpha=0.1)
ax.scatter( sdata['rad']/(shead['boxlen'][0]/2),
np.log10( 1.0-sdata['xHI'] ), s=30,
facecolors='grey', edgecolors='grey', alpha=0.1)
ax.plot( rav.xx / (shead['boxlen'][0]/2),
rav.xHI_median, color='lime', lw=3.0,
alpha=0.7, zorder=4)
ax.plot( rav.xx / (shead['boxlen'][0]/2),
np.log10( 1.0 - 10**rav.xHI_median),
color='lime', lw=3.0,
alpha=0.7, zorder=4)
# plot comparison project results
#--------------------------------------
for i,c in enumerate(codes[1:]):
this_line = next(linecycler)
color = this_line['color']
ls = this_line['ls']
ax.plot( cdata['xx'], np.log10( cdata[c] ), label=c,
lw=2.0, color=color, ls=ls )
ax.plot( cdata['xx'], np.log10( 1.0-cdata[c] ),
lw=2.0, color=color, ls=ls )
# finalize plot
#--------------------------------------
ax.legend(loc='lower center', ncol=2)
ax.set_xlabel( r'$r/L_{\rm box}$', fontsize=20 )
ax.set_xlim( -0.05, 1.05 )
ax.set_ylabel( r'$x_{\rm HI}$', fontsize=20 )
ax.set_ylim( -5.3, 0.1 )
fig.savefig( pngfile )
|
galtayREPO_NAMEsphrayPATH_START.@sphray_extracted@sphray-master@analysis@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/rubin_sim/maf/stackers/__init__.py",
"type": "Python"
}
|
from .base_stacker import *
from .coord_stackers import *
from .date_stackers import *
from .general_stackers import *
from .get_col_info import *
from .label_stackers import *
from .m5_optimal_stacker import *
from .mo_stackers import *
from .n_follow_stacker import *
from .neo_dist_stacker import *
from .sdss_stackers import *
from .sn_stacker import *
from .teff_stacker import *
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@rubin_sim@maf@stackers@[email protected]_END.py
|
{
"filename": "_legendgrouptitle.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/_legendgrouptitle.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendgrouptitleValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="legendgrouptitle", parent_name="scatter", **kwargs):
super(LegendgrouptitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Legendgrouptitle"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this legend group's title font.
text
Sets the title of the legend group.
""",
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@[email protected]_END.py
|
{
"filename": "README.md",
"repo_name": "lmytime/MyFilter",
"repo_path": "MyFilter_extracted/MyFilter-main/README.md",
"type": "Markdown"
}
|
# MyFilter
[](https://doi.org/10.5281/zenodo.10210201)
MyFilter is a web app that provides interactive visualization of astronomical filters' transmission curves.
## Demo
A live demo of the app can be viewed here:
https://preview.lmytime.com/myfilter
JWST NIRCam customized version:
https://preview.lmytime.com/myfilter?cus=JWST
<img src="demo.png" width="100%">
## Features
- Interactive transmission curves for common astronomical filters
- Overlay multiple filters for comparison
- Zoom in on specific wavelength ranges
- Check emission or absorption lines at any redshift
## Usage
The app is simple to use. Just select the filters you want to visualize from the sidebar. Hover over the graph for more details and use the mouse to zoom in on areas of interest.
Multiple filters can be overlaid to compare bandpasses.
## Deployment
We provide a way to deploy the app yourself using Docker.
First pull the docker image:
```sh
docker pull lmytime/myfilter
```
Then run the docker image as container:
```sh
docker run -it --rm -p 9899:9899 lmytime/myfilter
```
Finally, you can open the app at:
- `http://127.0.0.1:9899`
## Development
The app is built based on:
- [Vue.js](https://vuejs.org/)
- [dygraphs](https://dygraphs.com/) for data visualization
- [ElementPlus](https://element-plus.org/) for styling
- [Flask](https://flask.palletsprojects.com/)
- The filter data is from the [SVO](http://svo2.cab.inta-csic.es/theory/fps/index.php) website.
- Some data are from users:
- `JWST_xxx`: xxx represents MIRI, NIRISS, NIRCam, and NIRSpec. These data are from JWST ETC v2.0.
- `DECam-Merian`: Filters designed in [Merian Survey](https://merian.sites.ucsc.edu/).
- `MOIRCS`: Subaru MOIRCS
## Contributing
Contributions are welcome! Please create an issue or open a pull request if you would like to add a feature or fix a bug.
## License
This project is open source and available under the MIT License.
## Acknowledgment
[](https://doi.org/10.5281/zenodo.10210201)
If you publish any work that uses MyFilter, please cite the software by linking to the Zenodo DOI as follows:
In `bibtex`:
```bibtex
@misc{MyFilter,
author = {Li, Mingyu},
title = {{MyFilter: A Web Application for Interactive Visualization of Astronomical Filter Transmission Curves}},
month = nov,
year = 2023,
publisher = {Zenodo},
version = {1.0.0},
doi = {10.5281/zenodo.10210201},
url = {https://doi.org/10.5281/zenodo.10210201}
}
```
Optionally, you can also attach the GitHub repo link in a footnote:
```latex
\footnote{\url{https://github.com/lmytime/MyFilter}}
```
|
lmytimeREPO_NAMEMyFilterPATH_START.@MyFilter_extracted@[email protected]@.PATH_END.py
|
{
"filename": "delete_jobs.py",
"repo_name": "CobayaSampler/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/cobaya/grid_tools/delete_jobs.py",
"type": "Python"
}
|
import subprocess
import shutil
from . import batchjob_args, jobqueue
def delete_jobs(args=None):
opts = batchjob_args.BatchArgs('Delete running or queued jobs', importance=True,
batchPathOptional=True)
group = opts.parser.add_mutually_exclusive_group()
group.add_argument('--queued', action='store_true')
group.add_argument('--running', action='store_true')
opts.parser.add_argument('--delete_id_min', type=int)
opts.parser.add_argument('--delete_id_range', nargs=2, type=int)
opts.parser.add_argument('--delete_ids', nargs='+', type=int)
opts.parser.add_argument('--confirm', action='store_true')
(batch, args) = opts.parseForBatch(args)
if batch:
if args.delete_id_range is not None:
jobqueue.deleteJobs(args.batchPath, jobId_minmax=args.delete_id_range,
confirm=args.confirm)
if args.delete_id_min is not None:
jobqueue.deleteJobs(args.batchPath, jobId_min=args.delete_id_min,
confirm=args.confirm)
elif args.delete_ids is not None:
jobqueue.deleteJobs(args.batchPath, args.delete_ids, confirm=args.confirm)
else:
items = [jobItem for jobItem in opts.filteredBatchItems()]
batchNames = set(
[jobItem.name for jobItem in items] + [jobItem.name + '_minimize' for
jobItem in items])
jobqueue.deleteJobs(args.batchPath, rootNames=batchNames,
confirm=args.confirm)
if not args.confirm:
print('jobs not actually deleted: add --confirm to really cancel them')
else:
ids = []
if args.delete_id_range is not None:
ids = list(range(args.delete_id_range[0], args.delete_id_range[1] + 1))
elif args.delete_ids is not None:
ids += args.delete_ids
elif args.name is not None:
jobqueue.deleteJobs(args.batchPath, rootNames=args.name)
return
else:
print('Must give --delete_id_range, --delete_ids or --name '
'if no batch directory')
for engine in jobqueue.grid_engine_defaults:
qdel = jobqueue.engine_default(engine, 'qdel')
if shutil.which(qdel) is not None:
for jobId in ids:
subprocess.check_output(qdel + ' ' + str(jobId), shell=True)
break
if __name__ == "__main__":
delete_jobs()
|
CobayaSamplerREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@cobaya@grid_tools@[email protected]_END.py
|
{
"filename": "_hovertemplatesrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/box/_hovertemplatesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="hovertemplatesrc", parent_name="box", **kwargs):
super(HovertemplatesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@box@[email protected]_END.py
|
{
"filename": "load.py",
"repo_name": "silkemaes/MACE",
"repo_path": "MACE_extracted/MACE-main/src/mace/load.py",
"type": "Python"
}
|
'''
This script loads a trained MACE model
and provides the user with the possibility to apply (test) the model on a test dataset.
The class Trained_MACE() contains the loaded model,
together with the training and test losses, and the meta data.
'''
import src.mace.utils as utils
import src.mace.loss as loss
import matplotlib.pyplot as plt
class Trained_MACE():
'''
Class to load a trained MACE model.
'''
def __init__(self, outloc, dirname, epoch = -1):
'''
Load all the components of a MACE model.
Input:
- outloc: output location
- dirname: name of the directory
- epoch: specific epoch to load
if epoch == -1, the last epoch is loaded (i.e. final state of the model)
            if epoch >= 0, the model state saved at epoch number epoch*10 is loaded
Returns:
- meta: file with meta data
- model: torch model
- trainloss: training loss per epoch
- testloss: test loss per epoch
'''
self.loc = outloc+dirname+'/'
self.epoch = epoch
self.meta = utils.load_meta(self.loc)
self.model, self.num_params = utils.load_model(self.loc, self.meta, epoch)
self.trainloss = loss.LoadedLoss(self.loc, self.meta, 'train')
self.testloss = loss.LoadedLoss(self.loc, self.meta, 'valid')
self.plotpath = self.loc + 'figs/'
utils.makeOutputDir(self.plotpath)
def get_meta(self):
return self.meta
def get_model(self):
return self.model
def get_trainloss(self):
return self.trainloss
def get_testloss(self):
return self.testloss
def get_num_params(self):
return self.num_params
def get_loc(self):
return self.loc
def get_epoch(self):
return self.epoch
def plot_loss(self, log=True, ylim=False, limits=None, save = True):
len = self.get_meta()['epochs']
fig = loss.plot(self.trainloss, self.testloss, len, log = log, ylim = ylim, limits = limits)
if save == True:
plt.savefig(self.plotpath+'loss.png')
plt.show()
return fig
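# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates how Trained_MACE might be loaded and inspected. The output
# location and directory name below are placeholders, not real paths.
if __name__ == '__main__':
    mace = Trained_MACE(outloc='/path/to/output/', dirname='example_model', epoch=-1)
    print('Number of trainable parameters:', mace.get_num_params())
    mace.plot_loss(log=True, save=False)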
|
silkemaesREPO_NAMEMACEPATH_START.@MACE_extracted@MACE-main@src@[email protected]@.PATH_END.py
|
{
"filename": "_yhoverformat.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/mesh3d/_yhoverformat.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YhoverformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="yhoverformat", parent_name="mesh3d", **kwargs):
super(YhoverformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@mesh3d@[email protected]_END.py
|
{
"filename": "_line.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/marker/_line.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="line", parent_name="scatter3d.marker", **kwargs):
super(LineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Line"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
effect only if in `marker.line.color` is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has
an effect only if in `marker.line.color` is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
point. Has an effect only if in
`marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color` is set to a numerical
array. The colorscale must be an array
containing arrays mapping a normalized value to
an rgb, rgba, hex, hsl, hsv, or named color
string. At minimum, a mapping for the lowest
(0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use
`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette
name string of the following list: Blackbody,Bl
uered,Blues,Cividis,Earth,Electric,Greens,Greys
,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viri
dis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.line.color` is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
""",
),
**kwargs,
)
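# --- Hypothetical usage sketch (not part of the generated validator code) ---
# Shows the marker.line attributes documented above being set on a scatter3d
# trace; the data values are arbitrary placeholders.
if __name__ == "__main__":
    import plotly.graph_objects as go
    fig = go.Figure(
        go.Scatter3d(
            x=[0, 1, 2],
            y=[0, 1, 2],
            z=[0, 1, 2],
            mode="markers",
            marker=dict(
                size=6,
                line=dict(color=[0.1, 0.5, 0.9], colorscale="Viridis", width=2),
            ),
        )
    )
    fig.show()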
|
[email protected][email protected]@packages@python@plotly@plotly@validators@scatter3d@marker@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "CobayaSampler/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/cobaya/likelihoods/planck_2015_plikHM_TT/__init__.py",
"type": "Python"
}
|
from cobaya.likelihoods.base_classes import Planck2015Clik
class planck_2015_plikHM_TT(Planck2015Clik):
r"""
High-$\ell$ temperature-only likelihood (binned) of Planck's 2015 data release
\cite{Aghanim:2015xee}.
"""
pass
|
CobayaSamplerREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@cobaya@likelihoods@planck_2015_plikHM_TT@[email protected]_END.py
|
{
"filename": "bbox_transform.py",
"repo_name": "chenwuperth/rgz_rcnn",
"repo_path": "rgz_rcnn_extracted/rgz_rcnn-master/lib/fast_rcnn/bbox_transform.py",
"type": "Python"
}
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
#
# Modified by Chen Wu ([email protected])
# --------------------------------------------------------
import numpy as np
def bbox_transform(ex_rois, gt_rois):
"""
ex_rois: either anchor_rois for anchor_target_layer
or rpn_rois for proposal_target_layer
so RPN is treated as anchor in the 2nd case
gt_rois: ground-truth_roi
"""
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = np.log(gt_widths / ex_widths)
targets_dh = np.log(gt_heights / ex_heights)
targets = np.vstack(
(targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
def bbox_transform_inv(boxes, deltas):
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
def clip_boxes(boxes, im_shape):
"""
Clip boxes to image boundaries.
"""
# x1 >= 0
boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return boxes
def bbox_contains(bx1, bx2, delta=4):
"""
    bx1 and bx2 are 1D arrays.
    Does bx1 fully contain bx2?
    delta is with respect to the scaled image (i.e. 600 x 600).
"""
xmin_1 = bx1[0]
ymin_1 = bx1[1]
xmax_1 = bx1[2]
ymax_1 = bx1[3]
xmin_2 = bx2[0]
ymin_2 = bx2[1]
xmax_2 = bx2[2]
ymax_2 = bx2[3]
# does bx1 fully contains box2?
if (xmin_2 - xmin_1 > delta):
if (xmax_1 - xmax_2 > delta):
if (ymin_2 - ymin_1 > delta):
if (ymax_1 - ymax_2 > delta):
return True
# elif (xmin_1 - xmin_2 > delta): #does bx2 fully contains box1?
# if (xmax_2 - xmax_1 > delta):
# if (ymin_1 - ymin_2 > delta):
# if (ymax_2 - ymax_1 > delta):
# return True
return False
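if __name__ == "__main__":
    # Hypothetical round-trip sketch (not part of the original module):
    # encode a ground-truth box against an example ROI, then decode it again.
    ex = np.array([[10., 10., 50., 30.]])
    gt = np.array([[12., 8., 48., 32.]])
    deltas = bbox_transform(ex, gt)           # regression targets (dx, dy, dw, dh)
    decoded = bbox_transform_inv(ex, deltas)  # roughly recovers gt; x2/y2 differ by the +1 width/height convention
    print(deltas)
    print(decoded)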
|
chenwuperthREPO_NAMErgz_rcnnPATH_START.@rgz_rcnn_extracted@rgz_rcnn-master@lib@fast_rcnn@[email protected]_END.py
|
{
"filename": "parameters_validator.py",
"repo_name": "Kamuish/archi",
"repo_path": "archi_extracted/archi-master/pyarchi/utils/misc/parameters_validator.py",
"type": "Python"
}
|
import os
def parameters_validator(**kwargs):
"""
Loads the configuration parameters from the .yaml file. After loading,
it checks some of the parameters to see if they have valid values.
Parameters
----------
    kwargs
        Configuration parameters to validate.
    Returns
    -------
    wrong_params, warnings, kwargs
        Parameters with invalid values, parameters that only warrant a
        warning, and the (unchanged) input parameters.
"""
wrong_params = []
warnings = []
paths_to_test = ["base_folder", "optimized_factors", "results_folder"]
for key in paths_to_test:
if not os.path.exists(kwargs[key]):
wrong_params.append(key)
if kwargs["official_curve"] not in ["DEFAULT", "OPTIMAL", "RINF", "RSUP"]:
wrong_params.append(kwargs["official_curve"])
if kwargs['data_type'] not in ['real', 'simulated']:
wrong_params.append(kwargs['data_type'])
for param in ["method", "detect_mode", "initial_detect"]:
if type(kwargs[param]) is not str:
wrong_params.append(param)
continue
if "+" in kwargs[param]:
modes = kwargs[param].split("+")
else:
modes = [kwargs[param]]
if param == "method":
valid_values = ["circle", "shape"]
if param == "detect_mode":
valid_values = ["dynam", "offsets", "static"]
elif param == "initial_detect":
valid_values = ["dynam", "fits"]
for md in modes:
if md not in valid_values and param not in wrong_params:
wrong_params.append(param)
if not isinstance(kwargs["val_range"], list):
if kwargs["optimize"]:
wrong_params.append("val_range")
else:
warnings.append("val_range")
else:
optim_vals = kwargs["val_range"]
if optim_vals[1] <= optim_vals[0] < 0 or optim_vals[1] < 0:
if kwargs["optimize"]:
wrong_params.append("val_range")
else:
warnings.append("val_range")
if (kwargs["grid_bg"] / 200) % 2 != 1 and kwargs["grid_bg"] != 0:
wrong_params.append("grid_bg")
return wrong_params, warnings, kwargs
|
KamuishREPO_NAMEarchiPATH_START.@archi_extracted@archi-master@pyarchi@utils@misc@[email protected]_END.py
|
{
"filename": "_templateitemname.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/indicator/gauge/axis/tickformatstop/_templateitemname.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="templateitemname",
parent_name="indicator.gauge.axis.tickformatstop",
**kwargs,
):
super(TemplateitemnameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@indicator@gauge@axis@tickformatstop@[email protected]_END.py
|
{
"filename": "paths.py",
"repo_name": "ChrisBoettner/plato",
"repo_path": "plato_extracted/plato-main/plato/utils/paths.py",
"type": "Python"
}
|
import os
def get_abspath() -> str:
"""
Get the absolute path of the plato (main) directory.
Returns
-------
str
Absolute path of the plato (main) directory.
"""
return (
os.path.abspath(__file__)[: os.path.abspath(__file__).find("plato")] + "plato/"
)
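# Hypothetical usage sketch (not part of the original module): when run from
# inside the plato repository, this prints the absolute path of the plato
# (main) directory.
if __name__ == "__main__":
    print(get_abspath())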
|
ChrisBoettnerREPO_NAMEplatoPATH_START.@plato_extracted@plato-main@plato@[email protected]@.PATH_END.py
|
{
"filename": "CPU.py",
"repo_name": "classULDM/class.VFDM",
"repo_path": "class.VFDM_extracted/class.VFDM-master/CPU.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
.. module:: CPU
:synopsis: CPU, a CLASS Plotting Utility
.. moduleauthor:: Benjamin Audren <[email protected]>
.. credits:: Benjamin Audren, Jesus Torrado
.. version:: 2.0
This is a small python program aimed to gain time when comparing two spectra,
e.g. from CAMB and CLASS, or a non-linear spectrum to a linear one.
It is designed to be used in a command line fashion, not being restricted to
your CLASS directory, though it recognizes mainly CLASS output format. Far from
perfect, or complete, it could use any suggestion for enhancing it,
just to avoid losing time on useless matters for others.
Be warned that, when comparing with other formats, the following is assumed:
there are no empty lines (especially at the end of the file). Gnuplot comment
lines (starting with a #) are allowed. Violating this assumption will cause a
not-very-descriptive error in CPU; any suggestion for testing it is welcome.
Example of use:
- To superimpose two different spectra and see their global shape :
python CPU.py output/lcdm_z2_pk.dat output/lncdm_z2_pk.dat
- To see in details their ratio:
python CPU.py output/lcdm_z2_pk.dat output/lncdm_z2_pk.dat -r
The "PlanckScale" is taken with permission from Jesus Torrado's:
cosmo_mini_toolbox, available under GPLv3 at
https://github.com/JesusTorrado/cosmo_mini_toolbox
"""
from __future__ import unicode_literals, print_function
# System imports
import os
import sys
import argparse
# Numerics
import numpy as np
from numpy import ma
from scipy.interpolate import InterpolatedUnivariateSpline
from math import floor
# Plotting
import matplotlib.pyplot as plt
from matplotlib import scale as mscale
from matplotlib.transforms import Transform
from matplotlib.ticker import FixedLocator
def CPU_parser():
parser = argparse.ArgumentParser(
description=(
            'CPU, a CLASS Plotting Utility, specify whether you want\n'
'to superimpose, or plot the ratio of different files.'),
epilog=(
'A standard usage would be, for instance:\n'
'python CPU.py output/test_pk.dat output/test_pk_nl_density.dat'
' -r\npython CPU.py output/wmap_cl.dat output/planck_cl.dat'),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'files', type=str, nargs='*', help='Files to plot')
parser.add_argument('-r', '--ratio', dest='ratio', action='store_true',
help='Plot the ratio of the spectra')
parser.add_argument('-y', '--y-axis', dest='y_axis', nargs='+',
help='specify the fields you want to plot.')
parser.add_argument('-x', '--x-axis', dest='x_axis', type=str,
help='specify the field to be used on the x-axis')
parser.add_argument('--scale', type=str,
choices=['lin', 'loglog', 'loglin', 'george'],
help='Specify the scale to use for the plot')
parser.add_argument('--xlim', dest='xlim', nargs='+', type=float,
default=[], help='Specify the x range')
parser.add_argument('--ylim', dest='ylim', nargs='+', type=float,
default=[], help='Specify the y range')
parser.add_argument(
        '-p', '--print',
        dest='printfile', default='',
        help=('print the graph directly in a file. If no name is specified, it '
              'uses the name of the first input file'))
parser.add_argument(
'--repeat',
dest='repeat', action='store_true', default=False,
help='repeat the step for all redshifts with same base name')
return parser
def plot_CLASS_output(files, x_axis, y_axis, ratio=False, printing='',
output_name='', extension='', x_variable='',
scale='lin', xlim=[], ylim=[]):
"""
Load the data to numpy arrays, write all the commands for plotting to a
Python script for further refinment, and display them.
Inspired heavily by the matlab version by Thomas Tram
Parameters
----------
files : list
List of files to plot
x-axis : string
name of the column to use as the x coordinate
y-axis : list, str
List of items to plot, which should match the way they appear in the
        file, for instance: ['TT', 'BB']
Keyword Arguments
-----------------
ratio : bool
If set to yes, plots the ratio of the files, taking as a reference the
first one
output_name : str
Specify a different name for the produced figure (by default, it takes
the name of the first file, and replace the .dat by .pdf)
extension : str
"""
# Define the python script name, and the pdf path
python_script_path = os.path.splitext(files[0])[0]+'.py'
# The variable text will contain all the lines to be printed in the end to
# the python script path, joined with newline characters. Beware of the
# indentation.
text = ['import matplotlib.pyplot as plt',
'import numpy as np',
'import itertools', '']
# Load all the graphs
data = []
for data_file in files:
data.append(np.loadtxt(data_file))
# Create the full_path_files list, that contains the absolute path, so that
# the future python script can import them directly.
full_path_files = [os.path.abspath(elem) for elem in files]
text += ['files = %s' % full_path_files]
text += ['data = []',
'for data_file in files:',
' data.append(np.loadtxt(data_file))']
# Recover the base name of the files, everything before the dot
roots = [elem.split(os.path.sep)[-1].split('.')[0] for elem in files]
text += ['roots = [%s]' % ', '.join(["'%s'" % root for root in roots])]
# Create the figure and ax objects
fig, ax = plt.subplots()
text += ['', 'fig, ax = plt.subplots()']
# if ratio is not set, then simply plot them all
original_y_axis = y_axis
legend = []
if not ratio:
for index, curve in enumerate(data):
# Recover the number of columns in the first file, as well as their
# title.
num_columns, names, tex_names = extract_headers(files[index])
text += ['', 'index, curve = %i, data[%i]' % (index, index)]
# Check if everything is in order
if num_columns == 2:
y_axis = [names[1]]
elif num_columns > 2:
# in case y_axis was only a string, cast it to a list
if isinstance(original_y_axis, str):
y_axis = [original_y_axis]
else:
y_axis = original_y_axis
# Store the selected text and tex_names to the script
selected = []
for elem in y_axis:
selected.extend(
[name for name in names if name.find(elem) != -1 and
name not in selected])
if not y_axis:
selected = names[1:]
y_axis = selected
# Decide for the x_axis, by default the index will be set to zero
x_index = 0
if x_axis:
for index_name, name in enumerate(names):
if name.find(x_axis) != -1:
x_index = index_name
break
# Store to text
text += ['y_axis = %s' % selected]
text += ['tex_names = %s' % [elem for (elem, name) in
zip(tex_names, names) if name in selected]]
text += ["x_axis = '%s'" % tex_names[x_index]]
text += ["ylim = %s" % ylim]
text += ["xlim = %s" % xlim]
for selec in y_axis:
index_selec = names.index(selec)
plot_line = 'ax.'
if scale == 'lin':
plot_line += 'plot(curve[:, %i], curve[:, %i])' % (
x_index, index_selec)
ax.plot(curve[:, x_index], curve[:, index_selec])
elif scale == 'loglog':
plot_line += 'loglog(curve[:, %i], abs(curve[:, %i]))' % (
x_index, index_selec)
ax.loglog(curve[:, x_index], abs(curve[:, index_selec]))
elif scale == 'loglin':
plot_line += 'semilogx(curve[:, %i], curve[:, %i])' % (
x_index, index_selec)
ax.semilogx(curve[:, x_index], curve[:, index_selec])
elif scale == 'george':
plot_line += 'plot(curve[:, %i], curve[:, %i])' % (
x_index, index_selec)
ax.plot(curve[:, x_index], curve[:, index_selec])
ax.set_xscale('planck')
text += [plot_line]
legend.extend([roots[index]+': '+elem for elem in y_axis])
ax.legend(legend, loc='best')
text += ["",
"ax.legend([root+': '+elem for (root, elem) in",
" itertools.product(roots, y_axis)], loc='best')",
""]
else:
ref = data[0]
num_columns, ref_curve_names, ref_tex_names = extract_headers(files[0])
# Check if everything is in order
if num_columns == 2:
y_axis_ref = [ref_curve_names[1]]
elif num_columns > 2:
# in case y_axis was only a string, cast it to a list
if isinstance(original_y_axis, str):
y_axis_ref = [original_y_axis]
else:
y_axis_ref = original_y_axis
# Store the selected text and tex_names to the script
selected = []
for elem in y_axis_ref:
selected.extend([name for name in ref_curve_names if name.find(elem) != -1 and
name not in selected])
y_axis_ref = selected
# Decide for the x_axis, by default the index will be set to zero
x_index_ref = 0
if x_axis:
for index_name, name in enumerate(ref_curve_names):
if name.find(x_axis) != -1:
x_index_ref = index_name
break
for idx in range(1, len(data)):
current = data[idx]
num_columns, names, tex_names = extract_headers(files[idx])
# Check if everything is in order
if num_columns == 2:
y_axis = [names[1]]
elif num_columns > 2:
# in case y_axis was only a string, cast it to a list
if isinstance(original_y_axis, str):
y_axis = [original_y_axis]
else:
y_axis = original_y_axis
# Store the selected text and tex_names to the script
selected = []
for elem in y_axis:
selected.extend([name for name in names if name.find(elem) != -1 and
name not in selected])
y_axis = selected
text += ['y_axis = %s' % selected]
text += ['tex_names = %s' % [elem for (elem, name) in
zip(tex_names, names) if name in selected]]
# Decide for the x_axis, by default the index will be set to zero
x_index = 0
if x_axis:
for index_name, name in enumerate(names):
if name.find(x_axis) != -1:
x_index = index_name
break
text += ["x_axis = '%s'" % tex_names[x_index]]
for selec in y_axis:
# Do the interpolation
axis = ref[:, x_index_ref]
reference = ref[:, ref_curve_names.index(selec)]
#plt.loglog(current[:, x_index], current[:, names.index(selec)])
#plt.show()
#interpolated = splrep(current[:, x_index],
#current[:, names.index(selec)])
interpolated = InterpolatedUnivariateSpline(current[:, x_index],
current[:, names.index(selec)])
if scale == 'lin':
#ax.plot(axis, splev(ref[:, x_index_ref],
#interpolated)/reference-1)
ax.plot(axis, interpolated(ref[:, x_index_ref])/reference-1)
elif scale == 'loglin':
#ax.semilogx(axis, splev(ref[:, x_index_ref],
#interpolated)/reference-1)
ax.semilogx(axis, interpolated(ref[:, x_index_ref])/reference-1)
elif scale == 'loglog':
raise InputError(
"loglog plot is not available for ratios")
if 'TT' in names:
ax.set_xlabel('$\ell$', fontsize=16)
text += ["ax.set_xlabel('$\ell$', fontsize=16)"]
elif 'P' in names:
ax.set_xlabel('$k$ [$h$/Mpc]', fontsize=16)
text += ["ax.set_xlabel('$k$ [$h$/Mpc]', fontsize=16)"]
else:
ax.set_xlabel(tex_names[x_index], fontsize=16)
text += ["ax.set_xlabel('%s', fontsize=16)" % tex_names[x_index]]
if xlim:
if len(xlim) > 1:
ax.set_xlim(xlim)
text += ["ax.set_xlim(xlim)"]
else:
ax.set_xlim(xlim[0])
text += ["ax.set_xlim(xlim[0])"]
ax.set_ylim()
text += ["ax.set_ylim()"]
if ylim:
if len(ylim) > 1:
ax.set_ylim(ylim)
text += ["ax.set_ylim(ylim)"]
else:
ax.set_ylim(ylim[0])
text += ["ax.set_ylim(ylim[0])"]
text += ['plt.show()']
plt.show()
    # If the user wants to print the figure to a file
if printing:
fig.savefig(printing)
text += ["fig.savefig('%s')" % printing]
# Write to the python file all the issued commands. You can then reproduce
# the plot by running "python output/something_cl.dat.py"
with open(python_script_path, 'w') as python_script:
print('Creating a python script to reproduce the figure')
print('--> stored in %s' % python_script_path)
python_script.write('\n'.join(text))
    # If the user wants to print the figure to a file
if printing:
fig.savefig(printing)
class FormatError(Exception):
"""Format not recognised"""
pass
class TypeError(Exception):
"""Spectrum type not recognised"""
pass
class NumberOfFilesError(Exception):
"""Invalid number of files"""
pass
class InputError(Exception):
"""Incompatible input requirements"""
pass
def replace_scale(string):
"""
This assumes that the string starts with "(.)", which will be replaced by
(8piG/3)
>>> print replace_scale('(.)toto')
>>> '(8\\pi G/3)toto'
"""
string_list = list(string)
string_list.pop(1)
string_list[1:1] = list('8\\pi G/3')
return ''.join(string_list)
def process_long_names(long_names):
"""
Given the names extracted from the header, return two arrays, one with the
short version, and one tex version
>>> names, tex_names = process_long_names(['(.)toto', 'proper time [Gyr]'])
>>> print names
>>> ['toto', 'proper time']
>>> print tex_names
    >>> ['(8\\pi G/3)toto', 'proper time [Gyr]']
"""
names = []
tex_names = []
# First pass, to remove the leading scales
for name in long_names:
# This can happen in the background file
if name.startswith('(.)', 0):
temp_name = name[3:]
names.append(temp_name)
tex_names.append(replace_scale(name))
# Otherwise, we simply
else:
names.append(name)
tex_names.append(name)
# Finally, remove any extra spacing
names = [''.join(elem.split()) for elem in names]
return names, tex_names
def extract_headers(header_path):
with open(header_path, 'r') as header_file:
header = [line for line in header_file if line[0] == '#']
header = header[-1]
# Count the number of columns in the file, and recover their name. Thanks
# Thomas Tram for the trick
indices = [i+1 for i in range(len(header)) if
header.startswith(':', i)]
num_columns = len(indices)
long_names = [header[indices[i]:indices[(i+1)]-3].strip() if i < num_columns-1
else header[indices[i]:].strip()
for i in range(num_columns)]
# Process long_names further to handle special cases, and extract names,
# which will correspond to the tags specified in "y_axis".
names, tex_names = process_long_names(long_names)
return num_columns, names, tex_names
def main():
print('~~~ Running CPU, a CLASS Plotting Utility ~~~')
parser = CPU_parser()
# Parse the command line arguments
args = parser.parse_args()
# if there are no argument in the input, print usage
if len(args.files) == 0:
parser.print_usage()
return
# if the first file name contains cl or pk, infer the type of desired
# spectrum
if not args.y_axis:
if args.files[0].rfind('cl') != -1:
scale = 'loglog'
elif args.files[0].rfind('pk') != -1:
scale = 'loglog'
else:
scale = 'lin'
args.y_axis = []
else:
scale = ''
if not args.scale:
if scale:
args.scale = scale
else:
args.scale = 'lin'
# Remove extra spacing in the y_axis list
args.y_axis = [''.join(elem.split()) for elem in args.y_axis]
# If ratio is asked, but only one file was passed in argument, politely
# complain
if args.ratio:
if len(args.files) < 2:
raise NumberOfFilesError(
"If you want me to compute a ratio between two files, "
"I strongly encourage you to give me at least two of them.")
# actual plotting. By default, a simple superposition of the graph is
# performed. If asked to be divided, the ratio is shown - whether a need
# for interpolation arises or not.
if args.ratio and args.scale == 'loglog':
print("Defaulting to loglin scale")
args.scale = 'loglin'
plot_CLASS_output(args.files, args.x_axis, args.y_axis,
ratio=args.ratio, printing=args.printfile,
scale=args.scale, xlim=args.xlim, ylim=args.ylim)
# Helper code from cosmo_mini_toolbox, by Jesus Torrado, available fully at
# https://github.com/JesusTorrado/cosmo_mini_toolbox, to use the log then
# linear scale for the multipole axis when plotting Cl.
nonpos = "mask"
change = 50.0
factor = 500.
def _mask_nonpos(a):
"""
    Return a Numpy masked array where all non-positive values are
    masked. If there are no non-positive values, the original array
    is returned.
"""
mask = a <= 0.0
if mask.any():
return ma.MaskedArray(a, mask=mask)
return a
def _clip_smaller_than_one(a):
a[a <= 0.0] = 1e-300
return a
class PlanckScale(mscale.ScaleBase):
"""
Scale used by the Planck collaboration to plot Temperature power spectra:
base-10 logarithmic up to l=50, and linear from there on.
Care is taken so non-positive values are not plotted.
"""
name = 'planck'
def __init__(self, axis, **kwargs):
pass
def set_default_locators_and_formatters(self, axis):
axis.set_major_locator(
FixedLocator(
np.concatenate((np.array([2, 10, change]),
np.arange(500, 2500, 500)))))
axis.set_minor_locator(
FixedLocator(
np.concatenate((np.arange(2, 10),
np.arange(10, 50, 10),
np.arange(floor(change/100), 2500, 100)))))
def get_transform(self):
"""
Return a :class:`~matplotlib.transforms.Transform` instance
appropriate for the given logarithm base.
"""
return self.PlanckTransform(nonpos)
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to positive values.
"""
return (vmin <= 0.0 and minpos or vmin,
vmax <= 0.0 and minpos or vmax)
class PlanckTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self, nonpos):
Transform.__init__(self)
if nonpos == 'mask':
self._handle_nonpos = _mask_nonpos
else:
                self._handle_nonpos = _clip_smaller_than_one
def transform_non_affine(self, a):
lower = a[np.where(a<=change)]
greater = a[np.where(a> change)]
if lower.size:
lower = self._handle_nonpos(lower * 10.0)/10.0
if isinstance(lower, ma.MaskedArray):
lower = ma.log10(lower)
else:
lower = np.log10(lower)
lower = factor*lower
if greater.size:
greater = (factor*np.log10(change) + (greater-change))
# Only low
if not(greater.size):
return lower
# Only high
if not(lower.size):
return greater
return np.concatenate((lower, greater))
def inverted(self):
return PlanckScale.InvertedPlanckTransform()
class InvertedPlanckTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def transform_non_affine(self, a):
lower = a[np.where(a<=factor*np.log10(change))]
greater = a[np.where(a> factor*np.log10(change))]
if lower.size:
if isinstance(lower, ma.MaskedArray):
lower = ma.power(10.0, lower/float(factor))
else:
lower = np.power(10.0, lower/float(factor))
if greater.size:
greater = (greater + change - factor*np.log10(change))
# Only low
if not(greater.size):
return lower
# Only high
if not(lower.size):
return greater
return np.concatenate((lower, greater))
def inverted(self):
return PlanckTransform()
# Finished. Register the scale!
mscale.register_scale(PlanckScale)
if __name__ == '__main__':
sys.exit(main())
| |
{
"filename": "3_spectra_workbook_nrs2.ipynb",
"repo_name": "Exo-TiC/ExoTiC-JEDI",
"repo_path": "ExoTiC-JEDI_extracted/ExoTiC-JEDI-master/notebooks/padawan/3_spectra_workbook_nrs2.ipynb",
"type": "Jupyter Notebook"
}
|
# NIRSpec G395H Pipeline Workbook
## ERS WASP 39b NRS2
```python
# %matplotlib nbagg
```
```python
workbook = 'insert tag for plots'
```
```python
import numpy as np
import matplotlib.pyplot as plt
import glob
from astropy.io import fits
import pickle
import exotic_jedi as jedi
```
```python
# Setting some rc params
plt.rcParams['figure.figsize'] = [10.0, 3.0] # Dimensions
plt.rcParams['figure.dpi'] = 300 # Resolution
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['image.aspect'] = 7 # Aspect ratio
plt.rcParams['lines.linewidth'] = 1
cmap = plt.cm.magma
cmap.set_bad('k',1.)
from matplotlib import cycler
plt.rcParams['image.cmap'] = 'magma' # Colormap.
plt.rcParams['image.interpolation'] = None
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['axes.prop_cycle'] = cycler(color=['dodgerblue','lawngreen','tomato','darkorchid','gold','lightgray'])
```
/tmp/ipykernel_418145/4030760818.py:10: MatplotlibDeprecationWarning: You are modifying the state of a globally registered colormap. This has been deprecated since 3.3 and in 3.6, you will not be able to modify a registered colormap in-place. To remove this warning, you can make a copy of the colormap first. cmap = mpl.cm.get_cmap("magma").copy()
cmap.set_bad('k',1.)
/tmp/ipykernel_418145/4030760818.py:14: MatplotlibDeprecationWarning: Support for setting an rcParam that expects a str value to a non-str value is deprecated since 3.5 and support will be removed two minor releases later.
plt.rcParams['image.interpolation'] = None
## Load in observations
```python
# Load in the data files
data_files_path = '/YOUR_DATA_PATH_HERE/' #point to where the data is
rateints_files_nrs1 = glob.glob(data_files_path+'*nrs1_stage_1.fits')
rateints_files_nrs2 = glob.glob(data_files_path+'*nrs2_stage_1.fits')
# Grab relevant ancillary files, etc
ancillary_files = '/YOUR_DATA_PATH_HERE/'
times = np.loadtxt(ancillary_files + 'jw01366003001_04101_00001-seg001-003_nrs2_times.txt')
wvl_fits = fits.open(ancillary_files + 'jw01366003001_04101_00001-seg001_nrs2_stage_2_wavelengthmap.fits')[0].data
```
```python
# Get the gain from the crds files for error calculation
gain = np.median(fits.open('/YOUR_CRDS_PATH_HERE/'+'jwst_nirspec_gain_nrs2.fits')[1].data)
```
```python
midtime_bjd = times[:,5]
# Get the integration time from the time file for error calculation
integration_time = np.median(abs((times[:,4]-times[:,6])*24*60*60))
print(len(midtime_bjd), integration_time)
```
465 63.158400007523596
Since the observations come down in segments, we want to stitch them back together so we can run the full dataset.
We'll use Jeff Valenti's unsegment function, which unpacks everything into a nice 3D array for us. We also need to do this for the science, error, and data quality flag extensions so we can clean everything up later.
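To give a feel for what that stitching does (jedi.unsegment handles the real bookkeeping), a bare-bones sketch that just concatenates one FITS extension from each segment along the integration axis might look like this; the helper name is purely illustrative:
```python
# Minimal sketch of segment stitching (the real work is done by jedi.unsegment);
# assumes each segment file stores the cube in the same FITS extension.
import numpy as np
from astropy.io import fits

def stitch_segments(segment_files, extension=1):
    """Concatenate a FITS extension from each segment along the integration axis."""
    cubes = []
    for fname in sorted(segment_files):
        with fits.open(fname) as hdul:
            cubes.append(hdul[extension].data)
    full_cube = np.concatenate(cubes, axis=0)  # (n_integrations, n_rows, n_cols)
    return full_cube, full_cube.shape

# e.g. cube, shape = stitch_segments(rateints_files_nrs2, extension=1)
```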
```python
trimming = 5
wvl_fits = wvl_fits[:,trimming-5:]
sci_cube, _ = jedi.unsegment(sorted(rateints_files_nrs2), 1)
sci_cube = sci_cube[:,:,trimming:-5]
print("Science",sci_cube.shape)
err_cube, size = jedi.unsegment(sorted(rateints_files_nrs2), 2)
err_cube = err_cube[:,:,trimming:-5]
print("Errors",err_cube.shape)
dq_cube, size = jedi.unsegment(sorted(rateints_files_nrs2), 3)
dq_cube = dq_cube[:,:,trimming:-5]
print("DQ Flags",dq_cube.shape)
```
(465, 32, 2038)
## Quick Look
First let's take a look at the breakdown of data quality flags. The dq_flag_metrics() function tells us the number of pixels in each category, and how many pixels in our entire observation have a DQ flag.
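For a sense of what that metric is doing under the hood, counting DQ bits is just a numpy bitwise test per flag; the helper below is an illustrative stand-in, not the jedi function itself:
```python
# Illustrative sketch of tallying JWST DQ bits with numpy bitwise tests
# (the workbook uses jedi.dq_flag_metrics for the real report).
import numpy as np

def count_dq_bits(dq_cube, bit_names):
    """Count how many pixels have each DQ bit set, plus the overall flagged fraction."""
    counts = {}
    for bit, name in enumerate(bit_names):
        counts[name] = int(np.count_nonzero(dq_cube & (1 << bit)))
    flagged_fraction = np.count_nonzero(dq_cube != 0) / dq_cube.size
    return counts, flagged_fraction

# Example with the first few bits from the table below:
# counts, frac = count_dq_bits(dq_cube, ["DO_NOT_USE", "SATURATED", "JUMP_DET"])
```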
```python
jedi.dq_flag_metrics(sci_cube, dq_cube, plot_bit=None)
```
===== DQ flags info =====
Found 30226 pixels with DQ bit=0 name=DO_NOT_USE.
Found 12472 pixels with DQ bit=1 name=SATURATED.
Found 110406 pixels with DQ bit=2 name=JUMP_DET.
Found 0 pixels with DQ bit=3 name=DROPOUT.
Found 0 pixels with DQ bit=4 name=OUTLIER.
Found 0 pixels with DQ bit=5 name=PERSISTENCE.
Found 0 pixels with DQ bit=6 name=AD_FLOOR.
Found 0 pixels with DQ bit=7 name=RESERVED.
Found 0 pixels with DQ bit=8 name=UNRELIABLE_ERROR.
Found 0 pixels with DQ bit=9 name=NON_SCIENCE.
Found 33480 pixels with DQ bit=10 name=DEAD.
Found 0 pixels with DQ bit=11 name=HOT.
Found 0 pixels with DQ bit=12 name=WARM.
Found 7440 pixels with DQ bit=13 name=LOW_QE.
Found 0 pixels with DQ bit=14 name=RC.
Found 0 pixels with DQ bit=15 name=TELEGRAPH.
Found 0 pixels with DQ bit=16 name=NONLINEAR.
Found 0 pixels with DQ bit=17 name=BAD_REF_PIXEL.
Found 0 pixels with DQ bit=18 name=NO_FLAT_FIELD.
Found 25575 pixels with DQ bit=19 name=NO_GAIN_VALUE.
Found 0 pixels with DQ bit=20 name=NO_LIN_CORR.
Found 0 pixels with DQ bit=21 name=NO_SAT_CHECK.
Found 0 pixels with DQ bit=22 name=UNRELIABLE_BIAS.
Found 0 pixels with DQ bit=23 name=UNRELIABLE_DARK.
Found 0 pixels with DQ bit=24 name=UNRELIABLE_SLOPE.
Found 0 pixels with DQ bit=25 name=UNRELIABLE_FLAT.
Found 0 pixels with DQ bit=26 name=OPEN.
Found 3720 pixels with DQ bit=27 name=ADJ_OPEN.
Found 0 pixels with DQ bit=28 name=UNRELIABLE_RESET.
Found 0 pixels with DQ bit=29 name=MSA_FAILED_OPEN.
Found 0 pixels with DQ bit=30 name=OTHER_BAD_PIXEL.
Found 0 pixels with DQ bit=31 name=REFERENCE_PIXEL.
DQ fraction of total pixels=0.736 %
Next we should check out a random image, just to make sure we're seeing what we expect
```python
initial_look = (sci_cube[113])
plt.figure()
plt.imshow((initial_look))#, vmin=0, vmax=4.5)
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="counts", orientation='horizontal')
plt.show()
plt.figure()
plt.imshow(np.log10(initial_look))#, vmin=-1, vmax=2.5)
#plt.title('{}'.format(workbook))
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="log(counts)", orientation='horizontal')
plt.show()
```

/tmp/ipykernel_418145/545534538.py:11: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(initial_look))#, vmin=-1, vmax=2.5)
/tmp/ipykernel_418145/545534538.py:11: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(initial_look))#, vmin=-1, vmax=2.5)

yeay! a nirspec!
## Data Quality Flags
Using the jwst pipeline data quality flags, we'll first replace any flagged pixel with the median of its neighbours, i.e., pixels in the same row within a window region.
We can specify which flags we want to replace using the bits_to_mask argument, where each number corresponds to the bit for an individual flag.
Some key examples are
- 0 DO_NOT_USE, bad pixels (dq flag = 1)
- 1 SATURATED, pixel saturated during exposure (dq flag = 2)
- 10 DEAD, dead pixel (dq flag = 1024)
- 11 HOT, hot pixel
Sometimes the flags seem a little overzealous on the hot pixel one, so maybe check this a couple of times before committing!
There's more detail on each of the flags in Table 3 here: https://jwst-pipeline.readthedocs.io/_/downloads/en/latest/pdf/
(this step can take a little while, there are a lot of pixels!)
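For context, here's a rough sketch of the kind of row-wise window-median replacement jedi.dq_flat_replace performs; the helper name and the exact neighbour selection are illustrative assumptions, and the real function is both smarter and faster:
```python
# A minimal sketch of replacing flagged pixels with the median of same-row
# neighbours inside a window (jedi.dq_flat_replace does the real, faster job).
import numpy as np

def replace_flagged(image, flag_mask, window=4):
    """Replace pixels where flag_mask is True with the median of nearby unflagged pixels."""
    cleaned = image.copy()
    n_rows, n_cols = image.shape
    for row, col in zip(*np.where(flag_mask)):
        lo, hi = max(0, col - window), min(n_cols, col + window + 1)
        neighbours = image[row, lo:hi][~flag_mask[row, lo:hi]]
        if neighbours.size:
            cleaned[row, col] = np.median(neighbours)
    return cleaned
```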
```python
"""
# If you knew for example that one particular pixel was bad but wasn't being flagged or cleaned for some reason
# you could flag it here as a non-science value so you can force it to get cleaned here by uncommenting this line
"""
# dq_cube[:,24,1970]+=(2**9)
rawest_data = jedi.dq_flat_replace(sci_cube, dq_cube, bits_to_mask=[0,1,9,10,11,13,19], window_size=4)
```
100%|████████████████████████████████| 150656/150656 [00:07<00:00, 20275.17it/s]
Replaced 43627 pixels
## Outliers Through Space and Time
(would be a cool movie!)
Now we want to check for any significant outliers throughout the dataset, either pixels that are constantly Not Good (space), or pixels that are not good during a particular integration (time).
We'll replace the outliers with the median of nearby values along whichever axis they are outliers in.
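As a rough illustration of the time-axis check (the real jedi.outliers_through_time iterates with a rolling window and the n_sig threshold until no more outliers are found), a simple global sigma clip per pixel would look something like this:
```python
# Rough sketch of flagging time-axis outliers pixel by pixel; purely illustrative.
import numpy as np

def clip_outliers_in_time(cube, n_sig=20):
    """Replace samples more than n_sig sigma from each pixel's time-series median."""
    cleaned = cube.copy()
    median = np.median(cube, axis=0)                 # per-pixel median over time
    sigma = np.std(cube, axis=0)                     # per-pixel scatter over time
    outliers = np.abs(cube - median) > n_sig * sigma
    cleaned[outliers] = np.broadcast_to(median, cube.shape)[outliers]
    return cleaned, int(outliers.sum())
```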
```python
raw_data = jedi.outliers_through_time(rawest_data, window_size=10, n_sig=20, plot=False)
# WINDOW SIZE IS IMPORTANT
```
No more outliers found
In total 451 outliers were found
```python
cleaned_data, counter = jedi.outliers_through_space(raw_data, replace_window=4, search_window=21, poly_order=0, n_sig=6, plot=True)
```
0%| | 0/465 [00:00<?, ?it/s]/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:152: RuntimeWarning: invalid value encountered in divide
dev_row = np.abs(res_row) / np.std(res_row)
100%|█████████████████████████████████████████| 465/465 [02:59<00:00, 2.59it/s]

We can plot up that same image from before at each stage of the cleaning to watch how each step takes care of noisy regions
```python
plt.figure()
plt.imshow(np.log10(initial_look))#, vmin=-1, vmax=2.5)
#plt.title('{}'.format(workbook))
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="log(counts)", orientation='horizontal')
plt.show()
plt.figure()
plt.imshow(np.log10(rawest_data[113]))#, vmin=-1, vmax=2.5)
#plt.title('{}'.format(workbook))
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="log(counts)", orientation='horizontal')
plt.show()
plt.figure()
plt.imshow(np.log10(raw_data[113]))#, vmin=-1, vmax=2.5)
#plt.title('{}'.format(workbook))
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="log(counts)", orientation='horizontal')
plt.show()
plt.figure()
plt.imshow(np.log10(cleaned_data[113]))#, vmin=-1, vmax=2.5)
#plt.title('{}'.format(workbook))
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.colorbar(label="log(counts)", orientation='horizontal')
plt.show()
```
/tmp/ipykernel_418145/3584592247.py:2: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(initial_look))#, vmin=-1, vmax=2.5)
/tmp/ipykernel_418145/3584592247.py:2: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(initial_look))#, vmin=-1, vmax=2.5)

/tmp/ipykernel_418145/3584592247.py:10: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(rawest_data[113]))#, vmin=-1, vmax=2.5)
/tmp/ipykernel_418145/3584592247.py:10: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(rawest_data[113]))#, vmin=-1, vmax=2.5)

/tmp/ipykernel_418145/3584592247.py:18: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(raw_data[113]))#, vmin=-1, vmax=2.5)
/tmp/ipykernel_418145/3584592247.py:18: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(raw_data[113]))#, vmin=-1, vmax=2.5)

/tmp/ipykernel_418145/3584592247.py:26: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(cleaned_data[113]))#, vmin=-1, vmax=2.5)
/tmp/ipykernel_418145/3584592247.py:26: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(cleaned_data[113]))#, vmin=-1, vmax=2.5)

## Find the aperture
Here we want to fit a gaussian to each column of the test image to get the center and width, and then fit a polynomial to each of those across the x axis
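To show the idea (the actual fitting, aperture handling, and extrapolation options are handled by jedi.get_aperture below), a bare-bones version of the column-by-column Gaussian fit plus polynomial smoothing might look like this; the function names and defaults here are just illustrative:
```python
# Sketch of the idea behind the trace fit: a Gaussian per column, then
# polynomials across the dispersion axis to smooth the centres and widths.
import numpy as np
from scipy.optimize import curve_fit

def gaussian(y, amp, centre, sigma, offset):
    return amp * np.exp(-0.5 * ((y - centre) / sigma) ** 2) + offset

def fit_trace(image, width_guess=0.7, poly_order=4):
    rows = np.arange(image.shape[0])
    centres, widths = [], []
    for col in range(image.shape[1]):
        profile = image[:, col]
        p0 = [profile.max(), rows[np.argmax(profile)], width_guess, np.median(profile)]
        try:
            popt, _ = curve_fit(gaussian, rows, profile, p0=p0)
            centres.append(popt[1])
            widths.append(abs(popt[2]))
        except RuntimeError:      # fit failed on this column
            centres.append(np.nan)
            widths.append(np.nan)
    cols = np.arange(image.shape[1])
    good = np.isfinite(centres)
    centre_poly = np.polyval(np.polyfit(cols[good], np.array(centres)[good], poly_order), cols)
    width_poly = np.polyval(np.polyfit(cols[good], np.array(widths)[good], poly_order), cols)
    return centre_poly, width_poly
```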
```python
test_image = cleaned_data[100].copy() #/ flat # do our flat field correction on our test image
test_err = err_cube[100].copy()
# Might need to vary these initially to get the thing going
ap_start = 0 # x pixel where we want to start hunting for the trace
ap_end = 2038 # x pixel where the trace ends (or just the edge of the detector probs for g395h)
init_guess = 0.7 # how wide do we think the trace is
# Will want to test different values of these to make sure getting the best lcs
polynomial_order = [4, 4] # what order polynomials to use when fitting the trace position and its width
median_filter_window = 5 # MUST BE ODD NUMBER window size applied to the median filter that smooths the trace widths
aperture_width = 5 # number of fwhms to extend the aperture to
extrapolate_method = 'continue' # 'flatten', 'continue' or None
continue_value = [0,0]
trace_falls_off = True # set this to True if the trace falls off the top/bottom of the detector
# this will enable the aperture to default to the top and bottom edge of the detector
# rather than returning an error message
trace_position, trace_width, upper_ap, lower_ap, up_trim, low_trim = jedi.get_aperture(test_image, init_guess, \
ap_start, ap_end, \
poly_orders=polynomial_order, \
width=aperture_width, \
medflt=median_filter_window, \
extrapolate_method=extrapolate_method,\
continue_value=continue_value, \
set_to_edge=trace_falls_off)
plt.figure()
if extrapolate_method=='continue':
ap_start = ap_start-continue_value[0]
ap_end = ap_end+continue_value[1]
plt.plot(np.arange(ap_start,ap_end),trace_position,color='k',ls='--')
plt.xlabel("$x$ pixel")
plt.ylabel("$y$ pixel")
plt.fill_between(np.arange(0,np.shape(test_image)[1]), upper_ap, lower_ap, facecolor = 'None',edgecolor='w')
plt.fill_between(np.arange(ap_start,ap_end), up_trim, low_trim, facecolor="None", edgecolor='g')
plt.imshow(np.log10(test_image))#,vmin=0,vmax=4.5)
plt.colorbar(label="log(counts)", orientation='horizontal')
#plt.title('{}'.format(workbook))
plt.show()
```
/tmp/ipykernel_418145/2203269741.py:46: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(test_image))#,vmin=0,vmax=4.5)
/tmp/ipykernel_418145/2203269741.py:46: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(test_image))#,vmin=0,vmax=4.5)

```python
pixel_column = 1000
jedi.column_fit_visualiser(test_image, pixel_column, init_guess, aperture_width)
```
## 1/f noise
We're gonna want to do a column by column median, masking the spectral trace area.
Let's define a buffer region above and below the aperture to make sure we're definitely not including any of that spectrum
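For reference, the core of that correction is just a per-column background median subtraction, something like the sketch below (the real masking and plotting live in jedi.f_noise_zone and jedi.remove_fnoise, and the mask convention here is an assumption):
```python
# Minimal sketch of 1/f removal: take the median of background (non-trace) pixels
# in each column and subtract it from that whole column.
import numpy as np

def subtract_column_medians(image, trace_mask):
    """trace_mask is True on pixels to exclude (the spectrum plus a buffer)."""
    background = np.ma.masked_array(image, mask=trace_mask)
    column_medians = np.ma.median(background, axis=0).filled(0.0)
    return image - column_medians[np.newaxis, :]
```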
```python
fnoise_mask, _, _ = jedi.f_noise_zone(test_image, upper_ap, lower_ap, ap_buffers=[4,4], plot=True, set_to_edge=True, vmin=-3, vmax=2.5)
```
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:563: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(im),vmin=vmin,vmax=vmax)
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:563: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(im),vmin=vmin,vmax=vmax)

/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:583: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(np.ma.masked_array(im,mask=mask)), vmin=vmin,vmax=vmax)
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:583: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(np.ma.masked_array(im,mask=mask)), vmin=vmin,vmax=vmax)

Now let's actually remove the 1/f noise from the test image to make sure the region wasn't accidentally clipping anything
```python
clean_test_im = jedi.remove_fnoise(test_image, fnoise_mask, plot=True, vmin=-3, vmax=2.5)
```
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:615: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(im),vmin=vmin,vmax=vmax)
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:615: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(im),vmin=vmin,vmax=vmax)

/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:623: RuntimeWarning: divide by zero encountered in log10
plt.imshow(np.log10(clean_im),vmin=vmin,vmax=vmax)
/home/ym20900/ers/w39_g395/r7/exotic_jedi.py:623: RuntimeWarning: invalid value encountered in log10
plt.imshow(np.log10(clean_im),vmin=vmin,vmax=vmax)


## Extract!
We want to do an intrapixel extraction so we don't erase all the hard work we put into making a curvy trace.
Here we'll quickly compare the intrapixel extraction with a more standard hard-edged box so we can see how it impacts the **shape** of the spectrum, not just the total flux!
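To make the distinction concrete, here's a minimal single-column sketch of the two approaches; the aperture bookkeeping in jedi.intrapixel_extraction and jedi.basic_extraction is more careful than this, so treat it purely as an illustration:
```python
# Sketch of the two extraction flavours for a single column: a hard-edged box
# sums whole pixels, while an "intrapixel" sum weights the edge pixels by the
# fraction of each that falls inside the (non-integer) aperture limits.
# Assumes 0 <= lower < upper < len(column) and an aperture spanning >= 2 pixels.
import numpy as np

def box_extract_column(column, lower, upper):
    return column[int(np.ceil(lower)):int(np.floor(upper)) + 1].sum()

def intrapixel_extract_column(column, lower, upper):
    weights = np.zeros_like(column, dtype=float)
    lo, hi = int(np.floor(lower)), int(np.floor(upper))
    weights[lo + 1:hi] = 1.0                 # fully enclosed pixels
    weights[lo] = (lo + 1) - lower           # partial pixel at the lower edge
    weights[hi] = upper - hi                 # partial pixel at the upper edge
    return (column * weights).sum()
```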
```python
test_spectrum, _ = jedi.intrapixel_extraction(test_image, test_err, upper_ap, lower_ap)
test_spectrum_basic, _ = jedi.basic_extraction(test_image, test_err, upper_ap, lower_ap)
figure, ax = plt.subplots(2,1, figsize=(10,7))
ax[0].plot(test_spectrum, alpha=0.8, label="Intrapixel")
ax[0].plot(test_spectrum_basic, alpha=0.8, label="Basic")
ax[0].set_xlabel("Column Number")
ax[0].set_ylabel("Counts")
ax[0].legend()
ax[1].plot(test_spectrum-test_spectrum_basic, marker='.', ls='None')
ax[1].axhline(0,ls=':', color='k', alpha=0.7, zorder=0)
ax[1].set_xlabel("Column Number")
ax[1].set_ylabel("Residuals")
plt.title('{}'.format(workbook))
plt.show()
```

The extraction method is important!! The difference isn't just that one gives a higher overall flux; the two methods produce spectra with different shapes.
## Get! Those! Spectra!
With everything we've set up, it's time to extract the spectra and cross-correlate them for x and y pixel shifts.
Playing around with the trimming and the high-res factors here might be important. In the tuples, each number refers to either the x or y direction,
i.e., trim_spec=[value_for_x, value_for_y].
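Under the hood, the shift measurement boils down to cross-correlating each spectrum against a reference on an oversampled grid; a stripped-down sketch of that idea (with made-up defaults, not the exact jedi implementation) is:
```python
# Rough sketch of measuring the x-shift of one spectrum against a reference by
# cross-correlating on an oversampled grid.
import numpy as np
from scipy.interpolate import interp1d

def measure_shift(spectrum, reference, high_res_factor=0.01, max_shift=10):
    x = np.arange(reference.size)
    fine_x = np.arange(0, reference.size - 1, high_res_factor)
    ref_fine = interp1d(x, reference, kind="cubic")(fine_x)
    spec_interp = interp1d(x, spectrum, kind="cubic",
                           bounds_error=False, fill_value=np.nanmedian(spectrum))
    shifts = np.arange(-max_shift, max_shift, high_res_factor)
    cc = [np.nansum(ref_fine * spec_interp(fine_x + s)) for s in shifts]
    return shifts[int(np.argmax(cc))]   # shift (in pixels) that maximises the correlation
```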
```python
all_spectra, all_errors, all_y_collapse, x_shifts, y_shifts = jedi.get_stellar_spectra(cleaned_data, err_cube, upper_ap, lower_ap, \
flat=None, f_mask=fnoise_mask, \
extract_method="intrapixel", \
shift=True, interpolate_mode="cubic", \
trim_spec=[3,1], high_res_factor=[0.01,0.01], \
trim_fit=[10,10], \
plot=True, set_to_edge = True)
```
Running intrapixel extraction on 465 spectra
No flat fielding is being performed at this time
1/f noise is being removed
100%|█████████████████████████████████████████| 465/465 [00:38<00:00, 12.09it/s]
Now calculating shifts
100%|█████████████████████████████████████████| 465/465 [00:25<00:00, 18.43it/s]

Let's do the same again on the original, uncleaned science cube so we can see what difference our cleaning processes have made.
```python
unclean_spectra, _, _, _, _ = jedi.get_stellar_spectra(np.nan_to_num(sci_cube), err_cube, upper_ap, lower_ap, \
flat=None, f_mask=fnoise_mask, \
extract_method="intrapixel", \
shift=True, interpolate_mode="cubic", \
trim_spec=[3,1], high_res_factor=[0.01,0.01], \
trim_fit=[10,10], \
plot=False, set_to_edge = True)
```
Running intrapixel extraction on 465 spectra
No flat fielding is being performed at this time
1/f noise is being removed
100%|█████████████████████████████████████████| 465/465 [00:38<00:00, 12.18it/s]
Now calculating shifts
100%|█████████████████████████████████████████| 465/465 [00:24<00:00, 18.77it/s]
## Wavelength Solution
Using the wavelength map FITS file we loaded earlier, we need to extract a wavelength solution. The wavelength map is a 2D array, and we need to pick a value for each pixel column; we can use the trace positions for this.
```python
plt.figure()
plt.imshow(wvl_fits)
plt.plot(np.arange(ap_start,ap_end),trace_position,color='w',ls='--')
plt.show()
wvls = []
for count, pixel_column in enumerate(np.arange(ap_start,ap_end)):
pixel_row = int(trace_position[count])
wvls.append(wvl_fits[pixel_row, pixel_column])
print(len(wvls))
```

2038
## Diagnostic Plots and Visualisations
```python
# Make some fake array for plotting, can be useful for locating troublesome pixels,
# or if your wavelength solution / time arrays aren't nailed down yet for whatever reason
fake_time = np.arange(np.shape(all_spectra)[0])
fake_wvls = np.arange(np.shape(all_spectra)[1])
```
Check how the x and y shifts and the FWHM vary through time.
```python
fwhm_wvls = [4.0,4.5,5.0] # at what wavelengths do we want to check the fwhms? you can give as many as you'd like
fwhm_array = jedi.fwhm_through_time_grabber(cleaned_data, wvls, fwhm_wvls)
```
```python
lc = np.sum(all_spectra, axis=1)/np.sum(all_spectra[-1])
plt.figure()
plt.plot(midtime_bjd, lc, color='darkorchid', ls='none',marker='.',alpha=0.2)
plt.ylabel("Normalised Flux")
plt.xlabel("Time (BJD)")
plt.show()
plt.figure()
plt.plot(midtime_bjd, x_shifts - np.median(x_shifts), color = 'lawngreen', label = '$x$', alpha=0.5)
plt.plot(midtime_bjd, y_shifts - np.median(y_shifts), color = 'dodgerblue', label = '$y$', alpha=0.5)
plt.xlabel("Time (BJD)")
plt.ylabel("Pixel Shifts")
plt.legend()
plt.show()
plt.figure()
for column, wvl in enumerate(fwhm_wvls):
plt.plot(midtime_bjd,fwhm_array[:,column]/np.median(fwhm_array[:,column]), label = wvl,alpha=0.5)
plt.legend()
plt.xlabel("Time (BJD)")
plt.ylabel("Normalised FWHM")
plt.show()
```
```python
resolution = 800
bin_time, bin_flux = jedi.binning(np.arange(0,len(fake_time), len(fake_time)/resolution), fake_time, lc)
bin_time, bin_x_shift = jedi.binning(np.arange(0,len(fake_time), len(fake_time)/resolution), fake_time, (x_shifts - np.median(x_shifts)))
bin_time, bin_y_shift = jedi.binning(np.arange(0,len(fake_time), len(fake_time)/resolution), fake_time, (y_shifts - np.median(y_shifts)))
plt.figure()
plt.plot(fake_time, lc, color='darkorchid', ls='none',marker='.',alpha=0.2)
plt.plot(bin_time, bin_flux, color='k', ls='none',marker='.')
plt.ylabel("Normalised Flux")
plt.xlabel("Time (data points)")
plt.show()
plt.figure()
plt.plot(bin_time, bin_x_shift, color = 'lawngreen', label = '$x$', alpha=0.8)
plt.plot(bin_time, bin_y_shift, color = 'dodgerblue', label = '$y$', alpha=0.8)
plt.xlabel("Time (data points)")
plt.ylabel("Pixel Shifts")
plt.legend()
plt.show()
plt.figure()
for column, wvl in enumerate(fwhm_wvls):
bin_time, bin_fwhm = jedi.binning(np.arange(0,len(fake_time), len(fake_time)/resolution), fake_time, (fwhm_array[:,column]/np.median(fwhm_array[:,column])))
plt.plot(bin_time,bin_fwhm, label = wvl,alpha=0.8)
plt.legend()
plt.xlabel("Time (data points)")
plt.ylabel("Normalised FWHM")
plt.show()
```
And finally the white light curve!
```python
pre_transit = 1500 # Make sure you change these to be relevant to your observation
pst_transit = 3700
lc = np.sum(all_spectra, axis=1)/np.sum(all_spectra[-1])
plt.figure()
plt.plot(midtime_bjd[10:], lc[10:], ls='none',marker='.')
plt.plot(midtime_bjd[10:pre_transit], lc[10:pre_transit], ls='none',marker='.')
plt.plot(midtime_bjd[pst_transit:], lc[pst_transit:], ls='none',marker='.')
plt.xlabel("Time")
plt.ylabel("Flux")
plt.show()
sdev_lcs = np.std(lc[10:pre_transit])
sdev_lce = np.std(lc[pst_transit:])
print('Sdev before tilt (pre transit) = ', sdev_lcs*1e6, ' ppm')
print('Sdev after tilt (post transit) = ', sdev_lce*1e6, ' ppm')
```

Sdev before tilt (pre transit) = 212.58149858615607 ppm
Sdev after tilt (post transit) = 177.15501445692888 ppm
We can plot the stellar spectra up in 2D time-wavelength space to check what we've done so far. The compare_2d_spectra() function allows us to do this for different stages of the process, so we'll see dead pixels etc disappear between the images.
```python
jedi.compare_2d_spectra(all_spectra, unclean_spectra, wvls, midtime_bjd, \
time_units="BJD", residual_limits=[-0.025,0.025], spectra_limits=[0.975,1.01])
# Picking the residual and spectra limits here can make a big difference in the appearance of the plots,
# especially for very clean or shallow transits
```

We can check the 1/f noise over the whole stack and make a periodogram if we'd like. There isn't much of a difference here because this data was cleaned for 1/f noise at the group level, but for other observations you'll see the before curve is significantly above the after curve on the left hand side of the plot
(this step takes a while)
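Conceptually, the check boils down to comparing the power spectrum of the background striping before and after the correction; a very rough stand-in for that (assuming the mask is True on the trace pixels we want to exclude) could be:
```python
# Sketch of the flavour of check made here: a periodogram of the per-column
# background level, which is where 1/f striping shows up.
import numpy as np
from scipy.signal import periodogram

def background_periodogram(image, trace_mask):
    background = np.where(trace_mask, np.nan, image)     # keep only background pixels
    column_series = np.nanmedian(background, axis=0)     # one background value per column
    column_series = np.nan_to_num(column_series - np.nanmean(column_series))
    freqs, power = periodogram(column_series)
    return freqs, power

# freqs_before, p_before = background_periodogram(sci_cube[100], fnoise_mask)
# freqs_after, p_after = background_periodogram(clean_test_im, fnoise_mask)
```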
```python
jedi.check_1f(cleaned_data.copy(), fnoise_mask, stack=True)
```
100%|█████████████████████████████████████████| 465/465 [09:07<00:00, 1.18s/it]
0.8965899999989636

## Saving Out Results
For the G395H ERS observations, Lili Alderson & Natasha Batalha set up a standard format for saving out all data reduction products using xarrays. You can find out more about it via this Google Colab:
https://colab.research.google.com/drive/1VpLYiqLGsxsPZZBMMw8ZyYtgkP5Tqy-s?usp=sharing
There's a small learning curve to getting started with xarrays, but they make life significantly easier in the long run, especially when reading in lots of data all at once!
```python
# Swap some file names around for ease
time_flux = midtime_bjd
wavelength = wvls
flux = all_spectra # * integration_time * gain # use the gain so we can calculate simple square root errors
flux_error = all_errors #np.sqrt(flux)
quality_flag = np.ones(np.shape(all_spectra), dtype=bool) # for Chromatic
x_shift = x_shifts - np.median(x_shifts)
y_shift = y_shifts - np.median(y_shifts)
```
```python
# Save out those xarrays! Here's everything you need to import:
import astropy.units as u
from astropy.utils.misc import JsonCustomEncoder
from astropy.time import Time
import xarray as xr
# put data into a dataset
ds = xr.Dataset(
#now data is a function of two dimensions
data_vars=dict(flux=(["time_flux","wavelength"], flux,{'units': 'electron'}),
flux_error=(["time_flux","wavelength"], flux_error,{'units': 'electron'}),
quality_flag=(["time_flux","wavelength"], quality_flag,{'units': ''}),#unitless!
x_shift=(["time_flux"], x_shift,{'units': ''}),
y_shift=(["time_flux"], y_shift,{'units': ''})
),
coords=dict(
wavelength=(["wavelength"],
wavelength,{'units': 'micron'}),#required*
time_flux=(["time_flux"],
time_flux,{'units': 'bjd'}),#required*
),
##################################################
# You'll want to change this info in particular!!
##################################################
attrs=dict(author="YOUR NAME HERE", #required
contact="[email protected]", #required,
code="https://github.com/Exo-TiC/ExoTiC-JEDI", #could also insert github link
notes="Using reduction_v7_groupLevelDestriping from Box",
normalised="No",
doi="none",#optional if there is a citation to reference
)
)
# Then save it out! But change the {} to suit yourself
ds.to_netcdf("stellar-spec-{planet}-{mode}-{detector}-exoticjedi-{yourname}.nc")
```
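If you want to sanity-check the file you just wrote, xarray's standard open_dataset will read it straight back in (remember to use the same filename you saved to):
```python
import xarray as xr

ds = xr.open_dataset("stellar-spec-{planet}-{mode}-{detector}-exoticjedi-{yourname}.nc")
print(ds)                            # shows dimensions, coordinates and attributes
flux = ds["flux"].values             # (time_flux, wavelength) array of extracted spectra
wavelength = ds["wavelength"].values
```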
If xarrays seem a bit daunting right now, pickles are your friend!
```python
output_dict = {
"bjd_midtimes" : time_flux,
"wavlength_um" : wavelength,
"flux" : flux,
'flux_error' : flux_error,
'x_shift' : x_shift,
'y_shift' : y_shift,
'author' : "YOUR NAME HERE",
'contact' : "[email protected]",
'code' : "exotic-jedi",
'notes' : "Using reduction_v7_groupLevelDestriping from Box",
}
pickle.dump(output_dict, open('insertname.pickle','wb'))
```
/tmp/ipykernel_1799213/1763087207.py:4: RuntimeWarning: invalid value encountered in sqrt
flux_error = np.sqrt(flux)
|
Exo-TiCREPO_NAMEExoTiC-JEDIPATH_START.@ExoTiC-JEDI_extracted@ExoTiC-JEDI-master@notebooks@padawan@[email protected]_END.py
|
{
"filename": "_least_angle.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/linear_model/_least_angle.py",
"type": "Python"
}
|
"""
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import sys
import warnings
from math import log
from numbers import Integral, Real
import numpy as np
from scipy import interpolate, linalg
from scipy.linalg.lapack import get_lapack_funcs
from ..base import MultiOutputMixin, RegressorMixin, _fit_context
from ..exceptions import ConvergenceWarning
from ..model_selection import check_cv
# mypy error: Module 'sklearn.utils' has no attribute 'arrayfuncs'
from ..utils import ( # type: ignore
Bunch,
arrayfuncs,
as_float_array,
check_random_state,
)
from ..utils._metadata_requests import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
from ..utils.parallel import Parallel, delayed
from ..utils.validation import validate_data
from ._base import LinearModel, LinearRegression, _preprocess_data
SOLVE_TRIANGULAR_ARGS = {"check_finite": False}
@validate_params(
{
"X": [np.ndarray, None],
"y": [np.ndarray, None],
"Xy": [np.ndarray, None],
"Gram": [StrOptions({"auto"}), "boolean", np.ndarray, None],
"max_iter": [Interval(Integral, 0, None, closed="left")],
"alpha_min": [Interval(Real, 0, None, closed="left")],
"method": [StrOptions({"lar", "lasso"})],
"copy_X": ["boolean"],
"eps": [Interval(Real, 0, None, closed="neither"), None],
"copy_Gram": ["boolean"],
"verbose": ["verbose"],
"return_path": ["boolean"],
"return_n_iter": ["boolean"],
"positive": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def lars_path(
X,
y,
Xy=None,
*,
Gram=None,
max_iter=500,
alpha_min=0,
method="lar",
copy_X=True,
eps=np.finfo(float).eps,
copy_Gram=True,
verbose=0,
return_path=True,
return_n_iter=False,
positive=False,
):
"""Compute Least Angle Regression or Lasso path using the LARS algorithm.
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lar', the objective function is only known in
the form of an implicit equation (see discussion in [1]_).
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
X : None or ndarray of shape (n_samples, n_features)
Input data. If X is `None`, Gram must also be `None`.
If only the Gram matrix is available, use `lars_path_gram` instead.
y : None or ndarray of shape (n_samples,)
Input targets.
Xy : array-like of shape (n_features,), default=None
`Xy = X.T @ y` that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Gram : None, 'auto', bool, ndarray of shape (n_features, n_features), \
default=None
Precomputed Gram matrix `X.T @ X`, if `'auto'`, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter `alpha` in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select `'lar'` for Least Angle
Regression, `'lasso'` for the Lasso.
copy_X : bool, default=True
If `False`, `X` is overwritten.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the `tol` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_Gram : bool, default=True
If `False`, `Gram` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If `True`, returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (`alphas_[alphas_ > 0.].min()` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent `lasso_path` function.
Returns
-------
alphas : ndarray of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
`n_alphas` is either `max_iter`, `n_features`, or the
number of nodes in the path with `alpha >= alpha_min`, whichever
is smaller.
active : ndarray of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : ndarray of shape (n_features, n_alphas + 1)
Coefficients along the path.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is set
to True.
See Also
--------
lars_path_gram : Compute LARS path in the sufficient stats mode.
lasso_path : Compute Lasso path with coordinate descent.
LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
Lars : Least Angle Regression model a.k.a. LAR.
LassoLarsCV : Cross-validated Lasso, using the LARS algorithm.
LarsCV : Cross-validated Least Angle Regression model.
sklearn.decomposition.sparse_encode : Sparse coding.
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
Examples
--------
>>> from sklearn.linear_model import lars_path
>>> from sklearn.datasets import make_regression
>>> X, y, true_coef = make_regression(
... n_samples=100, n_features=5, n_informative=2, coef=True, random_state=0
... )
>>> true_coef
array([ 0. , 0. , 0. , 97.9..., 45.7...])
>>> alphas, _, estimated_coef = lars_path(X, y)
>>> alphas.shape
(3,)
>>> estimated_coef
array([[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 46.96..., 97.99...],
[ 0. , 0. , 45.70...]])
"""
if X is None and Gram is not None:
raise ValueError(
"X cannot be None if Gram is not None"
"Use lars_path_gram to avoid passing X and y."
)
return _lars_path_solver(
X=X,
y=y,
Xy=Xy,
Gram=Gram,
n_samples=None,
max_iter=max_iter,
alpha_min=alpha_min,
method=method,
copy_X=copy_X,
eps=eps,
copy_Gram=copy_Gram,
verbose=verbose,
return_path=return_path,
return_n_iter=return_n_iter,
positive=positive,
)
@validate_params(
{
"Xy": [np.ndarray],
"Gram": [np.ndarray],
"n_samples": [Interval(Integral, 0, None, closed="left")],
"max_iter": [Interval(Integral, 0, None, closed="left")],
"alpha_min": [Interval(Real, 0, None, closed="left")],
"method": [StrOptions({"lar", "lasso"})],
"copy_X": ["boolean"],
"eps": [Interval(Real, 0, None, closed="neither"), None],
"copy_Gram": ["boolean"],
"verbose": ["verbose"],
"return_path": ["boolean"],
"return_n_iter": ["boolean"],
"positive": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def lars_path_gram(
Xy,
Gram,
*,
n_samples,
max_iter=500,
alpha_min=0,
method="lar",
copy_X=True,
eps=np.finfo(float).eps,
copy_Gram=True,
verbose=0,
return_path=True,
return_n_iter=False,
positive=False,
):
"""The lars_path in the sufficient stats mode.
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lar', the objective function is only known in
the form of an implicit equation (see discussion in [1]_).
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
Xy : ndarray of shape (n_features,)
`Xy = X.T @ y`.
Gram : ndarray of shape (n_features, n_features)
`Gram = X.T @ X`.
n_samples : int
Equivalent size of sample.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select `'lar'` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If `False`, `X` is overwritten.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the `tol` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_Gram : bool, default=True
If `False`, `Gram` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If `return_path==True` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (`alphas_[alphas_ > 0.].min()` when `fit_path=True`) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
-------
alphas : ndarray of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
`n_alphas` is either `max_iter`, `n_features` or the
number of nodes in the path with `alpha >= alpha_min`, whichever
is smaller.
active : ndarray of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : ndarray of shape (n_features, n_alphas + 1)
Coefficients along the path.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is set
to True.
See Also
--------
lars_path : Compute LARS path.
lasso_path : Compute Lasso path with coordinate descent.
LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
Lars : Least Angle Regression model a.k.a. LAR.
LassoLarsCV : Cross-validated Lasso, using the LARS algorithm.
LarsCV : Cross-validated Least Angle Regression model.
sklearn.decomposition.sparse_encode : Sparse coding.
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
Examples
--------
>>> from sklearn.linear_model import lars_path_gram
>>> from sklearn.datasets import make_regression
>>> X, y, true_coef = make_regression(
... n_samples=100, n_features=5, n_informative=2, coef=True, random_state=0
... )
>>> true_coef
array([ 0. , 0. , 0. , 97.9..., 45.7...])
>>> alphas, _, estimated_coef = lars_path_gram(X.T @ y, X.T @ X, n_samples=100)
>>> alphas.shape
(3,)
>>> estimated_coef
array([[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 46.96..., 97.99...],
[ 0. , 0. , 45.70...]])
"""
return _lars_path_solver(
X=None,
y=None,
Xy=Xy,
Gram=Gram,
n_samples=n_samples,
max_iter=max_iter,
alpha_min=alpha_min,
method=method,
copy_X=copy_X,
eps=eps,
copy_Gram=copy_Gram,
verbose=verbose,
return_path=return_path,
return_n_iter=return_n_iter,
positive=positive,
)
def _lars_path_solver(
X,
y,
Xy=None,
Gram=None,
n_samples=None,
max_iter=500,
alpha_min=0,
method="lar",
copy_X=True,
eps=np.finfo(float).eps,
copy_Gram=True,
verbose=0,
return_path=True,
return_n_iter=False,
positive=False,
):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lar', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
X : None or ndarray of shape (n_samples, n_features)
Input data. Note that if X is None then Gram must be specified,
i.e., cannot be None or False.
y : None or ndarray of shape (n_samples,)
Input targets.
Xy : array-like of shape (n_features,), default=None
`Xy = np.dot(X.T, y)` that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix `(X' * X)`, if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
n_samples : int or float, default=None
Equivalent size of sample. If `None`, it will be `n_samples`.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If ``False``, ``X`` is overwritten.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_Gram : bool, default=True
If ``False``, ``Gram`` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
-------
alphas : array-like of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array-like of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See Also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
if method == "lar" and positive:
raise ValueError("Positive constraint not supported for 'lar' coding method.")
n_samples = n_samples if n_samples is not None else y.size
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if Gram is None or Gram is False:
Gram = None
if X is None:
raise ValueError("X and Gram cannot both be unspecified.")
elif isinstance(Gram, str) and Gram == "auto" or Gram is True:
if Gram is True or X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
else:
Gram = None
elif copy_Gram:
Gram = Gram.copy()
if Gram is None:
n_features = X.shape[1]
else:
n_features = Cov.shape[0]
if Gram.shape != (n_features, n_features):
raise ValueError("The shapes of the inputs Gram and Xy do not match.")
if copy_X and X is not None and Gram is None:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy("F")
max_features = min(max_iter, n_features)
dtypes = set(a.dtype for a in (X, y, Xy, Gram) if a is not None)
if len(dtypes) == 1:
# use the precision level of input data if it is consistent
return_dtype = next(iter(dtypes))
else:
# fallback to double precision otherwise
return_dtype = np.float64
if return_path:
coefs = np.zeros((max_features + 1, n_features), dtype=return_dtype)
alphas = np.zeros(max_features + 1, dtype=return_dtype)
else:
coef, prev_coef = (
np.zeros(n_features, dtype=return_dtype),
np.zeros(n_features, dtype=return_dtype),
)
alpha, prev_alpha = (
np.array([0.0], dtype=return_dtype),
np.array([0.0], dtype=return_dtype),
)
# above better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
if Gram is None:
L = np.empty((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (X,))
else:
L = np.empty((max_features, max_features), dtype=Gram.dtype)
swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (Cov,))
(solve_cholesky,) = get_lapack_funcs(("potrs",), (L,))
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write(".")
sys.stdout.flush()
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
cov_precision = np.finfo(Cov.dtype).precision
equality_tolerance = np.finfo(np.float32).eps
if Gram is not None:
Gram_copy = Gram.copy()
Cov_copy = Cov.copy()
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.0
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = (prev_alpha[0] - alpha_min) / (prev_alpha[0] - alpha[0])
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap only works in place if the matrix is Fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(
L[:n_active, :n_active],
L[n_active, :n_active],
trans=0,
lower=1,
overwrite_b=True,
**SOLVE_TRIANGULAR_ARGS,
)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
warnings.warn(
"Regressors in active set degenerate. "
"Dropping a regressor, after %i iterations, "
"i.e. alpha=%.3e, "
"with an active set of %i regressors, and "
"the smallest cholesky pivot element being %.3e."
" Reduce max_iter or increase eps parameters."
% (n_iter, alpha.item(), n_active, diag),
ConvergenceWarning,
)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print(
"%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], "", n_active, C)
)
if method == "lasso" and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater than
# the remaining correlation with the
# regressors. Time to bail out
warnings.warn(
"Early stopping the lars path, as the residues "
"are small and the current value of alpha is no "
"longer well controlled. %i iterations, alpha=%.3e, "
"previous alpha=%.3e, with an active set of %i "
"regressors." % (n_iter, alpha.item(), prev_alpha.item(), n_active),
ConvergenceWarning,
)
break
# least squares solution
least_squares, _ = solve_cholesky(
L[:n_active, :n_active], sign_active[:n_active], lower=True
)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.0
else:
# is this really needed ?
AA = 1.0 / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[:: n_active + 1] += (2**i) * eps
least_squares, _ = solve_cholesky(
L_, sign_active[:n_active], lower=True
)
tmp = max(np.sum(least_squares * sign_active[:n_active]), eps)
AA = 1.0 / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
# correlation between each inactive variable and the
# equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# with a huge number of features, this takes 50% of the time; I
# think it could be avoided if we just updated it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T, least_squares)
# Explicit rounding can be necessary to avoid `np.argmax(Cov)` yielding
# unstable results because of rounding errors.
np.around(corr_eq_dir, decimals=cov_precision, out=corr_eq_dir)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == "lasso":
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
coefs[-add_features:] = 0
alphas = np.resize(alphas, n_iter + add_features)
alphas[-add_features:] = 0
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == "lasso":
# handle the case when idx is not length of 1
for ii in idx:
arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii)
n_active -= 1
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i], Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
temp = Cov_copy[drop_idx] - np.dot(Gram_copy[drop_idx], coef)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.0) # just to maintain size
if verbose > 1:
print(
"%s\t\t%s\t\t%s\t\t%s\t\t%s"
% (n_iter, "", drop_idx, n_active, abs(temp))
)
if return_path:
# resize coefs in case of early stop
alphas = alphas[: n_iter + 1]
coefs = coefs[: n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(MultiOutputMixin, RegressorMixin, LinearModel):
"""Least Angle Regression model a.k.a. LAR.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
precompute : bool, 'auto' or array-like , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
n_nonzero_coefs : int, default=500
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
fit_path : bool, default=True
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
jitter : float, default=None
Upper bound on a uniform noise parameter to be added to the
`y` values, to satisfy the model's assumption of
one-at-a-time computations. Might help with stability.
.. versionadded:: 0.23
random_state : int, RandomState instance or None, default=None
Determines random number generation for jittering. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
.. versionadded:: 0.23
Attributes
----------
alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller. If this is a list of array-like, the length of the outer
list is `n_targets`.
active_ : list of shape (n_alphas,) or list of such lists
Indices of active variables at the end of the path.
If this is a list of list, the length of the outer list is `n_targets`.
coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \
of such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``. If this is a list
of array-like, the length of the outer list is `n_targets`.
coef_ : array-like of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float or array-like of shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
lars_path: Compute Least Angle Regression or Lasso
path using LARS algorithm.
LarsCV : Cross-validated Least Angle Regression model.
sklearn.decomposition.sparse_encode : Sparse coding.
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.Lars(n_nonzero_coefs=1)
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
Lars(n_nonzero_coefs=1)
>>> print(reg.coef_)
[ 0. -1.11...]
"""
_parameter_constraints: dict = {
"fit_intercept": ["boolean"],
"verbose": ["verbose"],
"precompute": ["boolean", StrOptions({"auto"}), np.ndarray, Hidden(None)],
"n_nonzero_coefs": [Interval(Integral, 1, None, closed="left")],
"eps": [Interval(Real, 0, None, closed="left")],
"copy_X": ["boolean"],
"fit_path": ["boolean"],
"jitter": [Interval(Real, 0, None, closed="left"), None],
"random_state": ["random_state"],
}
method = "lar"
positive = False
def __init__(
self,
*,
fit_intercept=True,
verbose=False,
precompute="auto",
n_nonzero_coefs=500,
eps=np.finfo(float).eps,
copy_X=True,
fit_path=True,
jitter=None,
random_state=None,
):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
self.jitter = jitter
self.random_state = random_state
@staticmethod
def _get_gram(precompute, X, y):
if (not hasattr(precompute, "__array__")) and (
(precompute is True)
or (precompute == "auto" and X.shape[0] > X.shape[1])
or (precompute == "auto" and y.shape[1] > 1)
):
precompute = np.dot(X.T, X)
return precompute
def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None):
"""Auxiliary method to fit the model using X, y as training data"""
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=self.fit_intercept, copy=self.copy_X
)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
Gram = self._get_gram(self.precompute, X, y)
self.alphas_ = []
self.n_iter_ = []
self.coef_ = np.empty((n_targets, n_features), dtype=X.dtype)
if fit_path:
self.active_ = []
self.coef_path_ = []
for k in range(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X,
y[:, k],
Gram=Gram,
Xy=this_Xy,
copy_X=self.copy_X,
copy_Gram=True,
alpha_min=alpha,
method=self.method,
verbose=max(0, self.verbose - 1),
max_iter=max_iter,
eps=self.eps,
return_path=True,
return_n_iter=True,
positive=self.positive,
)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_[k] = coef_path[:, -1]
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0]
for a in (self.alphas_, self.active_, self.coef_path_, self.coef_)
]
self.n_iter_ = self.n_iter_[0]
else:
for k in range(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X,
y[:, k],
Gram=Gram,
Xy=this_Xy,
copy_X=self.copy_X,
copy_Gram=True,
alpha_min=alpha,
method=self.method,
verbose=max(0, self.verbose - 1),
max_iter=max_iter,
eps=self.eps,
return_path=False,
return_n_iter=True,
positive=self.positive,
)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_offset, y_offset, X_scale)
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like of shape (n_features,) or (n_features, n_targets), \
default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = validate_data(
self, X, y, force_writeable=True, y_numeric=True, multi_output=True
)
alpha = getattr(self, "alpha", 0.0)
if hasattr(self, "n_nonzero_coefs"):
alpha = 0.0 # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
if self.jitter is not None:
rng = check_random_state(self.random_state)
noise = rng.uniform(high=self.jitter, size=len(y))
y = y + noise
self._fit(
X,
y,
max_iter=max_iter,
alpha=alpha,
fit_path=self.fit_path,
Xy=Xy,
)
return self
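# --- Editor's illustrative sketch (not part of scikit-learn) ---------------
# Lars.fit() above uses ``n_nonzero_coefs`` as its stopping criterion (alpha
# is forced to 0.0) and can optionally jitter ``y`` for numerical stability.
# Hypothetical helper, shown only to make that behaviour concrete:
def _example_lars_n_nonzero_coefs():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = X[:, 0] - 2.0 * X[:, 3] + 0.01 * rng.randn(50)
    # At most two variables are allowed to enter the active set.
    reg = Lars(n_nonzero_coefs=2, jitter=1e-6, random_state=0).fit(X, y)
    return np.flatnonzero(reg.coef_)  # indices of the selected features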
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars.
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float, default=1.0
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
precompute : bool, 'auto' or array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
fit_path : bool, default=True
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
jitter : float, default=None
Upper bound on a uniform noise parameter to be added to the
`y` values, to satisfy the model's assumption of
one-at-a-time computations. Might help with stability.
.. versionadded:: 0.23
random_state : int, RandomState instance or None, default=None
Determines random number generation for jittering. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
.. versionadded:: 0.23
Attributes
----------
alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller. If this is a list of array-like, the length of the outer
list is `n_targets`.
active_ : list of length n_alphas or list of such lists
Indices of active variables at the end of the path.
If this is a list of list, the length of the outer list is `n_targets`.
coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \
of such arrays
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``. If this is a list
of array-like, the length of the outer list is `n_targets`.
coef_ : array-like of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float or array-like of shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
lars_path : Compute Least Angle Regression or Lasso
path using LARS algorithm.
lasso_path : Compute Lasso path with coordinate descent.
Lasso : Linear Model trained with L1 prior as
regularizer (aka the Lasso).
LassoCV : Lasso linear model with iterative fitting
along a regularization path.
LassoLarsCV: Cross-validated Lasso, using the LARS algorithm.
LassoLarsIC : Lasso model fit with Lars using BIC
or AIC for model selection.
sklearn.decomposition.sparse_encode : Sparse coding.
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.LassoLars(alpha=0.01)
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
LassoLars(alpha=0.01)
>>> print(reg.coef_)
[ 0. -0.955...]
"""
_parameter_constraints: dict = {
**Lars._parameter_constraints,
"alpha": [Interval(Real, 0, None, closed="left")],
"max_iter": [Interval(Integral, 0, None, closed="left")],
"positive": ["boolean"],
}
_parameter_constraints.pop("n_nonzero_coefs")
method = "lasso"
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
verbose=False,
precompute="auto",
max_iter=500,
eps=np.finfo(float).eps,
copy_X=True,
fit_path=True,
positive=False,
jitter=None,
random_state=None,
):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
self.jitter = jitter
self.random_state = random_state
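# --- Editor's illustrative sketch (not part of scikit-learn) ---------------
# The objective documented above,
#     (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1,
# can be evaluated directly to sanity-check a fitted LassoLars model.
# Hypothetical helper, added only to make the formula concrete:
def _example_lasso_objective(X, y, coef, intercept, alpha):
    import numpy as np

    X = np.asarray(X)
    residual = np.asarray(y) - (np.dot(X, coef) + intercept)
    data_fit = np.sum(residual ** 2) / (2.0 * X.shape[0])
    penalty = alpha * np.sum(np.abs(coef))
    return data_fit + penalty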
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(
X_train,
y_train,
X_test,
y_test,
Gram=None,
copy=True,
method="lar",
verbose=False,
fit_intercept=True,
max_iter=500,
eps=np.finfo(float).eps,
positive=False,
):
"""Compute the residues on left-out data for a full LARS path
Parameters
----------
X_train : array-like of shape (n_samples, n_features)
The data to fit the LARS on
y_train : array-like of shape (n_samples,)
The target variable to fit LARS on
X_test : array-like of shape (n_samples, n_features)
The data to compute the residues on
y_test : array-like of shape (n_samples,)
The target variable to compute the residues on
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : bool, default=True
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : {'lar' , 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : bool or int, default=False
Sets the amount of verbosity
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
-------
alphas : array-like of shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas)
Coefficients along the path
residues : array-like of shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
alphas, active, coefs = lars_path(
X_train,
y_train,
Gram=Gram,
copy_X=False,
copy_Gram=False,
method=method,
verbose=max(0, verbose - 1),
max_iter=max_iter,
eps=eps,
positive=positive,
)
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
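# --- Editor's illustrative sketch (not part of scikit-learn) ---------------
# ``_lars_path_residues`` returns ``residues`` with shape (n_alphas, n_samples).
# Squaring and averaging over the sample axis gives one validation MSE per
# alpha for that fold, which is what LarsCV aggregates below.  Hypothetical
# helper:
def _example_fold_mse(residues):
    import numpy as np

    return np.mean(np.asarray(residues) ** 2, axis=-1)  # shape (n_alphas,)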
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model.
See glossary entry for :term:`cross-validation estimator`.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
max_iter : int, default=500
Maximum number of iterations to perform.
precompute : bool, 'auto' or array-like , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram matrix
cannot be passed as argument since we will use only subsets of X.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
max_n_alphas : int, default=1000
The maximum number of points on the path used to compute the
residuals in the cross-validation.
n_jobs : int or None, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
active_ : list of length n_alphas or list of such lists
Indices of active variables at the end of the path.
If this is a list of lists, the outer list length is `n_targets`.
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array-like of shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array-like of shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array-like of shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array-like of shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
lars_path : Compute Least Angle Regression or Lasso
path using LARS algorithm.
lasso_path : Compute Lasso path with coordinate descent.
Lasso : Linear Model trained with L1 prior as
regularizer (aka the Lasso).
LassoCV : Lasso linear model with iterative fitting
along a regularization path.
LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
LassoLarsIC : Lasso model fit with Lars using BIC
or AIC for model selection.
sklearn.decomposition.sparse_encode : Sparse coding.
Notes
-----
In `fit`, once the best parameter `alpha` is found through
cross-validation, the model is fit again using the entire training set.
Examples
--------
>>> from sklearn.linear_model import LarsCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0)
>>> reg = LarsCV(cv=5).fit(X, y)
>>> reg.score(X, y)
0.9996...
>>> reg.alpha_
np.float64(0.2961...)
>>> reg.predict(X[:1,])
array([154.3996...])
"""
_parameter_constraints: dict = {
**Lars._parameter_constraints,
"max_iter": [Interval(Integral, 0, None, closed="left")],
"cv": ["cv_object"],
"max_n_alphas": [Interval(Integral, 1, None, closed="left")],
"n_jobs": [Integral, None],
}
for parameter in ["n_nonzero_coefs", "jitter", "fit_path", "random_state"]:
_parameter_constraints.pop(parameter)
method = "lar"
def __init__(
self,
*,
fit_intercept=True,
verbose=False,
max_iter=500,
precompute="auto",
cv=None,
max_n_alphas=1000,
n_jobs=None,
eps=np.finfo(float).eps,
copy_X=True,
):
self.max_iter = max_iter
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
super().__init__(
fit_intercept=fit_intercept,
verbose=verbose,
precompute=precompute,
n_nonzero_coefs=500,
eps=eps,
copy_X=copy_X,
fit_path=True,
)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.target_tags.multi_output = False
return tags
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, **params):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
**params : dict, default=None
Parameters to be passed to the CV splitter.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns an instance of self.
"""
_raise_for_params(params, self, "fit")
X, y = validate_data(self, X, y, force_writeable=True, y_numeric=True)
X = as_float_array(X, copy=self.copy_X)
y = as_float_array(y, copy=self.copy_X)
# init cross-validation generator
cv = check_cv(self.cv, classifier=False)
if _routing_enabled():
routed_params = process_routing(self, "fit", **params)
else:
routed_params = Bunch(splitter=Bunch(split={}))
# As we use cross-validation, the Gram matrix is not precomputed here
Gram = self.precompute
if hasattr(Gram, "__array__"):
warnings.warn(
'Parameter "precompute" cannot be an array in '
'%s. Automatically switch to "auto" instead.' % self.__class__.__name__
)
Gram = "auto"
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train],
y[train],
X[test],
y[test],
Gram=Gram,
copy=False,
method=self.method,
verbose=max(0, self.verbose - 1),
fit_intercept=self.fit_intercept,
max_iter=self.max_iter,
eps=self.eps,
positive=self.positive,
)
for train, test in cv.split(X, y, **routed_params.splitter.split)
)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, _, _, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas, residues, axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.mse_path_ = mse_path
# Now compute the full model using best_alpha
# it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
self._fit(
X,
y,
max_iter=self.max_iter,
alpha=best_alpha,
Xy=None,
fit_path=True,
)
return self
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__).add(
splitter=check_cv(self.cv),
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
return router
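# --- Editor's illustrative sketch (not part of scikit-learn) ---------------
# LarsCV.fit() above interpolates every fold's residues onto one shared alpha
# grid before averaging across folds.  A stripped-down, hypothetical version
# of that interpolation step (the real code pads the endpoints instead of
# extrapolating):
def _example_interpolate_fold_mse(all_alphas, fold_alphas, fold_residues):
    import numpy as np
    from scipy import interpolate

    alphas = np.asarray(fold_alphas)[::-1]      # lars_path returns decreasing alphas
    residues = np.asarray(fold_residues)[::-1]
    interp = interpolate.interp1d(
        alphas, residues, axis=0, bounds_error=False, fill_value="extrapolate"
    )
    return np.mean(interp(np.asarray(all_alphas)) ** 2, axis=-1)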
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm.
See glossary entry for :term:`cross-validation estimator`.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
max_iter : int, default=500
Maximum number of iterations to perform.
precompute : bool or 'auto' , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram matrix
cannot be passed as argument since we will use only subsets of X.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
max_n_alphas : int, default=1000
The maximum number of points on the path used to compute the
residuals in the cross-validation.
n_jobs : int or None, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
Attributes
----------
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array-like of shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array-like of shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array-like of shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array-like of shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
active_ : list of int
Indices of active variables at the end of the path.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
lars_path : Compute Least Angle Regression or Lasso
path using LARS algorithm.
lasso_path : Compute Lasso path with coordinate descent.
Lasso : Linear Model trained with L1 prior as
regularizer (aka the Lasso).
LassoCV : Lasso linear model with iterative fitting
along a regularization path.
LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
LassoLarsIC : Lasso model fit with Lars using BIC
or AIC for model selection.
sklearn.decomposition.sparse_encode : Sparse coding.
Notes
-----
The object solves the same problem as the
:class:`~sklearn.linear_model.LassoCV` object. However, unlike the
:class:`~sklearn.linear_model.LassoCV`, it finds the relevant alpha values
by itself. In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the :class:`~sklearn.linear_model.LassoCV` if
only a small number of features are selected compared to the total number,
for instance if there are very few samples compared to the number of
features.
In `fit`, once the best parameter `alpha` is found through
cross-validation, the model is fit again using the entire training set.
Examples
--------
>>> from sklearn.linear_model import LassoLarsCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(noise=4.0, random_state=0)
>>> reg = LassoLarsCV(cv=5).fit(X, y)
>>> reg.score(X, y)
0.9993...
>>> reg.alpha_
np.float64(0.3972...)
>>> reg.predict(X[:1,])
array([-78.4831...])
"""
_parameter_constraints = {
**LarsCV._parameter_constraints,
"positive": ["boolean"],
}
method = "lasso"
def __init__(
self,
*,
fit_intercept=True,
verbose=False,
max_iter=500,
precompute="auto",
cv=None,
max_n_alphas=1000,
n_jobs=None,
eps=np.finfo(float).eps,
copy_X=True,
positive=False,
):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.max_iter = max_iter
self.precompute = precompute
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
self.copy_X = copy_X
self.positive = positive
# XXX : we don't use super().__init__
# to avoid setting n_nonzero_coefs
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion [2]_ and BIC is the Bayes
Information criterion [3]_. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <lasso_lars_ic>`.
Parameters
----------
criterion : {'aic', 'bic'}, default='aic'
The type of criterion to use.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
precompute : bool, 'auto' or array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, default=500
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
noise_variance : float, default=None
The estimated noise variance of the data. If `None`, an unbiased
estimate is computed by an OLS model. However, it is only possible
in the case where `n_samples > n_features + fit_intercept`.
.. versionadded:: 1.1
Attributes
----------
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller. If a list, it will be of length `n_targets`.
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array-like of shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criterion is
chosen, as specified in [1]_.
noise_variance_ : float
The estimated noise variance from the data used to compute the
criterion.
.. versionadded:: 1.1
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
lars_path : Compute Least Angle Regression or Lasso
path using LARS algorithm.
lasso_path : Compute Lasso path with coordinate descent.
Lasso : Linear Model trained with L1 prior as
regularizer (aka the Lasso).
LassoCV : Lasso linear model with iterative fitting
along a regularization path.
LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
LassoLarsCV: Cross-validated Lasso, using the LARS algorithm.
sklearn.decomposition.sparse_encode : Sparse coding.
Notes
-----
The number of degrees of freedom is computed as in [1]_.
To have more details regarding the mathematical formulation of the
AIC and BIC criteria, please refer to :ref:`User Guide <lasso_lars_ic>`.
References
----------
.. [1] :arxiv:`Zou, Hui, Trevor Hastie, and Robert Tibshirani.
"On the degrees of freedom of the lasso."
The Annals of Statistics 35.5 (2007): 2173-2192.
<0712.0881>`
.. [2] `Wikipedia entry on the Akaike information criterion
<https://en.wikipedia.org/wiki/Akaike_information_criterion>`_
.. [3] `Wikipedia entry on the Bayesian information criterion
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>`_
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.LassoLarsIC(criterion='bic')
>>> X = [[-2, 2], [-1, 1], [0, 0], [1, 1], [2, 2]]
>>> y = [-2.2222, -1.1111, 0, -1.1111, -2.2222]
>>> reg.fit(X, y)
LassoLarsIC(criterion='bic')
>>> print(reg.coef_)
[ 0. -1.11...]
"""
_parameter_constraints: dict = {
**LassoLars._parameter_constraints,
"criterion": [StrOptions({"aic", "bic"})],
"noise_variance": [Interval(Real, 0, None, closed="left"), None],
}
for parameter in ["jitter", "fit_path", "alpha", "random_state"]:
_parameter_constraints.pop(parameter)
def __init__(
self,
criterion="aic",
*,
fit_intercept=True,
verbose=False,
precompute="auto",
max_iter=500,
eps=np.finfo(float).eps,
copy_X=True,
positive=False,
noise_variance=None,
):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
self.fit_path = True
self.noise_variance = noise_variance
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.target_tags.multi_output = False
return tags
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, copy_X=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
copy_X : bool, default=None
If provided, this parameter will override the choice
of copy_X made at instance creation.
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
Returns an instance of self.
"""
if copy_X is None:
copy_X = self.copy_X
X, y = validate_data(self, X, y, force_writeable=True, y_numeric=True)
X, y, Xmean, ymean, Xstd = _preprocess_data(
X, y, fit_intercept=self.fit_intercept, copy=copy_X
)
Gram = self.precompute
alphas_, _, coef_path_, self.n_iter_ = lars_path(
X,
y,
Gram=Gram,
copy_X=copy_X,
copy_Gram=True,
alpha_min=0.0,
method="lasso",
verbose=self.verbose,
max_iter=self.max_iter,
eps=self.eps,
return_n_iter=True,
positive=self.positive,
)
n_samples = X.shape[0]
if self.criterion == "aic":
criterion_factor = 2
elif self.criterion == "bic":
criterion_factor = log(n_samples)
else:
raise ValueError(
f"criterion should be either bic or aic, got {self.criterion!r}"
)
residuals = y[:, np.newaxis] - np.dot(X, coef_path_)
residuals_sum_squares = np.sum(residuals**2, axis=0)
degrees_of_freedom = np.zeros(coef_path_.shape[1], dtype=int)
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
degrees_of_freedom[k] = np.sum(mask)
self.alphas_ = alphas_
if self.noise_variance is None:
self.noise_variance_ = self._estimate_noise_variance(
X, y, positive=self.positive
)
else:
self.noise_variance_ = self.noise_variance
self.criterion_ = (
n_samples * np.log(2 * np.pi * self.noise_variance_)
+ residuals_sum_squares / self.noise_variance_
+ criterion_factor * degrees_of_freedom
)
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
def _estimate_noise_variance(self, X, y, positive):
"""Compute an estimate of the variance with an OLS model.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data to be fitted by the OLS model. We expect the data to be
centered.
y : ndarray of shape (n_samples,)
Associated target.
positive : bool, default=False
Restrict coefficients to be >= 0. This should be inline with
the `positive` parameter from `LassoLarsIC`.
Returns
-------
noise_variance : float
An estimator of the noise variance of an OLS model.
"""
if X.shape[0] <= X.shape[1] + self.fit_intercept:
raise ValueError(
f"You are using {self.__class__.__name__} in the case where the number "
"of samples is smaller than the number of features. In this setting, "
"getting a good estimate for the variance of the noise is not "
"possible. Provide an estimate of the noise variance in the "
"constructor."
)
# X and y are already centered and we don't need to fit with an intercept
ols_model = LinearRegression(positive=positive, fit_intercept=False)
y_pred = ols_model.fit(X, y).predict(X)
return np.sum((y - y_pred) ** 2) / (
X.shape[0] - X.shape[1] - self.fit_intercept
)
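# --- Editor's illustrative sketch (not part of scikit-learn) ---------------
# The criterion minimised in LassoLarsIC.fit() above is
#     n_samples * log(2 * pi * sigma^2) + RSS / sigma^2 + factor * df,
# with factor = 2 for AIC and log(n_samples) for BIC.  Hypothetical helper
# that evaluates it for one candidate model on the path:
def _example_information_criterion(n_samples, rss, noise_variance, df, criterion="aic"):
    import numpy as np

    factor = 2.0 if criterion == "aic" else np.log(n_samples)
    return (
        n_samples * np.log(2.0 * np.pi * noise_variance)
        + rss / noise_variance
        + factor * df
    )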
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@linear_model@[email protected]_END.py
|
{
"filename": "linsolve.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/sparse/linalg/_dsolve/linsolve.py",
"type": "Python"
}
|
from warnings import warn, catch_warnings, simplefilter
import numpy as np
from numpy import asarray
from scipy.sparse import (issparse, SparseEfficiencyWarning,
csr_array, csc_array, eye_array, diags_array)
from scipy.sparse._sputils import (is_pydata_spmatrix, convert_pydata_sparse_to_scipy,
get_index_dtype, safely_cast_index_arrays)
from scipy.linalg import LinAlgError
import copy
import threading
from . import _superlu
noScikit = False
try:
import scikits.umfpack as umfpack
except ImportError:
noScikit = True
useUmfpack = threading.local()
__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
'MatrixRankWarning', 'spsolve_triangular', 'is_sptriangular', 'spbandwidth']
class MatrixRankWarning(UserWarning):
pass
def use_solver(**kwargs):
"""
Select default sparse direct solver to be used.
Parameters
----------
useUmfpack : bool, optional
Use UMFPACK [1]_, [2]_, [3]_, [4]_. over SuperLU. Has effect only
if ``scikits.umfpack`` is installed. Default: True
assumeSortedIndices : bool, optional
Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix.
Has effect only if useUmfpack is True and ``scikits.umfpack`` is
installed. Default: False
Notes
-----
The default sparse solver is UMFPACK when available
(``scikits.umfpack`` is installed). This can be changed by passing
useUmfpack = False, which then causes the always present SuperLU
based solver to be used.
UMFPACK requires a CSR/CSC matrix to have sorted column/row indices. If
sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
to gain some speed.
References
----------
.. [1] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
multifrontal method with a column pre-ordering strategy, ACM
Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
https://dl.acm.org/doi/abs/10.1145/992200.992206
.. [2] T. A. Davis, A column pre-ordering strategy for the
unsymmetric-pattern multifrontal method, ACM Trans.
on Mathematical Software, 30(2), 2004, pp. 165--195.
https://dl.acm.org/doi/abs/10.1145/992200.992205
.. [3] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
method for unsymmetric sparse matrices, ACM Trans. on
Mathematical Software, 25(1), 1999, pp. 1--19.
https://doi.org/10.1145/305658.287640
.. [4] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
method for sparse LU factorization, SIAM J. Matrix Analysis and
Computations, 18(1), 1997, pp. 140--158.
https://doi.org/10.1137/S0895479894246905T.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import use_solver, spsolve
>>> from scipy.sparse import csc_array
>>> R = np.random.randn(5, 5)
>>> A = csc_array(R)
>>> b = np.random.randn(5)
>>> use_solver(useUmfpack=False) # enforce superLU over UMFPACK
>>> x = spsolve(A, b)
>>> np.allclose(A.dot(x), b)
True
>>> use_solver(useUmfpack=True) # reset umfPack usage to default
"""
global useUmfpack
if 'useUmfpack' in kwargs:
useUmfpack.u = kwargs['useUmfpack']
if useUmfpack.u and 'assumeSortedIndices' in kwargs:
umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])
def _get_umf_family(A):
"""Get umfpack family string given the sparse matrix dtype."""
_families = {
(np.float64, np.int32): 'di',
(np.complex128, np.int32): 'zi',
(np.float64, np.int64): 'dl',
(np.complex128, np.int64): 'zl'
}
# A.dtype.name can only be "float64" or
# "complex128" in control flow
f_type = getattr(np, A.dtype.name)
# control flow may allow for more index
# types to get through here
i_type = getattr(np, A.indices.dtype.name)
try:
family = _families[(f_type, i_type)]
except KeyError as e:
msg = ('only float64 or complex128 matrices with int32 or int64 '
f'indices are supported! (got: matrix: {f_type}, indices: {i_type})')
raise ValueError(msg) from e
# See gh-8278. Considered converting only if
# A.shape[0]*A.shape[1] > np.iinfo(np.int32).max,
# but that didn't always fix the issue.
family = family[0] + "l"
A_new = copy.copy(A)
A_new.indptr = np.asarray(A.indptr, dtype=np.int64)
A_new.indices = np.asarray(A.indices, dtype=np.int64)
return family, A_new
def spsolve(A, b, permc_spec=None, use_umfpack=True):
"""Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
Parameters
----------
A : ndarray or sparse array or matrix
The square matrix A will be converted into CSC or CSR form
b : ndarray or sparse array or matrix
The matrix or vector representing the right hand side of the equation.
If a vector, b.shape must be (n,) or (n, 1).
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_.
use_umfpack : bool, optional
if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_,
[6]_ . This is only referenced if b is a vector and
``scikits.umfpack`` is installed.
Returns
-------
x : ndarray or sparse array or matrix
the solution of the sparse linear equation.
If b is a vector, then x is a vector of size A.shape[1]
If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])
Notes
-----
For solving the matrix expression AX = B, this solver assumes the resulting
matrix X is sparse, as is often the case for very sparse inputs. If the
resulting X is dense, the construction of this sparse result will be
relatively expensive. In that case, consider converting A to a dense
matrix and using scipy.linalg.solve or its variants.
References
----------
.. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836:
COLAMD, an approximate column minimum degree ordering algorithm,
ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380.
:doi:`10.1145/1024074.1024080`
.. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate
minimum degree ordering algorithm, ACM Trans. on Mathematical
Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079`
.. [3] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
multifrontal method with a column pre-ordering strategy, ACM
Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
https://dl.acm.org/doi/abs/10.1145/992200.992206
.. [4] T. A. Davis, A column pre-ordering strategy for the
unsymmetric-pattern multifrontal method, ACM Trans.
on Mathematical Software, 30(2), 2004, pp. 165--195.
https://dl.acm.org/doi/abs/10.1145/992200.992205
.. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
method for unsymmetric sparse matrices, ACM Trans. on
Mathematical Software, 25(1), 1999, pp. 1--19.
https://doi.org/10.1145/305658.287640
.. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
method for sparse LU factorization, SIAM J. Matrix Analysis and
Computations, 18(1), 1997, pp. 140--158.
https://doi.org/10.1137/S0895479894246905T.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import spsolve
>>> A = csc_array([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> B = csc_array([[2, 0], [-1, 0], [2, 0]], dtype=float)
>>> x = spsolve(A, B)
>>> np.allclose(A.dot(x).toarray(), B.toarray())
True
"""
is_pydata_sparse = is_pydata_spmatrix(b)
pydata_sparse_cls = b.__class__ if is_pydata_sparse else None
A = convert_pydata_sparse_to_scipy(A)
b = convert_pydata_sparse_to_scipy(b)
if not (issparse(A) and A.format in ("csc", "csr")):
A = csc_array(A)
warn('spsolve requires A be CSC or CSR matrix format',
SparseEfficiencyWarning, stacklevel=2)
# b is a vector only if b has shape (n,) or (n, 1)
b_is_sparse = issparse(b)
if not b_is_sparse:
b = asarray(b)
b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A._asfptype() # upcast to a floating point format
result_dtype = np.promote_types(A.dtype, b.dtype)
if A.dtype != result_dtype:
A = A.astype(result_dtype)
if b.dtype != result_dtype:
b = b.astype(result_dtype)
# validate input shapes
M, N = A.shape
if (M != N):
raise ValueError(f"matrix must be square (has shape {(M, N)})")
if M != b.shape[0]:
raise ValueError(f"matrix - rhs dimension mismatch ({A.shape} - {b.shape[0]})")
if not hasattr(useUmfpack, 'u'):
useUmfpack.u = not noScikit
use_umfpack = use_umfpack and useUmfpack.u
if b_is_vector and use_umfpack:
if b_is_sparse:
b_vec = b.toarray()
else:
b_vec = b
b_vec = asarray(b_vec, dtype=A.dtype).ravel()
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack.u = False")
umf_family, A = _get_umf_family(A)
umf = umfpack.UmfpackContext(umf_family)
x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
autoTranspose=True)
else:
if b_is_vector and b_is_sparse:
b = b.toarray()
b_is_sparse = False
if not b_is_sparse:
if A.format == "csc":
flag = 1 # CSC format
else:
flag = 0 # CSR format
indices = A.indices.astype(np.intc, copy=False)
indptr = A.indptr.astype(np.intc, copy=False)
options = dict(ColPerm=permc_spec)
x, info = _superlu.gssv(N, A.nnz, A.data, indices, indptr,
b, flag, options=options)
if info != 0:
warn("Matrix is exactly singular", MatrixRankWarning, stacklevel=2)
x.fill(np.nan)
if b_is_vector:
x = x.ravel()
else:
# b is sparse
Afactsolve = factorized(A)
if not (b.format == "csc" or is_pydata_spmatrix(b)):
warn('spsolve is more efficient when sparse b '
'is in the CSC matrix format',
SparseEfficiencyWarning, stacklevel=2)
b = csc_array(b)
# Create a sparse output matrix by repeatedly applying
# the sparse factorization to solve columns of b.
data_segs = []
row_segs = []
col_segs = []
for j in range(b.shape[1]):
bj = b[:, j].toarray().ravel()
xj = Afactsolve(bj)
w = np.flatnonzero(xj)
segment_length = w.shape[0]
row_segs.append(w)
col_segs.append(np.full(segment_length, j, dtype=int))
data_segs.append(np.asarray(xj[w], dtype=A.dtype))
sparse_data = np.concatenate(data_segs)
idx_dtype = get_index_dtype(maxval=max(b.shape))
sparse_row = np.concatenate(row_segs, dtype=idx_dtype)
sparse_col = np.concatenate(col_segs, dtype=idx_dtype)
x = A.__class__((sparse_data, (sparse_row, sparse_col)),
shape=b.shape, dtype=A.dtype)
if is_pydata_sparse:
x = pydata_sparse_cls.from_scipy_sparse(x)
return x
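# --- Editor's illustrative sketch (not part of SciPy) ----------------------
# For a sparse right-hand side, spsolve() above factorizes A once and solves
# column by column, keeping only the non-zeros of each solution column.  A
# stripped-down, hypothetical version that returns a dense result instead:
def _example_columnwise_solve(A, B):
    import numpy as np

    solve = factorized(A)  # LU-factorize A once; CSC input is most efficient
    cols = [solve(B[:, [j]].toarray().ravel()) for j in range(B.shape[1])]
    return np.stack(cols, axis=1)  # dense array of shape (n, n_rhs)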
def splu(A, permc_spec=None, diag_pivot_thresh=None,
relax=None, panel_size=None, options=None):
"""
Compute the LU decomposition of a sparse, square matrix.
Parameters
----------
A : sparse array or matrix
Sparse array to factorize. Most efficient when provided in CSC
format. Other formats will be converted to CSC before factorization.
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering
diag_pivot_thresh : float, optional
Threshold used for a diagonal entry to be an acceptable pivot.
See SuperLU user's guide for details [1]_
relax : int, optional
Expert option for customizing the degree of relaxing supernodes.
See SuperLU user's guide for details [1]_
panel_size : int, optional
Expert option for customizing the panel size.
See SuperLU user's guide for details [1]_
options : dict, optional
Dictionary containing additional expert options to SuperLU.
See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
for more details. For example, you can specify
``options=dict(Equil=False, IterRefine='SINGLE'))``
to turn equilibration off and perform a single iterative refinement.
Returns
-------
invA : scipy.sparse.linalg.SuperLU
Object, which has a ``solve`` method.
See also
--------
spilu : incomplete LU decomposition
Notes
-----
This function uses the SuperLU library.
References
----------
.. [1] SuperLU https://portal.nersc.gov/project/sparse/superlu/
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import splu
>>> A = csc_array([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
>>> B = splu(A)
>>> x = np.array([1., 2., 3.], dtype=float)
>>> B.solve(x)
array([ 1. , -3. , -1.5])
>>> A.dot(B.solve(x))
array([ 1., 2., 3.])
>>> B.solve(A.dot(x))
array([ 1., 2., 3.])
"""
if is_pydata_spmatrix(A):
A_cls = type(A)
def csc_construct_func(*a, cls=A_cls):
return cls.from_scipy_sparse(csc_array(*a))
A = A.to_scipy_sparse().tocsc()
else:
csc_construct_func = csc_array
if not (issparse(A) and A.format == "csc"):
A = csc_array(A)
warn('splu converted its input to CSC format',
SparseEfficiencyWarning, stacklevel=2)
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A._asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
indices, indptr = safely_cast_index_arrays(A, np.intc, "SuperLU")
_options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
# Ensure that no column permutations are applied
if (_options["ColPerm"] == "NATURAL"):
_options["SymmetricMode"] = True
return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
csc_construct_func=csc_construct_func,
ilu=False, options=_options)
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
"""
Compute an incomplete LU decomposition for a sparse, square matrix.
The resulting object is an approximation to the inverse of `A`.
Parameters
----------
A : (N, N) array_like
Sparse array to factorize. Most efficient when provided in CSC format.
Other formats will be converted to CSC before factorization.
drop_tol : float, optional
Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
(default: 1e-4)
fill_factor : float, optional
Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
drop_rule : str, optional
Comma-separated string of drop rules to use.
Available rules: ``basic``, ``prows``, ``column``, ``area``,
``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
See SuperLU documentation for details.
Remaining other options
Same as for `splu`
Returns
-------
invA_approx : scipy.sparse.linalg.SuperLU
Object, which has a ``solve`` method.
See also
--------
splu : complete LU decomposition
Notes
-----
To get a better approximation to the inverse, you may need to
increase `fill_factor` AND decrease `drop_tol`.
This function uses the SuperLU library.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import spilu
>>> A = csc_array([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
>>> B = spilu(A)
>>> x = np.array([1., 2., 3.], dtype=float)
>>> B.solve(x)
array([ 1. , -3. , -1.5])
>>> A.dot(B.solve(x))
array([ 1., 2., 3.])
>>> B.solve(A.dot(x))
array([ 1., 2., 3.])
"""
if is_pydata_spmatrix(A):
A_cls = type(A)
def csc_construct_func(*a, cls=A_cls):
return cls.from_scipy_sparse(csc_array(*a))
A = A.to_scipy_sparse().tocsc()
else:
csc_construct_func = csc_array
if not (issparse(A) and A.format == "csc"):
A = csc_array(A)
warn('spilu converted its input to CSC format',
SparseEfficiencyWarning, stacklevel=2)
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A._asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
indices, indptr = safely_cast_index_arrays(A, np.intc, "SuperLU")
_options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
ILU_FillFactor=fill_factor,
DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
# Ensure that no column permutations are applied
if (_options["ColPerm"] == "NATURAL"):
_options["SymmetricMode"] = True
return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
csc_construct_func=csc_construct_func,
ilu=True, options=_options)
def factorized(A):
"""
Return a function for solving a sparse linear system, with A pre-factorized.
Parameters
----------
A : (N, N) array_like
Input. A in CSC format is most efficient. A CSR format matrix will
be converted to CSC before factorization.
Returns
-------
solve : callable
To solve the linear system of equations given in `A`, the `solve`
callable should be passed an ndarray of shape (N,).
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import factorized
>>> from scipy.sparse import csc_array
>>> A = np.array([[ 3. , 2. , -1. ],
... [ 2. , -2. , 4. ],
... [-1. , 0.5, -1. ]])
>>> solve = factorized(csc_array(A)) # Makes LU decomposition.
>>> rhs1 = np.array([1, -2, 0])
>>> solve(rhs1) # Uses the LU factors.
array([ 1., -2., -2.])
"""
if is_pydata_spmatrix(A):
A = A.to_scipy_sparse().tocsc()
if not hasattr(useUmfpack, 'u'):
useUmfpack.u = not noScikit
if useUmfpack.u:
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if not (issparse(A) and A.format == "csc"):
A = csc_array(A)
warn('splu converted its input to CSC format',
SparseEfficiencyWarning, stacklevel=2)
A = A._asfptype() # upcast to a floating point format
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack.u = False")
umf_family, A = _get_umf_family(A)
umf = umfpack.UmfpackContext(umf_family)
# Make LU decomposition.
umf.numeric(A)
def solve(b):
with np.errstate(divide="ignore", invalid="ignore"):
# Ignoring warnings with numpy >= 1.23.0, see gh-16523
result = umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)
return result
return solve
else:
return splu(A).solve
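# --- Editor's illustrative sketch (not part of SciPy) ----------------------
# The point of factorized() is to pay for the LU decomposition once and then
# solve many right-hand sides cheaply.  Hypothetical usage sketch:
def _example_reuse_factorization():
    import numpy as np

    rng = np.random.default_rng(0)
    A = csc_array(np.eye(4) + 0.1 * rng.standard_normal((4, 4)))
    solve = factorized(A)  # factor once
    return [solve(rng.standard_normal(4)) for _ in range(3)]  # reuse many times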
def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False,
unit_diagonal=False):
"""
Solve the equation ``A x = b`` for `x`, assuming A is a triangular matrix.
Parameters
----------
A : (M, M) sparse array or matrix
A sparse square triangular matrix. Should be in CSR or CSC format.
b : (M,) or (M, N) array_like
Right-hand side matrix in ``A x = b``
lower : bool, optional
Whether `A` is a lower or upper triangular matrix.
Default is lower triangular matrix.
overwrite_A : bool, optional
Allow changing `A`.
Enabling gives a performance gain. Default is False.
overwrite_b : bool, optional
Allow overwriting data in `b`.
Enabling gives a performance gain. Default is False.
If `overwrite_b` is True, it should be ensured that
`b` has an appropriate dtype to be able to store the result.
unit_diagonal : bool, optional
If True, diagonal elements of `A` are assumed to be 1.
.. versionadded:: 1.4.0
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system ``A x = b``. Shape of return matches shape
of `b`.
Raises
------
LinAlgError
If `A` is singular or not triangular.
ValueError
If shape of `A` or shape of `b` do not match the requirements.
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import spsolve_triangular
>>> A = csc_array([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
>>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float)
>>> x = spsolve_triangular(A, B)
>>> np.allclose(A.dot(x), B)
True
"""
if is_pydata_spmatrix(A):
A = A.to_scipy_sparse().tocsc()
trans = "N"
if issparse(A) and A.format == "csr":
A = A.T
trans = "T"
lower = not lower
if not (issparse(A) and A.format == "csc"):
warn('CSC or CSR matrix format is required. Converting to CSC matrix.',
SparseEfficiencyWarning, stacklevel=2)
A = csc_array(A)
elif not overwrite_A:
A = A.copy()
M, N = A.shape
if M != N:
raise ValueError(
f'A must be a square matrix but its shape is {A.shape}.')
if unit_diagonal:
with catch_warnings():
simplefilter('ignore', SparseEfficiencyWarning)
A.setdiag(1)
else:
diag = A.diagonal()
if np.any(diag == 0):
raise LinAlgError(
'A is singular: zero entry on diagonal.')
invdiag = 1/diag
if trans == "N":
A = A @ diags_array(invdiag)
else:
A = (A.T @ diags_array(invdiag)).T
# sum duplicates for non-canonical format
A.sum_duplicates()
b = np.asanyarray(b)
if b.ndim not in [1, 2]:
raise ValueError(
f'b must have 1 or 2 dims but its shape is {b.shape}.')
if M != b.shape[0]:
raise ValueError(
'The size of the dimensions of A must be equal to '
'the size of the first dimension of b but the shape of A is '
f'{A.shape} and the shape of b is {b.shape}.'
)
result_dtype = np.promote_types(np.promote_types(A.dtype, np.float32), b.dtype)
if A.dtype != result_dtype:
A = A.astype(result_dtype)
if b.dtype != result_dtype:
b = b.astype(result_dtype)
elif not overwrite_b:
b = b.copy()
if lower:
L = A
U = csc_array((N, N), dtype=result_dtype)
else:
L = eye_array(N, dtype=result_dtype, format='csc')
U = A
U.setdiag(0)
x, info = _superlu.gstrs(trans,
N, L.nnz, L.data, L.indices, L.indptr,
N, U.nnz, U.data, U.indices, U.indptr,
b)
if info:
raise LinAlgError('A is singular.')
if not unit_diagonal:
invdiag = invdiag.reshape(-1, *([1] * (len(x.shape) - 1)))
x = x * invdiag
return x
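# --- Editor's illustrative sketch (not part of SciPy) ----------------------
# When the diagonal is not assumed to be 1, spsolve_triangular() above rescales
# the columns of A by 1/diag(A) and rescales the solution at the end:
#     A x = b  <=>  (A D^{-1}) (D x) = b  with D = diag(A),  so  x = D^{-1} y.
# Hypothetical dense check of that identity:
def _example_unit_diagonal_rescaling():
    import numpy as np

    A = np.array([[3.0, 0.0, 0.0], [1.0, -1.0, 0.0], [2.0, 0.0, 1.0]])
    b = np.array([2.0, -1.0, 2.0])
    invdiag = 1.0 / np.diag(A)
    y = np.linalg.solve(A * invdiag, b)  # column-rescaled system has unit diagonal
    x = y * invdiag                      # undo the rescaling
    return np.allclose(A @ x, b)         # True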
def is_sptriangular(A):
"""Returns 2-tuple indicating lower/upper triangular structure for sparse ``A``
Checks for triangular structure in ``A``. The result is summarized in
two boolean values ``lower`` and ``upper`` to designate whether ``A`` is
lower triangular or upper triangular respectively. Diagonal ``A`` will
result in both being True. Non-triangular structure results in False for both.
Only the sparse structure is used here. Values are not checked for zeros.
This function will convert a copy of ``A`` to CSC format if it is not already
CSR or CSC format. So it may be more efficient to convert it yourself if you
have other uses for the CSR/CSC version.
If ``A`` is not square, the portions outside the upper left square of the
matrix do not affect its triangular structure. You probably want to work
with the square portion of the matrix, though it is not required here.
Parameters
----------
A : SciPy sparse array or matrix
A sparse matrix preferably in CSR or CSC format.
Returns
-------
lower, upper : 2-tuple of bool
.. versionadded:: 1.15.0
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array, eye_array
>>> from scipy.sparse.linalg import is_sptriangular
>>> A = csc_array([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
>>> is_sptriangular(A)
(True, False)
>>> D = eye_array(3, format='csr')
>>> is_sptriangular(D)
(True, True)
"""
if not (issparse(A) and A.format in ("csc", "csr", "coo", "dia", "dok", "lil")):
warn('is_sptriangular needs sparse and not BSR format. Converting to CSR.',
SparseEfficiencyWarning, stacklevel=2)
A = csr_array(A)
# bsr is better off converting to csr
if A.format == "dia":
return A.offsets.max() <= 0, A.offsets.min() >= 0
elif A.format == "coo":
rows, cols = A.coords
return (cols <= rows).all(), (cols >= rows).all()
elif A.format == "dok":
return all(c <= r for r, c in A.keys()), all(c >= r for r, c in A.keys())
elif A.format == "lil":
lower = all(col <= row for row, cols in enumerate(A.rows) for col in cols)
upper = all(col >= row for row, cols in enumerate(A.rows) for col in cols)
return lower, upper
# format in ("csc", "csr")
indptr, indices = A.indptr, A.indices
N = len(indptr) - 1
lower, upper = True, True
# check middle, 1st, last col (treat as CSC and switch at end if CSR)
for col in [N // 2, 0, -1]:
rows = indices[indptr[col]:indptr[col + 1]]
upper = upper and (col >= rows).all()
lower = lower and (col <= rows).all()
if not upper and not lower:
return False, False
# check all cols
cols = np.repeat(np.arange(N), np.diff(indptr))
rows = indices
upper = upper and (cols >= rows).all()
lower = lower and (cols <= rows).all()
if A.format == 'csr':
return upper, lower
return lower, upper
def spbandwidth(A):
"""Return the lower and upper bandwidth of a 2D numeric array.
Computes the lower and upper limits on the bandwidth of the
sparse 2D array ``A``. The result is summarized as a 2-tuple
of positive integers ``(lo, hi)``. A zero denotes no sub/super
    diagonal entries on that side (triangular). The maximum value
    for ``lo`` (``hi``) is one less than the number of rows (cols).
Only the sparse structure is used here. Values are not checked for zeros.
Parameters
----------
A : SciPy sparse array or matrix
        A sparse matrix preferably in CSR or CSC format.
Returns
-------
below, above : 2-tuple of int
The distance to the farthest non-zero diagonal below/above the
main diagonal.
.. versionadded:: 1.15.0
Examples
--------
    >>> import numpy as np
    >>> from scipy.sparse import csc_array, eye_array
    >>> from scipy.sparse.linalg import spbandwidth
    >>> A = csc_array([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
    >>> spbandwidth(A)
    (2, 0)
    >>> D = eye_array(3, format='csr')
    >>> spbandwidth(D)
(0, 0)
"""
if not (issparse(A) and A.format in ("csc", "csr", "coo", "dia", "dok")):
warn('spbandwidth needs sparse format not LIL and BSR. Converting to CSR.',
SparseEfficiencyWarning, stacklevel=2)
A = csr_array(A)
# bsr and lil are better off converting to csr
if A.format == "dia":
return max(0, -A.offsets.min().item()), max(0, A.offsets.max().item())
if A.format in ("csc", "csr"):
indptr, indices = A.indptr, A.indices
N = len(indptr) - 1
gap = np.repeat(np.arange(N), np.diff(indptr)) - indices
if A.format == 'csr':
gap = -gap
elif A.format == "coo":
gap = A.coords[1] - A.coords[0]
elif A.format == "dok":
gap = [(c - r) for r, c in A.keys()] + [0]
return -min(gap), max(gap)
return max(-np.min(gap).item(), 0), max(np.max(gap).item(), 0)
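# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original SciPy source): a minimal
# demonstration of the two structure probes defined above. It assumes
# SciPy >= 1.15, where `is_sptriangular` and `spbandwidth` are available; the
# small test matrix is illustrative only.
if __name__ == "__main__":
    import numpy as np
    from scipy.sparse import csr_array

    # Dense 4x4 matrix with a fully populated lower triangle.
    A_demo = csr_array(np.tril(np.arange(1.0, 17.0).reshape(4, 4)))
    lower, upper = is_sptriangular(A_demo)
    print(bool(lower), bool(upper))   # True False: lower triangular only
    print(spbandwidth(A_demo))        # (3, 0): three sub-diagonals, none above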
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@sparse@linalg@[email protected]@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "xinglunju/pyAmor",
"repo_path": "pyAmor_extracted/pyAmor-master/README.md",
"type": "Markdown"
}
|
Python scripts to derive physical properties (linewidth, temperature, column density, etc.) using Ammonia hyperfine lines.
There are three versions for now:
- `nh3_trot_v0.2_singlev.py` - Goes through a datacube and derives maps, assuming there is only one velocity component.
- `pyamor_v0.4.py` - Also goes through a datacube, but allows for two velocity components.
- `ave_spec_g2853.py` - Fits a single spectrum. Optimized for G28.53 for now.
|
xinglunjuREPO_NAMEpyAmorPATH_START.@pyAmor_extracted@[email protected]@.PATH_END.py
|
{
"filename": "allTimesRead.py",
"repo_name": "wmpg/Supracenter",
"repo_path": "Supracenter_extracted/Supracenter-master/supra/Fireballs/allTimesRead.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
from supra.Fireballs.Program import configRead, configParse, position, station
def readAllTimes(file_name, dists_name=[], typ=4, stn_list=[]):
#typ = 4 [perturbation, station, 0 - ballistic/ 1 - fragmentation, frag number (0 for ballistic)]
#typ = 6 [perturbation, station, 0 - ballistic/ 1 - fragmentation, zeniths, velocity]
zenith_list = [5, 45, 85]
velocity_list = [11, 15, 19, 23, 27, 31]
# lat_list = [49.5, 50.5, 49.0, 49.5, 49.5]
# lon_list = [10.0, 10.0, 10.0, 9.5, 10.5]
# titles = ['lat: 49.5 lon: 10.0', 'lat: 50.5 lon: 10.0', 'lat: 49.0 lon: 10.0', 'lat: 49.5 lon: 9.5', 'lat: 49.5 lon: 10.5']
c = ['y', 'm', 'c']
allTimes = np.array(np.load(file_name))
allDists = np.load(dists_name)
a = [None]*len(stn_list)
if typ == 6:
plt.style.use('dark_background')
for ii, stn in enumerate(stn_list):
fig = plt.figure()
ax = fig.add_subplot(111)
#print('//////////////////////////')
for ze_i, ze in enumerate(zenith_list):
#print("###############")
#print("Station: {:}".format(stn.code))
for j in range(len(velocity_list)):
#print("V = {:} km/s".format(velocity_list[j]))
#print(allTimes[ii, 0, ze_i, :, 0])
#print(allDists[0, ii, 0, ze_i, j])
for i in range(10):
print(allTimes[i].shape)
a[ii] = plt.plot(velocity_list, allTimes[i][ii, 0, ze_i, :, 0] - (allTimes[i][ii, 0, ze_i, 2, 0] + allTimes[i][ii, 0, ze_i, 3, 0])/2, alpha=0.2, c=c[ze_i])
if j == 1:
a[ii] = plt.plot(velocity_list, allTimes[0][ii, 0, ze_i, :, 0] - (allTimes[0][ii, 0, ze_i, 2, 0] + allTimes[0][ii, 0, ze_i, 3, 0])/2, label='Zenith: {:}'.format(ze), c=c[ze_i])
else:
a[ii] = plt.plot(velocity_list, allTimes[0][ii, 0, ze_i, :, 0] - (allTimes[0][ii, 0, ze_i, 2, 0] + allTimes[0][ii, 0, ze_i, 3, 0])/2, c=c[ze_i])
a[ii] = plt.plot([11, 31], [0, 0], 'w--')
ax.set_xlabel('Average Velocity (km/s)')
ax.set_ylabel('Relative Arrival Time (s)')
ax.set_ylim([-2, 2])
ax.set_title("{:}-{:}".format(stn.network.strip(), stn.code.strip()))
plt.legend()
a[ii] = plt.show()
if __name__ == '__main__':
file_name = '/home/luke/Desktop/perts/all_pick_times.npy'
dists_name = '/home/luke/Desktop/Seismic_data/2016-03-06 Stubenberg fireball THEO/all_pick_dists.npy'
typ = 6
arg_parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
arg_parser.add_argument('input_file', type=str, help='Path to Supracenter input file.')
# Parse the command line arguments
cml_args = arg_parser.parse_args()
#################
setup = configRead(cml_args.input_file)
configParse(setup, 'picks')
picks_name = os.path.join(setup.working_directory, setup.fireball_name, 'data.txt')
picks_name = setup.station_picks_file
stn_list = []
with open(picks_name) as f:
for ii, line in enumerate(f):
if ii > 0:
line = line.split(',')
stn = station(line[1], line[2], position(float(line[3]), float(line[4]), float(line[5])), '...', '...', '...')
stn_list.append(stn)
readAllTimes(file_name, dists_name=dists_name, typ=typ, stn_list=stn_list)
if __name__ == '__main__':
pass
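# ----------------------------------------------------------------------------
# Hedged usage note (not part of the original Supracenter source): the script
# is run from the command line with a single Supracenter config file, e.g.
# `python allTimesRead.py /path/to/supracenter_input.ini`. The config file
# extension is an assumption for illustration; the hard-coded `file_name` and
# `dists_name` .npy paths above would also need to be adapted locally.
# ----------------------------------------------------------------------------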
|
wmpgREPO_NAMESupracenterPATH_START.@Supracenter_extracted@Supracenter-master@supra@[email protected]@.PATH_END.py
|
{
"filename": "_hoverlabel.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/sankey/_hoverlabel.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "sankey"
_path_str = "sankey.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
        two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.sankey.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
        The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for namelength
.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
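# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated Plotly source): building a
# hover-label style with the properties defined above. The keyword names come
# from `_valid_props`; the values are illustrative only, and the object would
# typically be passed as the `hoverlabel` argument of a sankey trace.
#
#     label = Hoverlabel(
#         bgcolor="white", bordercolor="black", align="left", namelength=-1
#     )
# ----------------------------------------------------------------------------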
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@sankey@[email protected]_END.py
|
{
"filename": "tools.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/cli/langchain_cli/integration_template/integration_template/tools.py",
"type": "Python"
}
|
"""__ModuleName__ tools."""
from typing import Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
class __ModuleName__ToolInput(BaseModel):
"""Input schema for __ModuleName__ tool.
This docstring is **not** part of what is sent to the model when performing tool
calling. The Field default values and descriptions **are** part of what is sent to
the model when performing tool calling.
"""
# TODO: Add input args and descriptions.
a: int = Field(..., description="first number to add")
b: int = Field(..., description="second number to add")
class __ModuleName__Tool(BaseTool): # type: ignore[override]
"""__ModuleName__ tool.
Setup:
# TODO: Replace with relevant packages, env vars.
Install ``__package_name__`` and set environment variable ``__MODULE_NAME___API_KEY``.
.. code-block:: bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
Instantiation:
.. code-block:: python
tool = __ModuleName__Tool(
# TODO: init params
)
Invocation with args:
.. code-block:: python
# TODO: invoke args
tool.invoke({...})
.. code-block:: python
# TODO: output of invocation
Invocation with ToolCall:
.. code-block:: python
# TODO: invoke args
tool.invoke({"args": {...}, "id": "1", "name": tool.name, "type": "tool_call"})
.. code-block:: python
# TODO: output of invocation
""" # noqa: E501
# TODO: Set tool name and description
name: str = "TODO: Tool name"
"""The name that is passed to the model when performing tool calling."""
description: str = "TODO: Tool description."
"""The description that is passed to the model when performing tool calling."""
args_schema: Type[BaseModel] = __ModuleName__ToolInput
"""The schema that is passed to the model when performing tool calling."""
# TODO: Add any other init params for the tool.
# param1: Optional[str]
# """param1 determines foobar"""
    # TODO: Replace (a, b) with real tool arguments.
def _run(
self, a: int, b: int, *, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
return str(a + b + 80)
# TODO: Implement if tool has native async functionality, otherwise delete.
# async def _arun(
# self,
# a: int,
# b: int,
# *,
# run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
# ) -> str:
# ...
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@cli@langchain_cli@integration_template@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astrolabsoftware/fink-science",
"repo_path": "fink-science_extracted/fink-science-master/fink_science/ssoft/__init__.py",
"type": "Python"
}
|
astrolabsoftwareREPO_NAMEfink-sciencePATH_START.@fink-science_extracted@fink-science-master@fink_science@ssoft@[email protected]_END.py
|
|
{
"filename": "_tickvals.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/marker/colorbar/_tickvals.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name="tickvals", parent_name="histogram.marker.colorbar", **kwargs
):
super(TickvalsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@histogram@marker@colorbar@[email protected]_END.py
|
{
"filename": "corner_ratios.py",
"repo_name": "eXtremeGravityInstitute/QuickCBC",
"repo_path": "QuickCBC_extracted/QuickCBC-main/corner_ratios.py",
"type": "Python"
}
|
import argparse
import glob
import matplotlib
matplotlib.use('Agg')
import corner
import numpy as np
data = np.loadtxt("ratios.dat", usecols=(0, 1, 2))
figure = corner.corner(data, labels=[r'$\log_{10}(H/L)$', r'$\log_{10}(H/V)$', r'$\log_{10}(L/V)$'], bins=40, title_kwargs={"fontsize": 12}, label_kwargs={"fontsize": 14}, show_titles=True)
figure.savefig("corner_ratios.png", dpi=300)
|
eXtremeGravityInstituteREPO_NAMEQuickCBCPATH_START.@QuickCBC_extracted@QuickCBC-main@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram/stream/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._token import TokenValidator
from ._maxpoints import MaxpointsValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [], ["._token.TokenValidator", "._maxpoints.MaxpointsValidator"]
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram@stream@[email protected]_END.py
|
{
"filename": "search.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/tools/gmail/search.py",
"type": "Python"
}
|
import base64
import email
from enum import Enum
from typing import Any, Dict, List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.gmail.base import GmailBaseTool
from langchain_community.tools.gmail.utils import clean_email_body
class Resource(str, Enum):
"""Enumerator of Resources to search."""
THREADS = "threads"
MESSAGES = "messages"
class SearchArgsSchema(BaseModel):
"""Input for SearchGmailTool."""
# From https://support.google.com/mail/answer/7190?hl=en
query: str = Field(
...,
description="The Gmail query. Example filters include from:sender,"
" to:recipient, subject:subject, -filtered_term,"
" in:folder, is:important|read|starred, after:year/mo/date, "
"before:year/mo/date, label:label_name"
' "exact phrase".'
" Search newer/older than using d (day), m (month), and y (year): "
"newer_than:2d, older_than:1y."
" Attachments with extension example: filename:pdf. Multiple term"
" matching example: from:amy OR from:david.",
)
resource: Resource = Field(
default=Resource.MESSAGES,
description="Whether to search for threads or messages.",
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
class GmailSearch(GmailBaseTool): # type: ignore[override, override]
"""Tool that searches for messages or threads in Gmail."""
name: str = "search_gmail"
description: str = (
"Use this tool to search for email messages or threads."
" The input must be a valid Gmail query."
" The output is a JSON list of the requested resource."
)
args_schema: Type[SearchArgsSchema] = SearchArgsSchema
def _parse_threads(self, threads: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Add the thread message snippets to the thread results
results = []
for thread in threads:
thread_id = thread["id"]
thread_data = (
self.api_resource.users()
.threads()
.get(userId="me", id=thread_id)
.execute()
)
messages = thread_data["messages"]
thread["messages"] = []
for message in messages:
snippet = message["snippet"]
thread["messages"].append({"snippet": snippet, "id": message["id"]})
results.append(thread)
return results
def _parse_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
results = []
for message in messages:
message_id = message["id"]
message_data = (
self.api_resource.users()
.messages()
.get(userId="me", format="raw", id=message_id)
.execute()
)
raw_message = base64.urlsafe_b64decode(message_data["raw"])
email_msg = email.message_from_bytes(raw_message)
subject = email_msg["Subject"]
sender = email_msg["From"]
message_body = ""
if email_msg.is_multipart():
for part in email_msg.walk():
ctype = part.get_content_type()
cdispo = str(part.get("Content-Disposition"))
if ctype == "text/plain" and "attachment" not in cdispo:
try:
message_body = part.get_payload(decode=True).decode("utf-8") # type: ignore[union-attr]
except UnicodeDecodeError:
message_body = part.get_payload(decode=True).decode( # type: ignore[union-attr]
"latin-1"
)
break
else:
message_body = email_msg.get_payload(decode=True).decode("utf-8") # type: ignore[union-attr]
body = clean_email_body(message_body)
results.append(
{
"id": message["id"],
"threadId": message_data["threadId"],
"snippet": message_data["snippet"],
"body": body,
"subject": subject,
"sender": sender,
"from": email_msg["From"],
"date": email_msg["Date"],
"to": email_msg["To"],
"cc": email_msg["Cc"],
}
)
return results
def _run(
self,
query: str,
resource: Resource = Resource.MESSAGES,
max_results: int = 10,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Dict[str, Any]]:
"""Run the tool."""
results = (
self.api_resource.users()
.messages()
.list(userId="me", q=query, maxResults=max_results)
.execute()
.get(resource.value, [])
)
if resource == Resource.THREADS:
return self._parse_threads(results)
elif resource == Resource.MESSAGES:
return self._parse_messages(results)
else:
raise NotImplementedError(f"Resource of type {resource} not implemented.")
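# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original LangChain source): wiring the
# search tool to a Gmail API resource. It assumes valid local OAuth
# credentials and that `build_resource_service` is available in
# `langchain_community.tools.gmail.utils`; treat it as illustrative only.
if __name__ == "__main__":
    from langchain_community.tools.gmail.utils import build_resource_service

    api_resource = build_resource_service()  # reads local token/credentials
    tool = GmailSearch(api_resource=api_resource)
    hits = tool.invoke(
        {"query": "from:[email protected] newer_than:7d", "max_results": 5}
    )
    for hit in hits:
        print(hit["subject"], "--", hit["sender"])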
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@tools@[email protected]@.PATH_END.py
|
{
"filename": "LifeCycleTest.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/containerTests/lifecycleTest/src/lifecycleTestImpl/LifeCycleTest.py",
"type": "Python"
}
|
#! /usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimeter Array
# Copyright (c) European Southern Observatory, 2011
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# "@(#) $Id: LifeCycleTest.py,v 1.2 2011/10/28 14:52:25 hsommer Exp $"
#
# who when what
# -------- ---------- -------------------------------------------------------
# dfugate 2003/08/20 Created.
#--REGULAR IMPORTS-------------------------------------------------------------
#--CORBA STUBS-----------------------------------------------------------------
import lifecycleTest__POA
import lifecycleTest
#--ACS Imports-----------------------------------------------------------------
from Acspy.Servants.ContainerServices import ContainerServices
from Acspy.Servants.ComponentLifecycle import ComponentLifecycle
from Acspy.Servants.ACSComponent import ACSComponent
#--GLOBALS---------------------------------------------------------------------
#------------------------------------------------------------------------------
class LifeCycleTest(lifecycleTest__POA.TestLifeCycleComp, #CORBA stubs for IDL interface
ACSComponent, #Base IDL interface
ContainerServices, #Developer niceties
ComponentLifecycle): #HLA stuff
'''
Simple component implementation provided as a reference for developers.
'''
def __init__(self):
'''
Just call superclass constructors here.
'''
ACSComponent.__init__(self)
ContainerServices.__init__(self)
return
#------------------------------------------------------------------------------
#--Implementation of IDL methods-----------------------------------------------
#------------------------------------------------------------------------------
def dummyInterface(self):
'''
Python implementation of IDL method.
'''
self.getLogger().logInfo("dummyInterface called...")
#------------------------------------------------------------------------------
#--Main defined only for generic testing---------------------------------------
#------------------------------------------------------------------------------
if __name__ == "__main__":
print "Creating an object"
g = LifeCycleTest()
print "Done..."
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@containerTests@lifecycleTest@src@[email protected]@.PATH_END.py
|
{
"filename": "ttx.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/fonttools/fontTools/ttx.py",
"type": "Python"
}
|
"""\
usage: ttx [options] inputfile1 [... inputfileN]
TTX -- From OpenType To XML And Back
If an input file is a TrueType or OpenType font file, it will be
decompiled to a TTX file (an XML-based text format).
If an input file is a TTX file, it will be compiled to whatever
format the data is in, a TrueType or OpenType/CFF font file.
A special input value of - means read from the standard input.
Output files are created so they are unique: an existing file is
never overwritten.
General options
===============
-h Help print this message.
--version show version and exit.
-d <outputfolder> Specify a directory where the output files are
to be created.
-o <outputfile> Specify a file to write the output to. A special
value of - would use the standard output.
-f Overwrite existing output file(s), i.e. don't append
numbers.
-v Verbose: more messages will be written to stdout
about what is being done.
-q Quiet: No messages will be written to stdout about
what is being done.
-a allow virtual glyph IDs on compile or decompile.
Dump options
============
-l List table info: instead of dumping to a TTX file, list
some minimal info about each table.
-t <table> Specify a table to dump. Multiple -t options
are allowed. When no -t option is specified, all tables
will be dumped.
-x <table> Specify a table to exclude from the dump. Multiple
-x options are allowed. -t and -x are mutually exclusive.
-s Split tables: save the TTX data into separate TTX files per
table and write one small TTX file that contains references
to the individual table dumps. This file can be used as
input to ttx, as long as the table files are in the
same directory.
-g Split glyf table: Save the glyf data into separate TTX files
per glyph and write a small TTX for the glyf table which
contains references to the individual TTGlyph elements.
NOTE: specifying -g implies -s (no need for -s together
with -g)
-i Do NOT disassemble TT instructions: when this option is
given, all TrueType programs (glyph programs, the font
program and the pre-program) will be written to the TTX
file as hex data instead of assembly. This saves some time
and makes the TTX file smaller.
-z <format> Specify a bitmap data export option for EBDT:
{'raw', 'row', 'bitwise', 'extfile'} or for the CBDT:
{'raw', 'extfile'} Each option does one of the following:
-z raw
export the bitmap data as a hex dump
-z row
export each row as hex data
-z bitwise
export each row as binary in an ASCII art style
-z extfile
export the data as external files with XML references
If no export format is specified 'raw' format is used.
-e Don't ignore decompilation errors, but show a full traceback
and abort.
-y <number> Select font number for TrueType Collection (.ttc/.otc),
starting from 0.
--unicodedata <UnicodeData.txt>
Use custom database file to write character names in the
comments of the cmap TTX output.
--newline <value>
Control how line endings are written in the XML file. It
can be 'LF', 'CR', or 'CRLF'. If not specified, the
default platform-specific line endings are used.
Compile options
===============
-m Merge with TrueType-input-file: specify a TrueType or
OpenType font file to be merged with the TTX file. This
option is only valid when at most one TTX file is specified.
-b Don't recalc glyph bounding boxes: use the values in the
TTX file as-is.
--recalc-timestamp
Set font 'modified' timestamp to current time.
By default, the modification time of the TTX file will be
used.
--no-recalc-timestamp
Keep the original font 'modified' timestamp.
--flavor <type>
Specify flavor of output font file. May be 'woff' or 'woff2'.
Note that WOFF2 requires the Brotli Python extension,
available at https://github.com/google/brotli
--with-zopfli
Use Zopfli instead of Zlib to compress WOFF. The Python
extension is available at https://pypi.python.org/pypi/zopfli
"""
from fontTools.ttLib import TTFont, TTLibError
from fontTools.misc.macCreatorType import getMacCreatorAndType
from fontTools.unicode import setUnicodeData
from fontTools.misc.textTools import Tag, tostr
from fontTools.misc.timeTools import timestampSinceEpoch
from fontTools.misc.loggingTools import Timer
from fontTools.misc.cliTools import makeOutputFileName
import os
import sys
import getopt
import re
import logging
log = logging.getLogger("fontTools.ttx")
opentypeheaderRE = re.compile("""sfntVersion=['"]OTTO["']""")
class Options(object):
listTables = False
outputDir = None
outputFile = None
overWrite = False
verbose = False
quiet = False
splitTables = False
splitGlyphs = False
disassembleInstructions = True
mergeFile = None
recalcBBoxes = True
ignoreDecompileErrors = True
bitmapGlyphDataFormat = "raw"
unicodedata = None
newlinestr = "\n"
recalcTimestamp = None
flavor = None
useZopfli = False
def __init__(self, rawOptions, numFiles):
self.onlyTables = []
self.skipTables = []
self.fontNumber = -1
for option, value in rawOptions:
# general options
if option == "-h":
print(__doc__)
sys.exit(0)
elif option == "--version":
from fontTools import version
print(version)
sys.exit(0)
elif option == "-d":
if not os.path.isdir(value):
raise getopt.GetoptError(
"The -d option value must be an existing directory"
)
self.outputDir = value
elif option == "-o":
self.outputFile = value
elif option == "-f":
self.overWrite = True
elif option == "-v":
self.verbose = True
elif option == "-q":
self.quiet = True
# dump options
elif option == "-l":
self.listTables = True
elif option == "-t":
# pad with space if table tag length is less than 4
value = value.ljust(4)
self.onlyTables.append(value)
elif option == "-x":
# pad with space if table tag length is less than 4
value = value.ljust(4)
self.skipTables.append(value)
elif option == "-s":
self.splitTables = True
elif option == "-g":
# -g implies (and forces) splitTables
self.splitGlyphs = True
self.splitTables = True
elif option == "-i":
self.disassembleInstructions = False
elif option == "-z":
validOptions = ("raw", "row", "bitwise", "extfile")
if value not in validOptions:
raise getopt.GetoptError(
"-z does not allow %s as a format. Use %s"
                        % (value, validOptions)
)
self.bitmapGlyphDataFormat = value
elif option == "-y":
self.fontNumber = int(value)
# compile options
elif option == "-m":
self.mergeFile = value
elif option == "-b":
self.recalcBBoxes = False
elif option == "-e":
self.ignoreDecompileErrors = False
elif option == "--unicodedata":
self.unicodedata = value
elif option == "--newline":
validOptions = ("LF", "CR", "CRLF")
if value == "LF":
self.newlinestr = "\n"
elif value == "CR":
self.newlinestr = "\r"
elif value == "CRLF":
self.newlinestr = "\r\n"
else:
raise getopt.GetoptError(
"Invalid choice for --newline: %r (choose from %s)"
% (value, ", ".join(map(repr, validOptions)))
)
elif option == "--recalc-timestamp":
self.recalcTimestamp = True
elif option == "--no-recalc-timestamp":
self.recalcTimestamp = False
elif option == "--flavor":
self.flavor = value
elif option == "--with-zopfli":
self.useZopfli = True
if self.verbose and self.quiet:
raise getopt.GetoptError("-q and -v options are mutually exclusive")
if self.verbose:
self.logLevel = logging.DEBUG
elif self.quiet:
self.logLevel = logging.WARNING
else:
self.logLevel = logging.INFO
if self.mergeFile and self.flavor:
raise getopt.GetoptError("-m and --flavor options are mutually exclusive")
if self.onlyTables and self.skipTables:
raise getopt.GetoptError("-t and -x options are mutually exclusive")
if self.mergeFile and numFiles > 1:
raise getopt.GetoptError(
"Must specify exactly one TTX source file when using -m"
)
if self.flavor != "woff" and self.useZopfli:
raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'")
def ttList(input, output, options):
ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True)
reader = ttf.reader
tags = sorted(reader.keys())
print('Listing table info for "%s":' % input)
format = " %4s %10s %8s %8s"
print(format % ("tag ", " checksum", " length", " offset"))
print(format % ("----", "----------", "--------", "--------"))
for tag in tags:
entry = reader.tables[tag]
if ttf.flavor == "woff2":
# WOFF2 doesn't store table checksums, so they must be calculated
from fontTools.ttLib.sfnt import calcChecksum
data = entry.loadData(reader.transformBuffer)
checkSum = calcChecksum(data)
else:
checkSum = int(entry.checkSum)
if checkSum < 0:
checkSum = checkSum + 0x100000000
checksum = "0x%08X" % checkSum
print(format % (tag, checksum, entry.length, entry.offset))
print()
ttf.close()
@Timer(log, "Done dumping TTX in %(time).3f seconds")
def ttDump(input, output, options):
input_name = input
if input == "-":
input, input_name = sys.stdin.buffer, sys.stdin.name
output_name = output
if output == "-":
output, output_name = sys.stdout, sys.stdout.name
log.info('Dumping "%s" to "%s"...', input_name, output_name)
if options.unicodedata:
setUnicodeData(options.unicodedata)
ttf = TTFont(
input,
0,
ignoreDecompileErrors=options.ignoreDecompileErrors,
fontNumber=options.fontNumber,
)
ttf.saveXML(
output,
tables=options.onlyTables,
skipTables=options.skipTables,
splitTables=options.splitTables,
splitGlyphs=options.splitGlyphs,
disassembleInstructions=options.disassembleInstructions,
bitmapGlyphDataFormat=options.bitmapGlyphDataFormat,
newlinestr=options.newlinestr,
)
ttf.close()
@Timer(log, "Done compiling TTX in %(time).3f seconds")
def ttCompile(input, output, options):
input_name = input
if input == "-":
input, input_name = sys.stdin, sys.stdin.name
output_name = output
if output == "-":
output, output_name = sys.stdout.buffer, sys.stdout.name
log.info('Compiling "%s" to "%s"...' % (input_name, output))
if options.useZopfli:
from fontTools.ttLib import sfnt
sfnt.USE_ZOPFLI = True
ttf = TTFont(
options.mergeFile,
flavor=options.flavor,
recalcBBoxes=options.recalcBBoxes,
recalcTimestamp=options.recalcTimestamp,
)
ttf.importXML(input)
if options.recalcTimestamp is None and "head" in ttf and input is not sys.stdin:
# use TTX file modification time for head "modified" timestamp
mtime = os.path.getmtime(input)
ttf["head"].modified = timestampSinceEpoch(mtime)
ttf.save(output)
def guessFileType(fileName):
if fileName == "-":
header = sys.stdin.buffer.peek(256)
ext = ""
else:
base, ext = os.path.splitext(fileName)
try:
with open(fileName, "rb") as f:
header = f.read(256)
except IOError:
return None
if header.startswith(b"\xef\xbb\xbf<?xml"):
header = header.lstrip(b"\xef\xbb\xbf")
cr, tp = getMacCreatorAndType(fileName)
if tp in ("sfnt", "FFIL"):
return "TTF"
if ext == ".dfont":
return "TTF"
head = Tag(header[:4])
if head == "OTTO":
return "OTF"
elif head == "ttcf":
return "TTC"
elif head in ("\0\1\0\0", "true"):
return "TTF"
elif head == "wOFF":
return "WOFF"
elif head == "wOF2":
return "WOFF2"
elif head == "<?xm":
# Use 'latin1' because that can't fail.
header = tostr(header, "latin1")
if opentypeheaderRE.search(header):
return "OTX"
else:
return "TTX"
return None
def parseOptions(args):
rawOptions, files = getopt.gnu_getopt(
args,
"ld:o:fvqht:x:sgim:z:baey:",
[
"unicodedata=",
"recalc-timestamp",
"no-recalc-timestamp",
"flavor=",
"version",
"with-zopfli",
"newline=",
],
)
options = Options(rawOptions, len(files))
jobs = []
if not files:
raise getopt.GetoptError("Must specify at least one input file")
for input in files:
if input != "-" and not os.path.isfile(input):
raise getopt.GetoptError('File not found: "%s"' % input)
tp = guessFileType(input)
if tp in ("OTF", "TTF", "TTC", "WOFF", "WOFF2"):
extension = ".ttx"
if options.listTables:
action = ttList
else:
action = ttDump
elif tp == "TTX":
extension = "." + options.flavor if options.flavor else ".ttf"
action = ttCompile
elif tp == "OTX":
extension = "." + options.flavor if options.flavor else ".otf"
action = ttCompile
else:
raise getopt.GetoptError('Unknown file type: "%s"' % input)
if options.outputFile:
output = options.outputFile
else:
if input == "-":
raise getopt.GetoptError("Must provide -o when reading from stdin")
output = makeOutputFileName(
input, options.outputDir, extension, options.overWrite
)
# 'touch' output file to avoid race condition in choosing file names
if action != ttList:
open(output, "a").close()
jobs.append((action, input, output))
return jobs, options
def process(jobs, options):
for action, input, output in jobs:
action(input, output, options)
def main(args=None):
"""Convert OpenType fonts to XML and back"""
from fontTools import configLogger
if args is None:
args = sys.argv[1:]
try:
jobs, options = parseOptions(args)
except getopt.GetoptError as e:
print("%s\nERROR: %s" % (__doc__, e), file=sys.stderr)
sys.exit(2)
configLogger(level=options.logLevel)
try:
process(jobs, options)
except KeyboardInterrupt:
log.error("(Cancelled.)")
sys.exit(1)
except SystemExit:
raise
except TTLibError as e:
log.error(e)
sys.exit(1)
except:
log.exception("Unhandled exception has occurred")
sys.exit(1)
if __name__ == "__main__":
sys.exit(main())
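# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original fontTools source): typical
# invocations of this CLI, assuming a font.ttf / font.ttx pair exists in the
# working directory. The module can be run directly or via the installed
# `ttx` console script.
#
#     python -m fontTools.ttx font.ttf                  # decompile to font.ttx
#     python -m fontTools.ttx -t name -t cmap font.ttf  # dump selected tables
#     python -m fontTools.ttx -o out.ttf font.ttx       # compile back to a binary font
# ----------------------------------------------------------------------------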
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@fonttools@[email protected]@.PATH_END.py
|
{
"filename": "dust.py",
"repo_name": "ArisTr/PyRaTE",
"repo_path": "PyRaTE_extracted/PyRaTE-master/PyRaTE/dust.py",
"type": "Python"
}
|
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
# NAME: #
# #
# dust.py #
# #
# #
# DESCRIPTION: #
# #
# Python script for calculating the absorption/extinction coefficients #
# for a specific frequency. 3 types of spherical grains are assumed. #
# #
# i.) ices (dirty ices + coated silicates) #
# ii.) silicates #
# iii.) amorphous carbon #
# #
# Optical data has been taken from Preibisch et al. (1993) #
# #
# http://articles.adsabs.harvard.edu/full/1993A%26A...279..577P #
# #
# but the amount of dust has been reduced to be compatible with the #
# Pollack et al. (1985) model used in the hydrodynamical calculations. #
# #
# http://www.sciencedirect.com/science/article/pii/0019103585900697 #
# #
# 1. From freq -> Q_abs & Q_ext #
# 2. k_abs=(n1*Q_abs1*pi*r1^2+n2*Q_abs2*pi*r2^2+n3*Q_abs3*pi*r3^2)/(n1+n2+n3)#
# 3. k_ext=(n1*Q_ext1*pi*r1^2+n2*Q_ext2*pi*r2^2+n3*Q_ext3*pi*r3^2)/(n1+n2+n3)#
# 4. Qext = Qscat + Qabsorp #
# #
# #
# PARAMETERS: #
# #
# Input : freq, T, dens_grid_point, B_v #
# Output : S_c (continuum source function), k_ext (opacity) #
# #
# AUTHORS: #
# #
# Aris E. Tritsis #
# ([email protected]) #
# #
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
import numpy as np #
from scipy.interpolate import interp1d #
from scipy.constants import m_p, h, c, k #
#
amu=2.4237981621576 #
m_p = m_p * 1e+3 #
h=h*1.e+7 #
c=c*1.e+2 #
K_b=k*1.e+7 #
#- - - - - - - - - - - - - -abundance- - - - - - - - - - - - - - - - - - - - #
#- - - - - - -This is actually grain mass and not abundance!- - - - - - - - -#
abundance_g_sp1=12.001 # 12.218
abundance_g_sp2=01.000 #
abundance_g_sp3=14.170 # 14.387
#
abundance_g_log=np.array([abundance_g_sp1, abundance_g_sp2, abundance_g_sp3])#
abundance_g=10.**abundance_g_log #
#- - - - - - - - - - - radius=10^{radius_g_sp*} - - - - - - - - - - - - - - -#
#- - - - -This starts off as radius but is the area afterwards- - - - - - - -#
radius_g_sp1=-5.222 #
radius_g_sp2=-5.281 #
radius_g_sp3=-6.024 #
#
radius_g_log=np.array([radius_g_sp1, radius_g_sp2, radius_g_sp3]) #
radius_g=10.**(radius_g_log) #
radius_g=np.pi*radius_g**2. #
#- - - - -sublimation temperature=10^{sublimation_T_g_sp1*}- - - - - - - - - #
sublimation_T_g_sp1=2.097 #
sublimation_T_g_sp2=3.176 #
sublimation_T_g_sp3=3.301 #
#
sublimation_T_g_log=np.array([sublimation_T_g_sp1, sublimation_T_g_sp2, sublimation_T_g_sp3])
sublimation_T_g=10.**sublimation_T_g_log #
#- - -This is a parameter that controls how the various dust components- - - #
#- - - -change from one species to another depending on the temperature- - - #
fract_g_sp1 = 0.0 #
fract_g_sp2 = 0.5 #
fract_g_sp3 = 11.20 #
#
fract_g=np.array([fract_g_sp1, fract_g_sp2, fract_g_sp3]) #
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
# #
# /\ #
# / \ #
# / || \ DATA FOR DUST GRAIN MODEL ADOPTED #
# / || \ #
# / .. \ !!! DO NOT TOUCH !!! #
# / \ #
# ------------ #
# #
# #
# Qext = efficiency factor for extinction #
# Qext = Qsca + Qabs #
# albedo = Qsca/Qext #
# #
freqs=[2.00000E+00, 3.33333E+00, 3.70370E+00, 5.00000E+00, 7.69231E+00, #
9.09091E+00, 1.25000E+01, 1.66667E+01, 1.77828E+01, 2.22222E+01, #
2.85710E+01, 3.20000E+01, 4.00000E+01, 5.00000E+01, 6.40000E+01, #
8.00000E+01, 1.00000E+02, 1.20000E+02, 1.40000E+02, 1.66667E+02, #
1.80000E+02, 2.00000E+02, 2.20000E+02, 2.50000E+02, 3.00000E+02, #
3.50000E+02, 4.00000E+02, 4.76190E+02, 5.40000E+02, 6.50000E+02, #
7.70000E+02, 9.43396E+02, 9.95250E+02, 1.05250E+03, 1.15250E+03, #
1.30000E+03, 1.50000E+03, 1.80000E+03, 2.00000E+03, 2.50000E+03, #
2.77777E+03, 2.90000E+03, 3.10000E+03, 3.25730E+03, 3.35730E+03, #
3.80000E+03, 4.50450E+03, 5.20000E+03, 6.00000E+03, 7.00000E+03, #
8.00000E+03, 1.00000E+04, 1.11111E+04, 1.47059E+04, 1.81818E+04, #
2.29885E+04, 2.85714E+04, 3.57143E+04, 4.16667E+04, 4.60000E+04, #
4.80000E+04, 5.88235E+04, 7.14286E+04, 1.00000E+05] #
#
#
Qext1=[8.27489E-07, 2.02616E-06, 2.44296E-06, 4.18020E-06, 9.13350E-06, #
1.24081E-05, 2.23674E-05, 4.04385E-05, 4.61024E-05, 7.30494E-05, #
1.21229E-04, 1.51303E-04, 2.35879E-04, 3.62494E-04, 5.72686E-04, #
8.70621E-04, 1.41357E-03, 2.27443E-03, 3.25876E-03, 4.66608E-03, #
5.34037E-03, 7.17867E-03, 9.51324E-03, 1.09013E-02, 1.22257E-02, #
1.55723E-02, 2.03591E-02, 2.92170E-02, 3.51293E-02, 2.85351E-02, #
3.85253E-02, 7.58988E-02, 8.60842E-02, 9.05677E-02, 5.08731E-02, #
2.00234E-02, 1.41722E-02, 1.55068E-02, 1.78970E-02, 2.88108E-02, #
3.77510E-02, 4.22064E-02, 6.00223E-02, 7.91517E-02, 8.38710E-02, #
5.84425E-02, 7.88446E-02, 1.02178E-01, 1.21741E-01, 1.60265E-01, #
1.91499E-01, 2.72786E-01, 3.14100E-01, 4.75036E-01, 6.14420E-01, #
8.86304E-01, 1.24287E+00, 1.60431E+00, 2.17989E+00, 2.35907E+00, #
2.41101E+00, 2.99702E+00, 2.55485E+00, 2.32977E+00] #
#
albedo1=[3.92246E-08, 1.23610E-07, 1.56259E-07, 3.03332E-07, 7.77812E-07, #
1.11697E-06, 2.21502E-06, 3.87163E-06, 4.40139E-06, 6.77444E-06, #
1.11414E-05, 1.40460E-05, 2.19783E-05, 3.48768E-05, 5.92757E-05, #
9.54988E-05, 1.46429E-04, 1.89017E-04, 2.44381E-04, 3.33987E-04, #
3.99728E-04, 4.61869E-04, 4.91212E-04, 6.17219E-04, 1.10168E-03, #
1.63331E-03, 2.12169E-03, 2.73240E-03, 3.07663E-03, 5.96621E-03, #
1.18191E-02, 1.13103E-02, 1.03595E-02, 8.77175E-03, 6.75705E-03, #
2.41256E-02, 1.13157E-01, 2.44006E-01, 3.36055E-01, 4.85140E-01, #
4.95583E-01, 4.97225E-01, 4.23542E-01, 3.20571E-01, 2.99108E-01, #
6.48106E-01, 6.82296E-01, 7.11854E-01, 7.21794E-01, 7.38369E-01, #
7.47027E-01, 7.59237E-01, 7.66654E-01, 7.86210E-01, 7.94317E-01, #
8.11670E-01, 8.21779E-01, 8.33382E-01, 8.50948E-01, 8.38990E-01, #
8.15905E-01, 6.58103E-01, 4.68622E-01, 4.48626E-01] #
#
Qext2=[4.56192E-07, 1.26718E-06, 1.56440E-06, 2.85106E-06, 6.74563E-06, #
9.42119E-06, 1.78219E-05, 3.17052E-05, 3.60991E-05, 5.64476E-05, #
9.35289E-05, 1.17433E-04, 1.84203E-04, 2.88866E-04, 4.77576E-04, #
7.57424E-04, 1.19947E-03, 1.76908E-03, 2.44707E-03, 3.56295E-03, #
4.19929E-03, 5.24419E-03, 6.40814E-03, 8.35551E-03, 1.21111E-02, #
1.66052E-02, 2.20507E-02, 3.24599E-02, 4.00471E-02, 2.86316E-02, #
3.31294E-02, 8.50940E-02, 1.02609E-01, 1.13549E-01, 6.16984E-02, #
2.03921E-02, 1.00506E-02, 1.17356E-02, 1.35383E-02, 2.16125E-02, #
2.72593E-02, 2.95763E-02, 3.32118E-02, 3.64441E-02, 3.88451E-02, #
5.23374E-02, 6.73230E-02, 9.06687E-02, 1.11919E-01, 1.39909E-01, #
1.76101E-01, 2.39293E-01, 2.90852E-01, 4.31671E-01, 5.74578E-01, #
8.52241E-01, 1.12880E+00, 1.59701E+00, 2.12380E+00, 2.52193E+00, #
2.57567E+00, 3.45300E+00, 2.82181E+00, 2.65350E+00] #
#
albedo2=[5.03588E-08, 1.39890E-07, 1.72707E-07, 3.14778E-07, 7.45386E-07, #
1.04120E-06, 1.96752E-06, 3.49593E-06, 3.97950E-06, 6.20690E-06, #
1.02379E-05, 1.28347E-05, 1.99828E-05, 3.11464E-05, 5.06395E-05, #
7.80006E-05, 1.20776E-04, 1.69645E-04, 2.28264E-04, 3.13364E-04, #
3.61733E-04, 4.43295E-04, 5.24026E-04, 6.60989E-04, 9.37946E-04, #
1.26048E-03, 1.58995E-03, 1.98391E-03, 2.13900E-03, 3.88407E-03, #
1.01020E-02, 9.61017E-03, 8.68926E-03, 7.32209E-03, 4.26684E-03, #
1.08794E-02, 9.84504E-02, 2.30535E-01, 3.21401E-01, 4.89408E-01, #
5.57131E-01, 5.78124E-01, 6.05280E-01, 6.24653E-01, 6.36979E-01, #
6.89993E-01, 7.26025E-01, 7.54286E-01, 7.72155E-01, 7.77858E-01, #
7.96308E-01, 8.01552E-01, 8.15916E-01, 8.22345E-01, 8.38710E-01, #
8.49167E-01, 8.51083E-01, 8.69265E-01, 8.75804E-01, 8.64243E-01, #
8.36545E-01, 6.49163E-01, 4.72962E-01, 4.85557E-01] #
#
Qext3=[5.71229E-07, 9.52047E-07, 1.05783E-06, 1.42807E-06, 2.19704E-06, #
2.59650E-06, 3.57018E-06, 4.76025E-06, 5.07902E-06, 6.34698E-06, #
8.16030E-06, 9.13967E-06, 1.14246E-05, 1.42807E-05, 1.82810E-05, #
2.28568E-05, 2.85795E-05, 3.43091E-05, 4.00475E-05, 4.77133E-05, #
5.15572E-05, 5.73318E-05, 6.31133E-05, 7.18237E-05, 8.64103E-05, #
1.01118E-04, 1.15967E-04, 1.38911E-04, 1.58468E-04, 1.93040E-04, #
2.32214E-04, 2.92067E-04, 3.10822E-04, 3.32034E-04, 3.70449E-04, #
4.30584E-04, 5.19593E-04, 6.71937E-04, 7.88021E-04, 1.13900E-03, #
1.37757E-03, 1.49369E-03, 1.69948E-03, 1.87590E-03, 1.99506E-03, #
2.59279E-03, 3.80808E-03, 5.37159E-03, 7.67012E-03, 1.13421E-02, #
1.58829E-02, 2.71114E-02, 3.42294E-02, 6.01987E-02, 8.88036E-02, #
1.32586E-01, 1.88654E-01, 2.68521E-01, 3.42946E-01, 4.01815E-01, #
4.30197E-01, 5.92534E-01, 7.89627E-01, 1.24580E+00] #
#
albedo3=[6.24978E-13, 2.89341E-12, 3.96900E-12, 9.76529E-12, 3.55586E-11, #
5.86944E-11, 1.52583E-10, 3.61680E-10, 4.39313E-10, 8.57307E-10, #
1.82201E-09, 2.55991E-09, 4.99983E-09, 9.76529E-09, 2.04775E-08, #
3.99853E-08, 7.80732E-08, 1.34857E-07, 2.14039E-07, 3.60843E-07, #
4.54317E-07, 6.22707E-07, 8.28190E-07, 1.21354E-06, 2.09162E-06, #
3.31140E-06, 4.92581E-06, 8.25966E-06, 1.19734E-05, 2.06350E-05, #
3.37825E-05, 6.05262E-05, 7.04496E-05, 8.24859E-05, 1.06300E-04, #
1.48066E-04, 2.17522E-04, 3.48882E-04, 4.53516E-04, 7.66535E-04, #
9.66423E-04, 1.05907E-03, 1.21590E-03, 1.34320E-03, 1.42569E-03, #
1.80261E-03, 2.42929E-03, 3.06841E-03, 3.82767E-03, 4.83433E-03, #
5.94951E-03, 8.72100E-03, 1.06808E-02, 1.94455E-02, 3.18659E-02, #
5.66891E-02, 9.84279E-02, 1.72824E-01, 2.48363E-01, 3.05788E-01, #
3.31709E-01, 4.52271E-01, 5.45765E-01, 6.59292E-01] #
#
f1ext=interp1d(freqs, Qext1, kind='linear') #
f1alb=interp1d(freqs, albedo1, kind='linear') #
f2ext=interp1d(freqs, Qext2, kind='linear') #
f2alb=interp1d(freqs, albedo2, kind='linear') #
f3ext=interp1d(freqs, Qext3, kind='linear') #
f3alb=interp1d(freqs, albedo3, kind='linear') #
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
# #
# Calculate the exponent in Planck's function and take cases to see if #
# it reduces to Wien's law or Rayleigh-Jeans law #
# #
def planck(freq, T): #
#
exponent=h*freq/(K_b*T) #
#
if exponent>=0.1: #
#
B_v= 2.*h*freq**3/c**2 * 1./(np.exp(exponent) - 1) #
#
#- - - -for (hv/kT) < 0.1 compute B_v from the Rayleigh-Jeans law- - - - - - #
else: #
#
B_v=2.*freq**2 *K_b*T/c**2 #
#
return B_v #
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
# Give me freq, Temperature and species and I will return #
def dust(freq, Tgp, densgp): #
#
B_v = planck(freq, Tgp) #
#
densgp = densgp * m_p * amu #
#
k_ext=0. #
#- - - - - - - - - -Continuum source function- - - - - - - - - - - - #
S_c=0. #
#
ZV=[] #
#
grain_dens=[] #
#
if freq < np.min(freqs): #
#
S_c, k_ext = 0., 0. #
#
else: #
for i in range (0, 3): #
#
ZVtemp=max(0.,(1.-Tgp/sublimation_T_g[i])*20.)+1.E-10#
#
ZVtemp=min(1., ZVtemp) #
#
ZV.append(ZVtemp) #
#
if i==0: #
#
Q_ext1=f1ext(freq) #
#
albedo_1=f1alb(freq) #
#
elif i==1: #
#
Q_ext2=f2ext(freq) #
#
albedo_2=f2alb(freq) #
#
else: #
Q_ext3=f3ext(freq) #
#
albedo_3=f3alb(freq) #
#
Q_ext=np.array([Q_ext1, Q_ext2, Q_ext3]) #
#
albedo=np.array([albedo_1, albedo_2, albedo_3]) #
#
for i in range (0, 3): #
#
grain_den=(abundance_g[i]+abundance_g[0]*(1.-ZV[0])*fract_g[i])* radius_g[i]* densgp
#
DEXT=ZV[i]*Q_ext[i]*grain_den #
#
k_ext=k_ext+DEXT #
#
# Contributions of individual dust types to emission #
S_c = S_c + B_v*DEXT*(1.-albedo[i]) #
#
return S_c, k_ext #
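#
#- - Illustrative usage (hypothetical helper, never called): evaluate the - -#
#- - continuum source function and extinction for one grid point. The - - - -#
#- - frequency [Hz], temperature [K] and density below are placeholders - - -#
#- - and the call relies on the opacity tables defined at module level. - - -#
def _example_dust_call():                                                    #
#
    S_c_gp, k_ext_gp = dust(2.30E11, 20., 1.0E6)                             #
#
    return S_c_gp, k_ext_gp                                                  #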
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
|
ArisTrREPO_NAMEPyRaTEPATH_START.@PyRaTE_extracted@PyRaTE-master@[email protected]@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterternary/legendgrouptitle/font/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="style",
parent_name="scatterternary.legendgrouptitle.font",
**kwargs,
):
super(StyleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
values=kwargs.pop("values", ["normal", "italic"]),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterternary@legendgrouptitle@font@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "NickMilsonPhysics/BinaryStarSolver",
"repo_path": "BinaryStarSolver_extracted/BinaryStarSolver-master/binarystarsolve/__init__.py",
"type": "Python"
}
|
NickMilsonPhysicsREPO_NAMEBinaryStarSolverPATH_START.@BinaryStarSolver_extracted@BinaryStarSolver-master@binarystarsolve@[email protected]_END.py
|
|
{
"filename": "cu2quPen.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/fonttools/fontTools/pens/cu2quPen.py",
"type": "Python"
}
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from fontTools.cu2qu import curve_to_quadratic, curves_to_quadratic
from fontTools.pens.basePen import decomposeSuperBezierSegment
from fontTools.pens.filterPen import FilterPen
from fontTools.pens.reverseContourPen import ReverseContourPen
from fontTools.pens.pointPen import BasePointToSegmentPen
from fontTools.pens.pointPen import ReverseContourPointPen
class Cu2QuPen(FilterPen):
"""A filter pen to convert cubic bezier curves to quadratic b-splines
using the FontTools SegmentPen protocol.
Args:
other_pen: another SegmentPen used to draw the transformed outline.
max_err: maximum approximation error in font units. For optimal results,
if you know the UPEM of the font, we recommend setting this to a
value equal, or close to UPEM / 1000.
reverse_direction: flip the contours' direction but keep starting point.
stats: a dictionary counting the point numbers of quadratic segments.
all_quadratic: if True (default), only quadratic b-splines are generated.
if False, quadratic curves or cubic curves are generated depending
on which one is more economical.
"""
def __init__(
self,
other_pen,
max_err,
reverse_direction=False,
stats=None,
all_quadratic=True,
):
if reverse_direction:
other_pen = ReverseContourPen(other_pen)
super().__init__(other_pen)
self.max_err = max_err
self.stats = stats
self.all_quadratic = all_quadratic
def _convert_curve(self, pt1, pt2, pt3):
curve = (self.current_pt, pt1, pt2, pt3)
result = curve_to_quadratic(curve, self.max_err, self.all_quadratic)
if self.stats is not None:
n = str(len(result) - 2)
self.stats[n] = self.stats.get(n, 0) + 1
if self.all_quadratic:
self.qCurveTo(*result[1:])
else:
if len(result) == 3:
self.qCurveTo(*result[1:])
else:
assert len(result) == 4
super().curveTo(*result[1:])
def curveTo(self, *points):
n = len(points)
if n == 3:
# this is the most common case, so we special-case it
self._convert_curve(*points)
elif n > 3:
for segment in decomposeSuperBezierSegment(points):
self._convert_curve(*segment)
else:
self.qCurveTo(*points)
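# Minimal usage sketch (illustrative only, never called by the module): draw
# one cubic contour through Cu2QuPen into a RecordingPen and inspect the
# resulting quadratic segments. RecordingPen ships with fontTools; the
# coordinates and max_err value below are arbitrary.
def _example_cu2qu_pen():
    from fontTools.pens.recordingPen import RecordingPen
    rec = RecordingPen()
    pen = Cu2QuPen(rec, max_err=1.0)
    pen.moveTo((0, 0))
    pen.curveTo((0, 100), (100, 200), (200, 200))
    pen.closePath()
    # rec.value now holds the recorded moveTo/qCurveTo/closePath calls
    return rec.value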
class Cu2QuPointPen(BasePointToSegmentPen):
"""A filter pen to convert cubic bezier curves to quadratic b-splines
using the FontTools PointPen protocol.
Args:
other_point_pen: another PointPen used to draw the transformed outline.
max_err: maximum approximation error in font units. For optimal results,
if you know the UPEM of the font, we recommend setting this to a
value equal, or close to UPEM / 1000.
reverse_direction: reverse the winding direction of all contours.
stats: a dictionary counting the point numbers of quadratic segments.
all_quadratic: if True (default), only quadratic b-splines are generated.
if False, quadratic curves or cubic curves are generated depending
on which one is more economical.
"""
__points_required = {
"move": (1, operator.eq),
"line": (1, operator.eq),
"qcurve": (2, operator.ge),
"curve": (3, operator.eq),
}
def __init__(
self,
other_point_pen,
max_err,
reverse_direction=False,
stats=None,
all_quadratic=True,
):
BasePointToSegmentPen.__init__(self)
if reverse_direction:
self.pen = ReverseContourPointPen(other_point_pen)
else:
self.pen = other_point_pen
self.max_err = max_err
self.stats = stats
self.all_quadratic = all_quadratic
def _flushContour(self, segments):
assert len(segments) >= 1
closed = segments[0][0] != "move"
new_segments = []
prev_points = segments[-1][1]
prev_on_curve = prev_points[-1][0]
for segment_type, points in segments:
if segment_type == "curve":
for sub_points in self._split_super_bezier_segments(points):
on_curve, smooth, name, kwargs = sub_points[-1]
bcp1, bcp2 = sub_points[0][0], sub_points[1][0]
cubic = [prev_on_curve, bcp1, bcp2, on_curve]
quad = curve_to_quadratic(cubic, self.max_err, self.all_quadratic)
if self.stats is not None:
n = str(len(quad) - 2)
self.stats[n] = self.stats.get(n, 0) + 1
new_points = [(pt, False, None, {}) for pt in quad[1:-1]]
new_points.append((on_curve, smooth, name, kwargs))
if self.all_quadratic or len(new_points) == 2:
new_segments.append(["qcurve", new_points])
else:
new_segments.append(["curve", new_points])
prev_on_curve = sub_points[-1][0]
else:
new_segments.append([segment_type, points])
prev_on_curve = points[-1][0]
if closed:
# the BasePointToSegmentPen.endPath method that calls _flushContour
# rotates the point list of closed contours so that they end with
# the first on-curve point. We restore the original starting point.
new_segments = new_segments[-1:] + new_segments[:-1]
self._drawPoints(new_segments)
def _split_super_bezier_segments(self, points):
sub_segments = []
# n is the number of control points
n = len(points) - 1
if n == 2:
# a simple bezier curve segment
sub_segments.append(points)
elif n > 2:
# a "super" bezier; decompose it
on_curve, smooth, name, kwargs = points[-1]
num_sub_segments = n - 1
for i, sub_points in enumerate(
decomposeSuperBezierSegment([pt for pt, _, _, _ in points])
):
new_segment = []
for point in sub_points[:-1]:
new_segment.append((point, False, None, {}))
if i == (num_sub_segments - 1):
# the last on-curve keeps its original attributes
new_segment.append((on_curve, smooth, name, kwargs))
else:
# on-curves of sub-segments are always "smooth"
new_segment.append((sub_points[-1], True, None, {}))
sub_segments.append(new_segment)
else:
raise AssertionError("expected 2 control points, found: %d" % n)
return sub_segments
def _drawPoints(self, segments):
pen = self.pen
pen.beginPath()
last_offcurves = []
points_required = self.__points_required
for i, (segment_type, points) in enumerate(segments):
if segment_type in points_required:
n, op = points_required[segment_type]
assert op(len(points), n), (
f"illegal {segment_type!r} segment point count: "
f"expected {n}, got {len(points)}"
)
offcurves = points[:-1]
if i == 0:
# any off-curve points preceding the first on-curve
# will be appended at the end of the contour
last_offcurves = offcurves
else:
for pt, smooth, name, kwargs in offcurves:
pen.addPoint(pt, None, smooth, name, **kwargs)
pt, smooth, name, kwargs = points[-1]
if pt is None:
assert segment_type == "qcurve"
# special quadratic contour with no on-curve points:
# we need to skip the "None" point. See also the Pen
# protocol's qCurveTo() method and fontTools.pens.basePen
pass
else:
pen.addPoint(pt, segment_type, smooth, name, **kwargs)
else:
raise AssertionError("unexpected segment type: %r" % segment_type)
for pt, smooth, name, kwargs in last_offcurves:
pen.addPoint(pt, None, smooth, name, **kwargs)
pen.endPath()
def addComponent(self, baseGlyphName, transformation):
assert self.currentPath is None
self.pen.addComponent(baseGlyphName, transformation)
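# Minimal usage sketch (illustrative only, never called by the module): feed
# a closed contour with one cubic segment through Cu2QuPointPen into a
# RecordingPointPen (also from fontTools). The contour below is arbitrary.
def _example_cu2qu_point_pen():
    from fontTools.pens.recordingPen import RecordingPointPen
    rec = RecordingPointPen()
    pen = Cu2QuPointPen(rec, max_err=1.0)
    pen.beginPath()
    pen.addPoint((0, 0), "line")
    pen.addPoint((0, 100))
    pen.addPoint((100, 200))
    pen.addPoint((200, 200), "curve")
    pen.endPath()
    # rec.value records the converted contour, now containing "qcurve" segments
    return rec.value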
class Cu2QuMultiPen:
"""A filter multi-pen to convert cubic bezier curves to quadratic b-splines
    in an interpolation-compatible manner, using the FontTools SegmentPen protocol.
Args:
other_pens: list of SegmentPens used to draw the transformed outlines.
max_err: maximum approximation error in font units. For optimal results,
if you know the UPEM of the font, we recommend setting this to a
value equal, or close to UPEM / 1000.
reverse_direction: flip the contours' direction but keep starting point.
This pen does not follow the normal SegmentPen protocol. Instead, its
moveTo/lineTo/qCurveTo/curveTo methods take a list of tuples that are
arguments that would normally be passed to a SegmentPen, one item for
each of the pens in other_pens.
"""
# TODO Simplify like 3e8ebcdce592fe8a59ca4c3a294cc9724351e1ce
    # Remove start_pts and _add_moveTo
def __init__(self, other_pens, max_err, reverse_direction=False):
if reverse_direction:
other_pens = [
ReverseContourPen(pen, outputImpliedClosingLine=True)
for pen in other_pens
]
self.pens = other_pens
self.max_err = max_err
self.start_pts = None
self.current_pts = None
def _check_contour_is_open(self):
if self.current_pts is None:
raise AssertionError("moveTo is required")
def _check_contour_is_closed(self):
if self.current_pts is not None:
raise AssertionError("closePath or endPath is required")
def _add_moveTo(self):
if self.start_pts is not None:
for pt, pen in zip(self.start_pts, self.pens):
pen.moveTo(*pt)
self.start_pts = None
def moveTo(self, pts):
self._check_contour_is_closed()
self.start_pts = self.current_pts = pts
self._add_moveTo()
def lineTo(self, pts):
self._check_contour_is_open()
self._add_moveTo()
for pt, pen in zip(pts, self.pens):
pen.lineTo(*pt)
self.current_pts = pts
def qCurveTo(self, pointsList):
self._check_contour_is_open()
if len(pointsList[0]) == 1:
self.lineTo([(points[0],) for points in pointsList])
return
self._add_moveTo()
current_pts = []
for points, pen in zip(pointsList, self.pens):
pen.qCurveTo(*points)
current_pts.append((points[-1],))
self.current_pts = current_pts
def _curves_to_quadratic(self, pointsList):
curves = []
for current_pt, points in zip(self.current_pts, pointsList):
curves.append(current_pt + points)
quadratics = curves_to_quadratic(curves, [self.max_err] * len(curves))
pointsList = []
for quadratic in quadratics:
pointsList.append(quadratic[1:])
self.qCurveTo(pointsList)
def curveTo(self, pointsList):
self._check_contour_is_open()
self._curves_to_quadratic(pointsList)
def closePath(self):
self._check_contour_is_open()
if self.start_pts is None:
for pen in self.pens:
pen.closePath()
self.current_pts = self.start_pts = None
def endPath(self):
self._check_contour_is_open()
if self.start_pts is None:
for pen in self.pens:
pen.endPath()
self.current_pts = self.start_pts = None
def addComponent(self, glyphName, transformations):
self._check_contour_is_closed()
for trans, pen in zip(transformations, self.pens):
pen.addComponent(glyphName, trans)
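# Minimal usage sketch (illustrative only, never called by the module):
# convert two interpolation-compatible cubic contours in one pass. Each
# argument is a list with one entry per wrapped pen, as described in the
# class docstring; the coordinates and max_err value below are arbitrary.
def _example_cu2qu_multi_pen():
    from fontTools.pens.recordingPen import RecordingPen
    pens = [RecordingPen(), RecordingPen()]
    mpen = Cu2QuMultiPen(pens, max_err=1.0)
    mpen.moveTo([((0, 0),), ((0, 0),)])
    mpen.curveTo(
        [
            ((0, 100), (100, 200), (200, 200)),
            ((0, 50), (50, 100), (100, 100)),
        ]
    )
    mpen.closePath()
    # both recording pens now hold structurally matching quadratic segments
    return [pen.value for pen in pens]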
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@fonttools@fontTools@[email protected]@.PATH_END.py
|
{
"filename": "version.py",
"repo_name": "barentsen/k2flix",
"repo_path": "k2flix_extracted/k2flix-master/k2flix/version.py",
"type": "Python"
}
|
# It is important to store the version number in a separate file
# so that we can read it from setup.py without importing the package
__version__ = "2.4.0"
|
barentsenREPO_NAMEk2flixPATH_START.@k2flix_extracted@k2flix-master@[email protected]@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "dsavransky/EXOSIMS",
"repo_path": "EXOSIMS_extracted/EXOSIMS-master/documentation/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# EXOSIMS documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 15 13:16:49 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
"sphinxcontrib.mermaid",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "EXOSIMS"
copyright = "2015 - 2023, SIOSlab"
author = "SIOSlab"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '2.1'
# The full version, including alpha/beta/rc tags.
# release = '2.1.0'
with open(os.path.join("..", "EXOSIMS", "__init__.py"), "r") as f:
version_file = f.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
release = version_match.group(1)
else:
raise RuntimeError("Unable to find version string.")
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", ".DS_Store"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# html_theme_options = {'fixed_sidebar':'true',
# 'logo': 'logo.png',
# 'sidebar_collapse':'true'
# }
html_theme_options = {"collapse_navigation": False}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# html_logo = os.path.join("..","EXOSIMS_cropped.png")
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["css/custom.css"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "EXOSIMSdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "EXOSIMS.tex", "EXOSIMS Documentation", "SIOSlab", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "exosims", "EXOSIMS Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"EXOSIMS",
"EXOSIMS Documentation",
author,
"EXOSIMS",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
numfig = True
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"astropy": ("https://docs.astropy.org/en/stable/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/", None),
"matplotlib": ("https://matplotlib.org/stable/", None),
"synphot": ("https://synphot.readthedocs.io/en/latest/", None),
}
# add latex customization
mathjax3_config = {
"tex": {
"macros": {
"intd": ["\\,\\mathrm{d}#1", 1],
"mf": "\\mathbf",
"mc": "\\mathcal",
}
}
}
latex_elements = {
"preamble": r"""
\def\bs{\boldsymbol}
\def\mf{\mathbf}
\def\mb{\mathbb}
\def\mc{\mathcal}
\newcommand{\intd}[1]{\ensuremath{\,\mathrm{d}#1}}
\newcommand{\leftexp}[2]{{\vphantom{#2}}^{#1}\!{#2}}
\newcommand{\leftsub}[2]{{\vphantom{#2}}_{#1}\!{#2}}
""",
}
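# Illustrative note: with the macros above defined for both builders, a
# docstring can use the same markup for HTML (MathJax) and PDF (LaTeX
# preamble), e.g.
#
#   .. math::
#
#       F = \int \mf{f}(\mf{x}) \intd{x}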
mermaid_params = ["-p", "puppeteer-config.json"]
mermaid_version = "9.4.0" # temporary until next sphinxcontrib.mermaid release
|
dsavranskyREPO_NAMEEXOSIMSPATH_START.@EXOSIMS_extracted@EXOSIMS-master@[email protected]@.PATH_END.py
|
{
"filename": "Parameter.py",
"repo_name": "ThomasEdwardRiley/xpsi-pre-transfer",
"repo_path": "xpsi-pre-transfer_extracted/xpsi-pre-transfer-master/xpsi/Parameter.py",
"type": "Python"
}
|
from __future__ import division, print_function
from .global_imports import *
from . import global_imports
from . import make_verbose
import string
from types import MethodType
from abc import ABCMeta, abstractmethod
class StrictBoundsError(xpsiError):
""" Raised if the set parameter value lies beyond strict bounds. """
class Derive(object):
""" Helper class to bind to parameter instances as a method.
    This is a powerful abstract base class for customising how derived
parameters are evaluated from references to existing parameter objects.
:param obj refs:
Some references to parameter objects or subspace objects that
need to be stored for deriving variable values. For example,
a dictionary with key-ref pairs.
.. note::
In principle, it might be the case that the calling parameter
subspace does not have references to other parameter objects
required, *and* that distinct subspaces require mutual references
to each other. An example would be two hot regions, each of which
has one or more parameters that are derived in part from parameters
of the other hot region. In this case you need to instantiate
the subspaces first by binding instances of this present
class to parameters. However, you then need to complete these instances
of this present class (or more precisely instances of subclasses) with
the required references. As an example, consider the following:
.. highlight:: python
.. code-block:: python
bounds = dict(super_colatitude = (None, None),
super_radius = (None, None),
super_temperature = (None, None))
class derive(xpsi.Derive):
def __init__(self):
pass
def __call__(self, boundto, caller = None):
# ref is a reference to another hot region object
return self.ref['phase_shift'] - 0.5
ref_1 = derive()
# a simple circular, simply-connected spot
primary = xpsi.HotRegion(bounds=bounds,
values={'phase_shift': ref_1},
symmetry=True,
omit=False,
cede=False,
concentric=False,
sqrt_num_cells=32,
min_sqrt_num_cells=10,
max_sqrt_num_cells=64,
num_leaves=100,
num_rays=200,
do_fast=False,
prefix='p')
bounds = dict(omit_colatitude = (None, None),
super_radius = (None, None),
phase_shift = (None, None),
super_temperature = (None, None),
omit_radius = (None, None),
omit_azimuth = (None, None))
class derive(xpsi.Derive):
def __init__(self):
pass
def __call__(self, boundto, caller = None):
return math.pi - self.ref['super_colatitude']
ref_2 = derive()
# overlap of an omission region and
# and a radiating super region
secondary = xpsi.HotRegion(bounds=bounds,
values={'super_colatitude': ref_2},
symmetry=True,
omit=True,
cede=False,
concentric=False,
sqrt_num_cells=32,
min_sqrt_num_cells=10,
max_sqrt_num_cells=100,
num_leaves=100,
num_rays=200,
do_fast=False,
is_secondary=True,
prefix='s')
from xpsi import HotRegions
hot = HotRegions((primary, secondary))
# the crux: resolve the mutual refs
ref_1.ref = secondary
ref_2.ref = primary
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, refs):
self.refs = refs
@abstractmethod
def __call__(self, boundto, caller = None):
""" Derive value from some parameters.
The second argument is the parameter object to which this callable
is bound. The third argument is a subspace from which the call
comes, which might be useful or even sufficient for retrieving the
required information, in which case write an initialiser with ``pass``
as the body.
"""
return 0 # calculate something and return
class Parameter(object):
""" A parameter.
:param str name:
A unique parameter name for identification in attribute lookup.
:param tuple strict_bounds:
        One 2-tuple of strict bounds per parameter; assigning a value
        outside these raises :class:`StrictBoundsError`. Can be unbounded
*in principle*, but read the documentation for the
:class:`~.Prior.Prior` class first.
:param tuple bounds:
One 2-tuple of hard bounds per parameter. Can be unbounded
*in principle*, but read the documentation for the
:class:`~.Prior.Prior` class first.
:param bool permit_prepend:
Allow encapsulating subspaces to prepend the parameter name with
a prefix? Note that this gives permission recursively to all
encapsulating subspaces higher in the hierarchy.
:param bool is_hyperparameter:
A boolean declaring whether the parameter is a hyperparameter.
"""
@make_verbose('Creating parameter:')
def __init__(self, name, strict_bounds, bounds=(None,None),
doc=None, symbol=r'', value=None, permit_prepend=True,
deactivate_verbosity=False, is_hyperparameter=False):
""" See the class docstring. """
self.name = name
self.strict_bounds = strict_bounds
self.fixed = True if bounds is None else False
self.is_hyperparameter = is_hyperparameter
self.bounds = bounds
self.doc = doc
self.symbol = symbol
if callable(value):
if not self.fixed:
raise TypeError('Initial value should be a scalar not callable.')
if not isinstance(value, Derive):
raise TypeError('It is recommended to subclass the prototype '
'abstract base class ``Derive``.')
self.evaluate = MethodType(value, self, Parameter)
self.derived = True
else:
self.value = value
self.derived = False
self.permit_prepend = permit_prepend
if self.fixed: # fixed can also encapsulate derived variables
if callable(value):
end = 'that is derived from ulterior variables'
else:
end = 'with fixed value %.3e' % value
else:
bounds = self.bounds # bounds might have been automatically set
if None in bounds:
if bounds[0] is not None and bounds[1] is None:
bounds = 'lower-bound %.3e' % bounds[0]
elif bounds[0] is None and bounds[1] is not None:
bounds = 'upper-bound %.3e' % bounds[1]
else:
bounds=''
else:
bounds = 'bounds [%.3e, %.3e]' % tuple(bounds)
if value is None:
value = ''
else:
value = 'initial value %.3e' % value
if bounds and value:
end = 'with ' + bounds + ' and ' + value
elif bounds:
end = 'with ' + bounds
elif value:
end = 'with ' + value
else:
end = ''
yield (' > Named "%s" %s.' % (name, end) if end
else ' > Named "%s".' % name)
if doc is not None:
yield ' > %s' % self.doc # get set version
yield None # initialiser must return NoneType
@property
def name(self):
""" Get the name of the parameter. """
return self._name
@name.setter
def name(self, name):
if isinstance(name, _six.string_types):
self._name = name
else:
raise TypeError('Name must be a string.')
@property
def is_hyperparameter(self):
""" Is the variable a hyperparameter? """
return self._is_hyperparameter
@is_hyperparameter.setter
def is_hyperparameter(self, is_hyper):
if not isinstance(is_hyper, bool):
raise TypeError('A boolean is required to define variable type.')
self._is_hyperparameter = is_hyper
@property
def permit_prepend(self):
""" Allow subspaces to prepend parameter with prefixes? """
return self._permit_prepend
@permit_prepend.setter
def permit_prepend(self, permit):
if not isinstance(permit, bool):
raise TypeError('Provide a boolean to define prepend permissions.')
self._permit_prepend = permit
@property
def doc(self):
""" Redirect to the magic docstring. """
return self.__doc__
@doc.setter
def doc(self, doc):
if isinstance(doc, _six.string_types):
lines = [string.strip(line) for line in doc.splitlines()]
doc = string.join([line for line in lines if line], '\n')
if doc[-1] != '.': doc += '.'
self.__doc__ = doc
elif doc is not None:
            raise TypeError('Parameter description must be a string if '
                            'a description is provided.')
@doc.deleter
def doc(self):
del self.__doc__
def __repr__(self):
""" Get a string summary of the parameter and current value. """
try:
val = self.evaluate()
except (TypeError, AttributeError, NameError):
msg = ''
else:
msg = (' = %.3e' % val if val is not None else '')
return str(self)[:-1] + msg
def __str__(self):
""" Redirect to the magic doctring. """
return self.__doc__
@property
def symbol(self):
""" Get TeX-compatible symbol."""
return self._tex
@symbol.setter
def symbol(self, symbol):
if isinstance(symbol, _six.string_types):
self._tex = symbol
elif symbol is not None:
raise TypeError('Invalid type for tex-compatible symbol string.')
@property
def strict_bounds(self):
""" Get the strict bounds of the parameter. """
return self._strict_bounds
@strict_bounds.setter
def strict_bounds(self, bounds):
try:
iter(bounds)
except TypeError:
raise TypeError('Bounds must be an ordered container with '
'two elements.')
else:
if len(bounds) != 2:
raise TypeError('Bounds must be an ordered container with two elements.')
else:
if None not in bounds:
if bounds[0] >= bounds[1]:
raise ValueError('Lower-bound is greater than or equal to upper-bound.')
                bounds = list(bounds)  # allow tuple input to be modified below
                if bounds[0] is None: bounds[0] = -_np.inf
                if bounds[1] is None: bounds[1] = _np.inf
                self._strict_bounds = tuple(bounds)
@property
def bounds(self):
""" Get the hard bounds of the parameter. """
return self._bounds
@bounds.setter
def bounds(self, bounds):
try:
iter(bounds)
except TypeError:
if bounds is None and self.fixed:
self._bounds = None
else:
raise TypeError('Bounds must be an ordered container with '
'two elements if the parameter is free, '
'or ``None`` if fixed.')
else:
if self.fixed:
raise TypeError('Check if parameter %s should actually be '
'free.' % self._name)
elif len(bounds) != 2:
raise TypeError('Bounds must be an ordered container with two elements.')
else:
if None not in bounds:
if bounds[0] >= bounds[1]:
raise ValueError('Lower-bound is greater than or equal to upper-bound.')
bounds = list(bounds) # make mutable
for i, bound in enumerate(bounds):
if bound is not None:
if not self.strict_bounds[0] <= bound <= self.strict_bounds[1]:
raise ValueError('Invalid bound for parameter '
'named "%s".' % self.name)
else:
bounds[i] = self.strict_bounds[i]
self._bounds = tuple(bounds) # back to immutable
@property
def fixed(self):
""" Is the variable fixed (or derived) or a free parameter? """
return self._fixed
@fixed.setter
def fixed(self, fix):
if not isinstance(fix, bool):
raise TypeError('A boolean is required to define variable type.')
self._fixed = fix
@property
def value(self):
""" Get the current parameter value. """
return self._value
@value.setter
def value(self, value):
try:
if not self.strict_bounds[0] <= float(value) <= self.strict_bounds[1]:
# handle this exception externally if sampling software can
# make proposals outside of strict bounds
raise StrictBoundsError('Value of parameter %s is not within '
'the strict bounds.' % self.name)
except TypeError:
if self._fixed:
raise ValueError('Value must be a float.')
if value is not None:
print('Value must be a float.')
raise
else:
self._value = None
else:
try:
self._cache() # cache it!
except AttributeError:
# first time being set so nothing to cache
pass
self._value = float(value)
def evaluate(self, caller = None):
""" Symlink to property pending dynamic overwrite.
:param obj caller:
An object, such as the calling class itself, used to transfer
information from higher in the hierarchy.
Overwrite if value must be explicitly computed from other variables
and parameters. That is, subclass :class:`~.Derive`, instantiate,
and pass as a callable to the initialiser of the present class as
a value of a *derived* parameter. The callable will automatically
replace the bound variant of this present method and will have access
to some caller object, plus other references (class and/or instance)
attributes you define in the subclass.
"""
return self.value
@property
def cached(self):
""" Get the cached value. """
try:
return self._cached
except AttributeError:
return None
@cached.setter
def cached(self, value):
""" To clear the cache, use the deleter. """
try:
self._cached = float(value)
except TypeError:
if value is None:
self._cached = None
else:
raise TypeError('A float is required.')
@cached.deleter
def cached(self):
""" Clear the cache. """
try:
del self._cached
except AttributeError:
pass # quietly do nothing
def _cache(self):
self._cached = self._value
@property
def needs_update(self):
""" Do cached dependencies need to be updated? """
if self.is_hyperparameter:
return False # likelihood implicitly dependent on hyperparameters
if self.derived:
return True # assume ulterior variables have changed
elif self.fixed:
return False
try:
return self.cached != self._value
except AttributeError:
return True
def __call__(self, value = None):
""" Update or get current point if the parameter is *free*.
:param array-like p:
New point to update to. If ``None``, the current point is returned.
:returns: Current point (if the call was not an update).
:rtype: array-like
:raises AttributeError:
If parameter is derived or has no value yet but the argument is
``None``.
"""
if value is not None:
self.value = value
else:
return self.value
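# Minimal usage sketch (illustrative only, not part of the class definitions
# above; the name, bounds and values are invented). A free parameter is
# created with bounds inside its strict bounds, updated by calling it with a
# value, and read back by calling it with no argument; values outside
# strict_bounds raise StrictBoundsError.
def _example_parameter_usage():
    mass = Parameter('mass',
                     strict_bounds=(0.1, 3.0),
                     bounds=(1.0, 2.0),
                     doc='Gravitational mass [solar masses]',
                     symbol=r'$M$',
                     value=1.4)
    mass(1.6)      # update the current value; the previous value is cached
    return mass()  # returns 1.6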
|
ThomasEdwardRileyREPO_NAMExpsi-pre-transferPATH_START.@xpsi-pre-transfer_extracted@xpsi-pre-transfer-master@[email protected]@.PATH_END.py
|