metadata (dict) | text (string, 0–40.6M chars) | id (string, 14–255 chars)
---|---|---|
{
"filename": "diaObjectCopy.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/pipeline/filter/features/diaObjectCopy.py",
"type": "Python"
}
|
from features.FeatureGroup import FeatureGroup
class diaObjectCopy(FeatureGroup):
"""Several features are simply copied from the diaObject that Rubin provides"""
_features = [
'timestamp',
'diaObjectId',
'ra', 'decl',
'g_psfFluxMean',
'g_psfFluxMeanErr',
'r_psfFluxMean',
'r_psfFluxMeanErr',
]
def run(self):
"""Return mean flux in nJansky in all filters"""
# copy the values from the alert
output = {}
object = self.alert["diaObject"]
for f in self._features:
if f in object:
output[f] = object[f]
else:
if self.verbose: print('diaObjectCopy: did not find %s' % f)
output[f] = None
return output
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@pipeline@filter@[email protected]@.PATH_END.py
|
{
"filename": "test_line_lists.py",
"repo_name": "spacetelescope/jdaviz",
"repo_path": "jdaviz_extracted/jdaviz-main/jdaviz/configs/default/plugins/line_lists/tests/test_line_lists.py",
"type": "Python"
}
|
import numpy as np
from numpy.testing import assert_allclose
import pytest
import astropy.units as u
from astropy.table import QTable
from specutils import Spectrum1D
from jdaviz.core.marks import SpectralLine
from jdaviz.core.linelists import get_available_linelists
def test_line_lists(specviz_helper):
spec = Spectrum1D(flux=np.random.rand(100)*u.Jy,
spectral_axis=np.arange(6000, 7000, 10)*u.AA)
specviz_helper.load_data(spec)
lt = QTable()
lt['linename'] = ['O III', 'Halpha']
lt['rest'] = [0, 6563]*u.AA
with pytest.raises(ValueError, match='all rest values must be positive'):
specviz_helper.load_line_list(lt)
lt = QTable()
lt['linename'] = ['O III', 'Halpha']
lt['rest'] = [5007, 6563]*u.AA
specviz_helper.load_line_list(lt)
assert len(specviz_helper.spectral_lines) == 2
assert specviz_helper.spectral_lines.loc["linename", "Halpha"]["listname"] == "Custom"
assert np.all(specviz_helper.spectral_lines["show"])
assert specviz_helper.plugins['Line Lists']._obj.rs_enabled is True
specviz_helper.erase_spectral_lines()
assert np.all(specviz_helper.spectral_lines["show"] == False) # noqa
assert specviz_helper.plugins['Line Lists']._obj.rs_enabled is False
specviz_helper.plot_spectral_line("Halpha")
specviz_helper.plot_spectral_line("O III 5007.0")
assert np.all(specviz_helper.spectral_lines["show"])
assert (
specviz_helper.plugins["Line Lists"]._obj.list_contents["Custom"]["medium"]
== "Unknown (Custom)"
)
def test_redshift(specviz_helper, spectrum1d):
# Also test that plugin is disabled before data is loaded.
ll_plugin = specviz_helper.plugins['Line Lists']._obj
assert ll_plugin.disabled_msg
label = "Test 1D Spectrum"
specviz_helper.load_data(spectrum1d, data_label=label)
assert not ll_plugin.disabled_msg
lt = QTable()
lt['linename'] = ['O III', 'Halpha']
lt['rest'] = [5007, 6563]*u.AA
lt['redshift'] = u.Quantity(0.046)
lt['listname'] = 'Test List'
with pytest.warns(UserWarning, match='per line/list redshifts not supported, use viz.set_redshift'): # noqa
specviz_helper.load_line_list(lt)
# open the plugin so that all updates run
ll_plugin.plugin_opened = True
line = ll_plugin.list_contents['Test List']['lines'][0]
assert_allclose(line['obs'], line['rest'])
# test API access
specviz_helper.set_redshift(0.01)
specviz_helper.set_redshift_slider_bounds(range=0.5, step=0.01)
specviz_helper.set_redshift_slider_bounds(range='auto', step='auto')
# test plugin
ll_plugin.rs_redshift = 0.1
assert ll_plugin.rs_rv == 28487.06614479641
ll_plugin.rs_rv = 30000
assert ll_plugin.rs_redshift == 0.10561890816244568
# https://github.com/spacetelescope/jdaviz/issues/1692
# adding new data entry from a plugin should not reset redshift
specviz_helper.plugins['Gaussian Smooth'].smooth()
assert ll_plugin.rs_redshift == 0.10561890816244568
# test that setting observed wavelength works
ll_plugin.vue_change_line_obs({'list_name': 'Test List',
'line_ind': 0,
'obs_new': 5508})
assert_allclose(line['obs'], 5508)
assert ll_plugin.rs_redshift == 0.10005991611743559
# https://github.com/spacetelescope/jdaviz/issues/1168
ll_plugin.vue_set_identify(('Test List', line, 0))
ll_plugin.vue_remove_list('Test List')
assert ll_plugin._viewer.spectral_lines is None
assert ll_plugin.identify_label == ''
def test_load_available_preset_lists(specviz_helper, spectrum1d):
""" Loads all available line lists and checks the medium requirement """
label = "Test 1D Spectrum"
specviz_helper.load_data(spectrum1d, data_label=label)
# Check to make sure we got our line lists
available_linelists = get_available_linelists()
assert len(available_linelists) > 0
for linelist in available_linelists:
specviz_helper.plugins['Line Lists']._obj.vue_list_selected(linelist)
specviz_helper.plugins['Line Lists']._obj.vue_load_list(linelist)
# Check that we loaded all the lists (+1 because of the Custom list)
assert (
len(specviz_helper.plugins['Line Lists']._obj.list_contents.keys()) ==
len(available_linelists) + 1
)
# Line list must have "medium" info to be available
for list in specviz_helper.plugins['Line Lists']._obj.list_contents.values(): # noqa
assert 'medium' in list
def test_line_identify(specviz_helper, spectrum1d):
specviz_helper.load_data(spectrum1d)
lt = QTable()
lt['linename'] = ['O III', 'Halpha']
lt['rest'] = [5007, 6563]*u.AA
lt['listname'] = 'Test List'
specviz_helper.load_line_list(lt)
ll_plugin = specviz_helper.app.get_tray_item_from_name('g-line-list')
line = ll_plugin.list_contents['Test List']['lines'][0]
assert line.get('identify', False) is False
ll_plugin.vue_set_identify(('Test List', line, 0))
assert line.get('identify', False) is True
ll_plugin.vue_change_visible(('Test List', line, 0))
assert line.get('show') is False
assert line.get('identify', False) is False
def test_global_redshift_applied(specviz_helper, spectrum1d):
specviz_helper.load_data(spectrum1d)
# Create a table with redshift included
lt = QTable({'linename': ['O III', 'Halpha'],
'rest': [5007, 6563] * u.AA,
'redshift': u.Quantity([0, 0])})
with pytest.warns(UserWarning, match='per line/list redshifts not supported, use viz.set_redshift'): # noqa
specviz_helper.load_line_list(lt)
# Load a line, and apply redshift globally
specviz_helper.plot_spectral_line("Halpha")
specviz_helper.set_redshift(0.01)
# Load second line, redshift should also be applied to it
specviz_helper.plot_spectral_line("O III")
viewer_lines = [mark for mark in specviz_helper.app.get_viewer(
specviz_helper._default_spectrum_viewer_reference_name).figure.marks
if isinstance(mark, SpectralLine)]
assert np.allclose([line.redshift for line in viewer_lines], 0.01)
def test_global_redshift_applied_to_all(specviz_helper, spectrum1d):
specviz_helper.load_data(spectrum1d)
# Create a table with redshift included
lt = QTable({'linename': ['O III', 'Halpha', 'O I'],
'rest': [5007, 6563, 6300] * u.AA,
'redshift': u.Quantity([0, 0, 0])})
with pytest.warns(UserWarning, match='per line/list redshifts not supported, use viz.set_redshift'): # noqa
specviz_helper.load_line_list(lt)
# Load a line, so we can apply redshift
specviz_helper.plot_spectral_line("Halpha")
global_redshift = 0.01
specviz_helper.set_redshift(global_redshift)
# Load remaining lines
specviz_helper.plot_spectral_lines(global_redshift)
viewer_lines = [mark for mark in specviz_helper.app.get_viewer(
specviz_helper._default_spectrum_viewer_reference_name).figure.marks
if isinstance(mark, SpectralLine)]
assert np.allclose([line.redshift for line in viewer_lines], 0.01)
|
spacetelescopeREPO_NAMEjdavizPATH_START.@jdaviz_extracted@jdaviz-main@jdaviz@configs@default@plugins@line_lists@tests@[email protected]_END.py
|
{
"filename": "test_mtl_streams.py",
"repo_name": "desihub/desitarget",
"repo_path": "desitarget_extracted/desitarget-main/py/desitarget/test/test_mtl_streams.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test desitarget.mtl specifically for secondary/stream programs.
"""
import os
import unittest
import numpy as np
from astropy.table import Table, join
from desitarget.targetmask import desi_mask as Mx
from desitarget.targetmask import scnd_mask as sMx
from desitarget.mtl import make_mtl, mtldatamodel, survey_data_model
from desitarget.targets import initial_priority_numobs, main_cmx_or_sv
from desiutil.log import get_logger
log = get_logger()
class TestMTLStreams(unittest.TestCase):
def setUp(self):
self.targs = Table()
# ADM two copies of each of the GD1-style targets.
self.types = np.array(['GD1_BRIGHT_PM', 'GD1_FAINT_NO_PM', 'GD1_FILLER',
'GD1_BRIGHT_PM', 'GD1_FAINT_NO_PM', 'GD1_FILLER'])
# ADM the initial values of PRIORITY.
self.priorities = [sMx[t].priorities['UNOBS'] for t in self.types]
# ADM the initial values of NUMOBS_MORE.
self.nom = [sMx[t].numobs for t in self.types]
nt = len(self.types)
# ADM add some "extra" columns that are needed for observations.
for col in ["RA", "DEC", "PARALLAX", "PMRA", "PMDEC", "REF_EPOCH"]:
self.targs[col] = np.zeros(nt, dtype=mtldatamodel[col].dtype)
self.targs['DESI_TARGET'] = Mx["SCND_ANY"].mask
self.targs['SCND_TARGET'] = [sMx[t].mask for t in self.types]
for col in ['BGS_TARGET', 'MWS_TARGET', 'SUBPRIORITY', "PRIORITY"]:
self.targs[col] = np.zeros(nt, dtype=mtldatamodel[col].dtype)
n = len(self.targs)
self.targs['TARGETID'] = list(range(n))
# ADM determine the initial PRIORITY and NUMOBS.
pinit, ninit = initial_priority_numobs(self.targs, obscon="BRIGHT",
scnd=True)
self.targs["PRIORITY_INIT"] = pinit
self.targs["NUMOBS_INIT"] = ninit
# ADM set up an ersatz redshift catalog.
self.zcat = Table()
# ADM reverse the TARGETIDs to check joins.
self.zcat['TARGETID'] = np.flip(self.targs['TARGETID'])
self.zcat['Z'] = [0.001, 0.001, 0.001, 0.001, 0.001, 0.001]
# ADM set ZWARN for half of the objects to test both MORE_ZWARN
# ADM and MORE_ZGOOD.
self.zcat['ZWARN'] = [0, 0, 0, 1, 1, 1]
self.zcat['NUMOBS'] = [1, 1, 1, 1, 1, 1]
self.zcat['ZTILEID'] = [-1, -1, -1, -1, -1, -1]
# ADM expected progression in priorities and numbers of observations.
# ADM hand-code to some extent to better check for discrepancies.
iigood = self.zcat["ZWARN"] == 0
zgood = [sMx[t].priorities['MORE_ZGOOD'] for t in self.types[iigood]]
zwarn = [sMx[t].priorities['MORE_ZWARN'] for t in self.types[~iigood]]
# ADM PRIORITY after zero, one, two, three passes through MTL.
self.post_prio = pinit
# ADM scalar version of initial numbers of observations. Should
# ADM (deliberately) fail if classes have different NUMOBS_INIT.
self.ninit_int = int(np.unique(ninit))
# ADM loop through the numbers of observations, retain priority.
for i in range(self.ninit_int - 1):
self.post_prio = np.vstack([self.post_prio, zgood + zwarn])
self.post_prio = np.vstack(
[self.post_prio, [sMx[t].priorities['DONE'] for t in self.types]])
# ADM NUMOBS after zero, one, two, three passes through MTL.
self.post_nom = ninit
for numobs in np.arange(1, self.ninit_int + 1):
self.post_nom = np.vstack([self.post_nom,
np.array(self.nom) - numobs])
def flesh_out_data_model(self, cat):
"""Flesh out columns to produce full Main Survey data model.
"""
truedm = survey_data_model(cat, survey="main")
addedcols = list(set(truedm.dtype.names) - set(cat.dtype.names))
for col in addedcols:
cat[col] = [-1] * len(cat)
# ADM Set QN redshifts ('Z_QN') to mimic redrock redshifts ('Z').
if 'Z' in cat.dtype.names:
cat['Z_QN'] = cat['Z']
cat['IS_QSO_QN'] = 1
return cat
def test_numobs(self):
"""Test priorities, numobs, set correctly with no zcat.
"""
t = self.targs.copy()
t = self.flesh_out_data_model(t)
mtl = make_mtl(t, "BRIGHT")
log.info(f"Initial: {mtl['PRIORITY']}, {self.post_prio[0]}")
log.info(f"Initial: {mtl['NUMOBS_MORE']}, {self.post_nom[0]}")
self.assertTrue(np.all(mtl['NUMOBS_MORE'] == self.post_nom[0]))
self.assertTrue(np.all(mtl['PRIORITY'] == self.post_prio[0]))
def test_zcat(self):
"""Test priorities/numobs correct after zcat/multiple passes.
"""
t = self.targs.copy()
t = self.flesh_out_data_model(t)
zc = self.zcat.copy()
zc = self.flesh_out_data_model(zc)
for numobs in range(1, self.ninit_int + 1):
zc["NUMOBS"] = numobs
mtl = make_mtl(t, "BRIGHT", zcat=zc, trim=False)
log.info(f"{numobs}, {mtl['PRIORITY']}, {self.post_prio[numobs]}")
log.info(f"{numobs}, {mtl['NUMOBS_MORE']}, {self.post_nom[numobs]}")
self.assertTrue(np.all(mtl['PRIORITY'] == self.post_prio[numobs]))
self.assertTrue(np.all(mtl['NUMOBS_MORE'] == self.post_nom[numobs]))
|
desihubREPO_NAMEdesitargetPATH_START.@desitarget_extracted@desitarget-main@py@desitarget@test@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/funnel/marker/colorbar/title/__init__.py",
"type": "Python"
}
|
import sys
if sys.version_info < (3, 7):
from ._font import Font
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@funnel@marker@colorbar@title@[email protected]_END.py
|
{
"filename": "qotree.py",
"repo_name": "spacepy/spacepy",
"repo_path": "spacepy_extracted/spacepy-main/spacepy/sandbox/qotree.py",
"type": "Python"
}
|
#!/usr/bin/env python
'''
Let's try to make an OQ tree. QO tree. Whatever.
Original by Dan Welling, cleanup and subclassing by Brian Larsen
'''
import numpy as np
def leftdaughter(dim, k):
return k*2**dim-2**dim+2
def rightdaughter(dim, k):
return k*2**dim+1
def mother(dim, k):
return (k+2**dim-2)//2**dim
def boxes_through_level(dim, level):
return ((2**dim)**level - 1)//(2**dim - 1)
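# Indexing sketch for the helpers above (dim=2, i.e. a quadtree): the root is
# k=1, its daughters are k=2..5, theirs are k=6..21, and so on, while
# boxes_through_level(2, 2) == 5 counts every box in the first two levels.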
class QTree(dict):
'''
Base class for Quad/Oct tree objects assuming cell-centered grid points
in a rectangular non-regular layout.
'''
def __init__(self, grid, grid_edges=False, max_depth = 3, decision_func = None):
'''
Build QO Tree for input grid. Grid should be a NxM numpy array where
N is the number of dimensions and M is the number of points.
Parameters
==========
grid, numpy.ndarray
Grid should be a NxM numpy array where N is the number of dimensions and M is the number of points.
'''
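# A minimal construction sketch (not executed here; a square 2xM array of
# cell centres is assumed):
#   import numpy as np
#   xx, yy = np.meshgrid(np.linspace(-5., 5., 16), np.linspace(-5., 5., 16))
#   grid = np.vstack((xx.ravel(), yy.ravel()))   # shape (2, 256)
#   tree = QTree(grid, max_depth=3)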
if (grid.shape[0] != 2):
raise(NotImplementedError("Sorry, QTrees are for 2D grids only."))
self.decision_func = decision_func
self.max_depth = max_depth
self.grid = grid
(self.d, self.npts) = grid.shape
if not grid_edges:
#####
# toolbox.bin_center_to_edges can be used to make centers into edges
#####
# Find limits of cell-centered grid.
# Get values and locations of grid max/min
minloc, xmin = grid[0,:].argmin(), grid[0,:].min()
maxloc, xmax = grid[0,:].argmax(), grid[0,:].max()
# Distance from min X value is grid size at that point.
r = np.sqrt((grid[0,:]-xmin)**2. + (grid[1,:]-grid[1,:][minloc])**2.)
ml=r[r>0].min()
# Actual min is not cell center, but cell boundary.
xmin -= ml/2.
# Repeat for Xmax.
r = np.sqrt((grid[0,:]-xmax)**2. + (grid[1,:]-grid[1,:][maxloc])**2.)
ml=r[r>0].min()
xmax += ml/2.
# Repeat for Ymin/max.
minloc, ymin = grid[1,:].argmin(), grid[1,:].min()
maxloc, ymax = grid[1,:].argmax(), grid[1,:].max()
r = np.sqrt((grid[1,:]-ymin)**2. + (grid[0,:]-grid[0,:][minloc])**2.)
ml=r[r>0].min()
ymin -= ml/2.
r = np.sqrt((grid[1,:]-ymax)**2. + (grid[0,:]-grid[0,:][maxloc])**2.)
ml=r[r>0].min()
ymax += ml/2.
self.aspect_ratio = (xmax-xmin)/(ymax-ymin)
# Use spatial range of grid to seed root of QTree.
self[1] = Branch(np.asarray([xmin,xmax,ymin,ymax]))
self.locs = np.lexsort( (grid[1,:], grid[0,:]) ) # lexsort works from the right
self._spawn_daughters()
else:
raise(NotImplementedError("Sorry, Only allowed to use grid centers so far"))
def _spawn_daughters(self, i=1):
'''
Internal recursive method for populating tree.
'''
self[i].locs = self.locs[(self.grid[0,:][self.locs]>self[i].lim[0]) &
(self.grid[0,:][self.locs]<self[i].lim[1]) &
(self.grid[1,:][self.locs]>self[i].lim[2]) &
(self.grid[1,:][self.locs]<self[i].lim[3]) ]
self[i].npts = self[i].locs.size
if self.decision_func is not None:
if not self.decision_func(self, i):
self[i].isLeaf = True
return
# Subdivide section into four new ones (8 if oct tree)
dx = (self[i].lim[1] - self[i].lim[0])/2.0
x=[self[i].lim[0], self[i].lim[0]+dx, self[i].lim[0]+dx, self[i].lim[0]]
y=[self[i].lim[2], self[i].lim[2], self[i].lim[2]+dx, self[i].lim[2]+dx]
for j, k in enumerate(range(self.getleftdaughter(i), self.getrightdaughter(i)+1)):
self[k] = Branch(np.asarray([x[j],x[j]+dx,y[j],y[j]+dx]))
self[k].locs = self.locs[(self.grid[0,:][self.locs]>self[k].lim[0]) &
(self.grid[0,:][self.locs]<self[k].lim[1]) &
(self.grid[1,:][self.locs]>self[k].lim[2]) &
(self.grid[1,:][self.locs]<self[k].lim[3]) ]
self[k].npts = self[k].locs.size
# if we are at max depth we don't want to split again
# this is tested by seeing if k is in the lowest level
if k <= self.getboxes_through_level(self.max_depth-1):
self._spawn_daughters(k)
else:
self[k].isLeaf = True
def getboxes_through_level(self, level):
return boxes_through_level(self.d, level)
def getmother(self, k):
return mother(self.d, k)
def getleftdaughter(self, k):
return leftdaughter(self.d, k)
def getrightdaughter(self, k):
return rightdaughter(self.d, k)
def plot_res(self, ax, DoLabel=True):
res_colors={
1./32.: 'black',
1./16.: 'darkred',
1./8. : 'red',
1./4. : 'orange',
1./2. : 'yellow',
1. : 'green',
2. : 'darkblue',
4. : 'blue',
8. : 'lightblue',
16. : 'grey',
32. : 'black'}
dx_vals = {}
for key in self:
if self[key].isLeaf:
self[key].plot_res(ax, fc=res_colors[self[key].dx])
dx_vals[self[key].dx] = 1.0
if DoLabel:
ax.annotate('Resolution:', [1.01,0.99], xycoords='axes fraction',
color='k',size='medium')
for i,key in enumerate(sorted(dx_vals.keys())):
if key<1:
label = '1/%i' % (key**-1)
else:
label = '%i' % key
ax.annotate('%s $R_{E}$'%label, [1.01,0.95-i*0.05],
xycoords='axes fraction', color=res_colors[key],
size='x-large')
class Branch(object):
'''
Base class for branches/leafs along a QO tree.
'''
def __init__(self, lim, isLeaf=False):
'''
lim should be a 4 element list of the
dimensional boundaries of the branch.
'''
try:
assert(len(lim) == 4)
except AssertionError:
raise(ValueError("Limits can only be a 4 element array"))
self.isLeaf = isLeaf
self.lim = lim
def plotbox(self, ax, lc='k', **kwargs):
'''
Plot a box encompassing the branch lim onto
axis 'ax'.
'''
from matplotlib.collections import LineCollection
from numpy import array
l=self.lim
segs = (
array([ [l[0],l[2] ], [ l[1],l[2]] ]),
array([ [l[0],l[3] ], [ l[1],l[3]] ]),
array([ [l[0],l[2] ], [ l[0],l[3]] ]),
array([ [l[1],l[2] ], [ l[1],l[3]] ]))
coll=LineCollection(segs, colors=lc, **kwargs)
ax.add_collection(coll)
#ax.plot(l[0:2], [l[2],l[2]], **kwargs)
#ax.plot(l[0:2], [l[3],l[3]], **kwargs)
#ax.plot([l[0],l[0]], l[2:], **kwargs)
#ax.plot([l[1],l[1]], l[2:], **kwargs)
def plot_res(self, ax, fc='gray'):
if not self.isLeaf: return
from matplotlib.patches import Polygon
from numpy import array
l=self.lim
verts = array([
[l[0],l[2] ], [ l[1],l[2]],
[l[1],l[3] ], [ l[0],l[3]]])
poly = Polygon(verts, True, ec=None, fc=fc, lw=0.0001)
ax.add_patch(poly)
|
spacepyREPO_NAMEspacepyPATH_START.@spacepy_extracted@spacepy-main@spacepy@[email protected]@.PATH_END.py
|
{
"filename": "test_crossmatch_tns_dump.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/tests/unit/services/test_crossmatch_tns_dump.py",
"type": "Python"
}
|
import context
#import crossmatch_tns_dump
import unittest
class CrossmatchTNSDumpTest(unittest.TestCase):
"""Placeholder"""
if __name__ == '__main__':
import xmlrunner
runner = xmlrunner.XMLTestRunner(output='test-reports')
unittest.main(testRunner=runner)
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@tests@unit@services@[email protected]_END.py
|
{
"filename": "javelin_ai_gateway.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/chat_models/javelin_ai_gateway.py",
"type": "Python"
}
|
import logging
from typing import Any, Dict, List, Mapping, Optional, cast
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import (
ChatGeneration,
ChatResult,
)
from pydantic import BaseModel, ConfigDict, Field, SecretStr
logger = logging.getLogger(__name__)
# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object" [call-arg]
class ChatParams(BaseModel, extra="allow"):
"""Parameters for the `Javelin AI Gateway` LLM."""
temperature: float = 0.0
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class ChatJavelinAIGateway(BaseChatModel):
"""`Javelin AI Gateway` chat models API.
To use, you should have the ``javelin_sdk`` python package installed.
For more information, see https://docs.getjavelin.io
Example:
.. code-block:: python
from langchain_community.chat_models import ChatJavelinAIGateway
chat = ChatJavelinAIGateway(
gateway_uri="<javelin-ai-gateway-uri>",
route="<javelin-ai-gateway-chat-route>",
params={
"temperature": 0.1
}
)
"""
route: str
"""The route to use for the Javelin AI Gateway API."""
gateway_uri: Optional[str] = None
"""The URI for the Javelin AI Gateway API."""
params: Optional[ChatParams] = None
"""Parameters for the Javelin AI Gateway LLM."""
client: Any = None
"""javelin client."""
javelin_api_key: Optional[SecretStr] = Field(None, alias="api_key")
"""The API key for the Javelin AI Gateway."""
model_config = ConfigDict(
populate_by_name=True,
)
def __init__(self, **kwargs: Any):
try:
from javelin_sdk import (
JavelinClient,
UnauthorizedError,
)
except ImportError:
raise ImportError(
"Could not import javelin_sdk python package. "
"Please install it with `pip install javelin_sdk`."
)
super().__init__(**kwargs)
if self.gateway_uri:
try:
self.client = JavelinClient(
base_url=self.gateway_uri,
api_key=cast(SecretStr, self.javelin_api_key).get_secret_value(),
)
except UnauthorizedError as e:
raise ValueError("Javelin: Incorrect API Key.") from e
@property
def _default_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"gateway_uri": self.gateway_uri,
"javelin_api_key": cast(SecretStr, self.javelin_api_key).get_secret_value(),
"route": self.route,
**(self.params.dict() if self.params else {}),
}
return params
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = [
ChatJavelinAIGateway._convert_message_to_dict(message)
for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
**(self.params.dict() if self.params else {}),
}
resp = self.client.query_route(self.route, query_body=data)
return ChatJavelinAIGateway._create_chat_result(resp.dict())
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = [
ChatJavelinAIGateway._convert_message_to_dict(message)
for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
**(self.params.dict() if self.params else {}),
}
resp = await self.client.aquery_route(self.route, query_body=data)
return ChatJavelinAIGateway._create_chat_result(resp.dict())
@property
def _identifying_params(self) -> Dict[str, Any]:
return self._default_params
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
return {
**self._default_params,
**super()._get_invocation_params(stop=stop, **kwargs),
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "javelin-ai-gateway-chat"
@staticmethod
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
content = _dict["content"]
if role == "user":
return HumanMessage(content=content)
elif role == "assistant":
return AIMessage(content=content)
elif role == "system":
return SystemMessage(content=content)
else:
return ChatMessage(content=content, role=role)
@staticmethod
def _raise_functions_not_supported() -> None:
raise ValueError(
"Function messages are not supported by the Javelin AI Gateway. Please"
" create a feature request at https://docs.getjavelin.io"
)
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
raise ValueError(
"Function messages are not supported by the Javelin AI Gateway. Please"
" create a feature request at https://docs.getjavelin.io"
)
else:
raise ValueError(f"Got unknown message type: {message}")
if "function_call" in message.additional_kwargs:
ChatJavelinAIGateway._raise_functions_not_supported()
if message.additional_kwargs:
logger.warning(
"Additional message arguments are unsupported by Javelin AI Gateway "
" and will be ignored: %s",
message.additional_kwargs,
)
return message_dict
@staticmethod
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
generations = []
for candidate in response["llm_response"]["choices"]:
message = ChatJavelinAIGateway._convert_dict_to_message(
candidate["message"]
)
message_metadata = candidate.get("metadata", {})
gen = ChatGeneration(
message=message,
generation_info=dict(message_metadata),
)
generations.append(gen)
response_metadata = response.get("metadata", {})
return ChatResult(generations=generations, llm_output=response_metadata)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@chat_models@[email protected]_END.py
|
{
"filename": "echelle_orders.py",
"repo_name": "mtalapinto/moes",
"repo_path": "fideos/optics/echelle_orders.py",
"type": "Python"
}
|
import numpy as np
from optics import transform
import pandas as pd
def init():
wav_N = 500 # 1575
wav_lo = 0.36 # in microns
wav_hi = 0.71
blaze_angle = 70. * np.pi / 180
G = 44.41 * 1e-3 # lines per um
d = 1 / G
# n=
ord_blu = int(2 * d * np.sin(blaze_angle) / wav_lo)
ord_red = int(2 * d * np.sin(blaze_angle) / wav_hi)
# print('Creating echelle orders...')
spectrum = []
order = []
wave = []
Hs = []
DCs = []
x = []
y = []
z = []
dx = []
dy = []
dz = []
flux = []
while ord_red < ord_blu + 1:
wav_blz = 2 * np.sin(blaze_angle) / (G * ord_red)
wav_min = wav_blz - wav_blz / (2 * ord_red) - 0.008
wav_max = wav_blz + wav_blz / (2 * ord_red) + 0.008
dwav = (wav_max - wav_min) / wav_N
k = 0
while k <= wav_N:
H = np.zeros([3])
DC = np.zeros([3])
order.append(ord_red)
wave.append(wav_min)
Hs.append(H)
DCs.append(DC)
single_element = (ord_red, wav_min)
x.append(0)
y.append(0)
z.append(0)
dx.append(0)
dy.append(0)
dz.append(-1.)
flux.append(1.)
spectrum.append(single_element)
# file.write('%f ' % wav_min)
wav_min += dwav
k += 1
# file.write('\n')
ord_red += 1
# print('Loading spectrum... Done\n')
specout = pd.DataFrame()
specout['order'] = order
specout['wave'] = wave
specout['x'] = x
specout['y'] = y
specout['z'] = z
specout['dx'] = dx
specout['dy'] = dy
specout['dz'] = dz
specout['flux'] = flux
return specout
def dispersion(s, params, dpix):
alpha = np.full(len(s), np.abs(params[7] * np.pi / 180))
g = np.full(len(s), params[5] * 1e-3)
gamma = np.full(len(s), params[6] * np.pi / 180)
f = np.full(len(s), 455.)
pix_size = np.full(len(s), 15., dtype=float)
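# grating equation, sin(alpha) + sin(beta) = m * G * lambda / cos(gamma),
# solved for the diffraction angle beta of order m = s[:, 0] at wavelength s[:, 1]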
beta = np.arcsin(np.abs(s[:, 0]) * g * s[:, 1] / np.cos(gamma) - np.sin(alpha))
dg = s[:, 0] * g * f * np.full(len(s), 1e3) / (np.cos(gamma) * np.cos(beta) * pix_size)
dl = dpix / dg
c = np.full(len(s), 3e8)
dv = c * dl / s[:, 1]
return dv
def init_cone(frat):
# Creation of angles grid depending on focal ratio
na = 1/(2*frat)
theta = np.arcsin(na)
dtheta = 0.1
tx_max = theta*180/np.pi
ty_max = theta*180/np.pi
tx_min = -theta*180/np.pi
ty_min = -theta*180/np.pi
Tgrid = []
while tx_min < tx_max:
#print(tx_min)
while ty_min < ty_max:
Taux = np.array([tx_min*np.pi/180, ty_min*np.pi/180, 0.])
#print(Taux)
Tgrid.append(Taux)
ty_min += dtheta
tx_min += dtheta
ty_min = -theta * 180 / np.pi
Tgrid = np.array(Tgrid)
# We create echelle orders
wav_N = 3 # 1575
wav_lo = 0.4 # in microns
wav_hi = 0.68
blaze_angle = 70. * np.pi / 180
G = 44.41 * 1e-3 # lines per um
d = 1 / G
ord_blu = int(2 * d * np.sin(blaze_angle) / wav_lo) + 1
ord_red = int(2 * d * np.sin(blaze_angle) / wav_hi)
spectrum = []
while ord_red < ord_blu + 1:
wav_blz = 2 * np.sin(blaze_angle) / (G * ord_red)
wav_min = wav_blz - wav_blz / (2 * ord_red) - 0.003
wav_max = wav_blz + wav_blz / (2 * ord_red) + 0.0022
dwav = (wav_max - wav_min) / wav_N
k = 0
while k <= wav_N:
for l in range(len(Tgrid)):
H = np.zeros([3])
DC = np.zeros([3])
DC[2] = -1.
#print(Tgrid[l])
DCt = transform.transform_single(DC, Tgrid[l])
#int(DCt)
#print(DCt[0])
single_element = (ord_red, wav_min, H[0], H[1], H[2], DCt[0][0], DCt[0][1], DCt[0][2])
spectrum.append(np.array(single_element))
wav_min += dwav
k += 1
#file.write('\n')
ord_red += 1
#print(Tgrid)
spectrum = np.array(spectrum)
spectrum = pd.DataFrame(spectrum, columns=['order','wave','x','y','z','dx','dy','dz'])
print(spectrum)
return spectrum
def init_stellar_doppler_simple(rv, ord_red):
rvbase = 0.0 # m/s
rvfinal = rvbase + rv
# basepath = '/home/eduspec/Documentos/moes/fideos_moes/'
# outpath = '/media/eduspec/TOSHIBA EXT/platospec/data/f' + str(int(fcam)) + 'mm/slit/' + str(int(rv)) + '/'
# outpath = '/home/eduspec/Documentos/moes/platospec/data/f' + str(int(fcam)) + 'mm/slit/' + str(int(rv)) + '/'
# stardir = '/home/eduspec/Documentos/moes/P_s5700g4.50z0.0t0.97_a0.00c0.00n0.00o0.00_VIS.spec.flat/'
stardir = 'stellar_template/'
stellar_spec = pd.read_csv(stardir + 'stellar_template_resampled.tsv',
sep=',')
# if not os.path.isdir(outpath):
# os.mkdir(outpath)
blaze_angle = 70. * np.pi / 180
G = 44.41 * 1e-3 # lines per um
d = 1 / G
fcol = 762
ordout, waveout, x, y, z, dcx, dcy, dcz, fluxout = [], [], [], [], [], [], [], [], []
print('Creating slit echelle order ', ord_red, 'rv = ', rv)
wav_blz = 2 * np.sin(blaze_angle) / (G * ord_red)
wav_min = wav_blz - wav_blz / (2 * ord_red) - 0.002
wav_max = wav_blz + wav_blz / (2 * ord_red) + 0.002
wmin = wav_min * 1e4
wmax = wav_max * 1e4
stellardata = stellar_spec.loc[stellar_spec['wave'] < wmax]
stellardata = stellardata.loc[stellardata['wave'] > wmin]
stellardata['wave_new'] = stellardata['wave'] * (1 + rvfinal / 3.e8)
for k in range(len(stellardata)):
H = np.zeros([3])
H[0] = 0
H[1] = 0
H[2] = 0
DC = np.zeros([3])
DC[2] = -1.
ordout.append(float(ord_red))
waveout.append(float((stellardata['wave_new'].values[k]) * 1e-4))
x.append(float(H[0]))
y.append(float(H[1]))
z.append(float(H[2]))
dcx.append(float(DC[0]))
dcy.append(float(DC[1]))
dcz.append(float(DC[2]))
fluxout.append(float(stellardata['flux'].values[k]))
slitout = pd.DataFrame()
slitout['order'] = ordout
slitout['wave'] = waveout
slitout['x'] = x
slitout['y'] = y
slitout['z'] = z
slitout['dx'] = dcx
slitout['dy'] = dcy
slitout['dz'] = dcz
slitout['flux'] = fluxout
print('Slit array created...')
return slitout
def init_g2mask(mask, ord):
# print('Creating echelle orders...')
spectrum = []
order = []
wave = []
Hs = []
DCs = []
x = []
y = []
z = []
dx = []
dy = []
dz = []
flux = []
for k in range(len(mask)):
H = np.zeros([3])
DC = np.zeros([3])
order.append(ord)
wave.append(mask['wave'].values[k])
Hs.append(H)
DCs.append(DC)
single_element = (ord, mask['wave'].values[k])
x.append(0)
y.append(0)
z.append(0)
dx.append(0)
dy.append(0)
dz.append(-1.)
flux.append(1.)
spectrum.append(single_element)
specout = pd.DataFrame()
specout['order'] = order
specout['wave'] = wave
specout['x'] = x
specout['y'] = y
specout['z'] = z
specout['dx'] = dx
specout['dy'] = dy
specout['dz'] = dz
specout['flux'] = flux
return specout
if __name__ == '__main__':
spec = init_cone(22)
|
mtalapintoREPO_NAMEmoesPATH_START.@fideos@optics@[email protected]_END.py
|
{
"filename": "Scatter Density vs. Violin Plot Comparison.ipynb",
"repo_name": "shaoshanglqy/shap-shapley",
"repo_path": "shap-shapley_extracted/shap-shapley-master/notebooks/tree_explainer/Scatter Density vs. Violin Plot Comparison.ipynb",
"type": "Jupyter Notebook"
}
|
# Scatter Density vs. Violin Plot
This notebook gives several examples comparing the dot-density and violin-plot options for `summary_plot`.
```python
import xgboost
import shap
# train xgboost model on diabetes data:
X, y = shap.datasets.diabetes()
bst = xgboost.train({"learning_rate": 0.01}, xgboost.DMatrix(X, label=y), 100)
# explain the model's prediction using SHAP values on the first 1000 training data samples
shap_values = shap.TreeExplainer(bst).shap_values(X)
```
## Layered violin plot
Without color, this plot can simply display the distribution of importance for each variable as a standard violin plot.
```python
shap.summary_plot(shap_values[:1000,:], X.iloc[:1000,:], plot_type="layered_violin", color='#cccccc')
```

For example, in the above, we can see that `s5` is the most important variable, and generally it causes either a large positive or negative change in the prediction. However, is it large values of `s5` that cause a positive change and small ones that cause a negative change - or vice versa, or something more complicated? If we use color to represent the largeness/smallness of the feature, then this becomes apparent:
```python
shap.summary_plot(shap_values[:1000,:], X.iloc[:1000,:], plot_type="layered_violin", color='coolwarm')
```

Here, red represents large values of a variable, and blue represents small ones. So, it becomes clear that *large* values of `s5` do indeed increase the prediction, and vice versa. You can also see that others (like `s6`) are pretty evenly split, which indicates that while overall they're still important, their interaction is dependent on other variables. (After all, the whole point of a tree model like xgboost is to capture these interactions, so we can't expect to see everything in a single dimension!)
> Note that the order of the color isn't important: each violin is actually a number (`layered_violin_max_num_bins`) of individual smoothed shapes stacked on top of each other, where each shape corresponds to a certain percentile of the feature (e.g. `the 5-10% percentile of s5 values`). These are always drawn with small values first (and hence closest to the x-axis) and large values last (hence on the 'edge'), and that's why in this case you always see the red on the edge and the blue in the middle. (You could, of course, switch this around by using a different color map, but the point is that the order of red inside/outside blue has no inherent meaning.)
There are other options you can play with, if you wish. Most notable is the `layered_violin_max_num_bins` mentioned above. This has an additional effect: if the feature has fewer than `layered_violin_max_num_bins` unique values, then instead of partitioning each section as a percentile (the `5-10%` above), we make each section represent a specific value. For example, since `sex` has only two values, here blue will mean male (or female?) and red means female (or male?). It is not clear from the diabetes data whether a higher value of sex means male or female.
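For instance, a sketch of tweaking that option (this assumes `summary_plot` accepts `layered_violin_max_num_bins` as a keyword argument, as the discussion above suggests; the cell is not run here):
```python
# coarser binning: at most 10 stacked shapes per violin (illustrative sketch)
shap.summary_plot(shap_values[:1000,:], X.iloc[:1000,:], plot_type="layered_violin",
                  color='coolwarm', layered_violin_max_num_bins=10)
```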
<!-- commenting this out for the public repo since there is a fair amount of opinion here.
#### Pros
- look great
- easily interpretable (with color): people can generally get the idea without having to explain in detail
- both of these meant they're good to show laymen/clients in presentations etc.
#### Cons
- take longer to draw (only relevant if you're doing heaps)
- can be hard to get the smoothing just right
- the code isn't as well supported, so if you want to tweak it, you might have to hack the code yourself-->
## Dot plot
This combines a scatter plot with density estimation by letting dots pile up when they don't fit. The advantage of this approach is that it does not hide anything behind kernel smoothing, so what-you-see-is-what-is-there.
```python
shap.summary_plot(shap_values[:1000,:], X.iloc[:1000,:])
```

<!--#### Pros
- if you're looking for really fine features, and your data doesn't have the problems below, then this might be better. However, you probably shouldn't be using a graph to discover such fine features.
#### Cons
- generally doesn't look as nice for most data sets
- can be quite noisy - no smoothing etc. This generally makes it harder to interpret the 'obvious' results.
- the plot will depend on the order the dots are drawn (since they'll overlap etc.). In other words it's possible that you could get very different looking plots with the same data. You can get round this somewhat by using a very low opacity - but this then makes the non-overlapping parts of the graph hard to read.
- [Note: this issue could be fixed somewhat if the y-value of the dots are given specific meaning (as with the layered violin plot) to avoid plots of different color overlapping. Though then it'd just be the layered violin plot.]
- doesn't support categorical data (see the comment for the layered violin plot).-->
## Violin plot
This is a standard violin plot, but with outliers drawn as individual points. This gives a more accurate representation of the density of the outliers than a kernel density estimated from so few points. The color represents the average feature value at that position, so red regions have mostly high feature values while blue regions have mostly low feature values.
```python
shap.summary_plot(shap_values[:1000,:], X.iloc[:1000,:], plot_type="violin")
```

|
shaoshanglqyREPO_NAMEshap-shapleyPATH_START.@shap-shapley_extracted@shap-shapley-master@notebooks@tree_explainer@Scatter Density vs. Violin Plot [email protected]_END.py
|
{
"filename": "run_astropy_tests.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/.pyinstaller/run_astropy_tests.py",
"type": "Python"
}
|
import os
import shutil
import sys
import erfa # noqa: F401
import matplotlib as mpl
import pytest
import astropy # noqa: F401
if len(sys.argv) == 3 and sys.argv[1] == "--astropy-root":
ROOT = sys.argv[2]
else:
# Make sure we don't allow any arguments to be passed - some tests call
# sys.executable which becomes this script when producing a pyinstaller
# bundle, but we should just error in this case since this is not the
# regular Python interpreter.
if len(sys.argv) > 1:
print("Extra arguments passed, exiting early")
sys.exit(1)
for root, dirnames, files in os.walk(os.path.join(ROOT, "astropy")):
# NOTE: we can't simply use
# test_root = root.replace('astropy', 'astropy_tests')
# as we only want to change the one which is for the module, so instead
# we search for the last occurrence and replace that.
pos = root.rfind("astropy")
test_root = root[:pos] + "astropy_tests" + root[pos + 7 :]
# Copy over the astropy 'tests' directories and their contents
for dirname in dirnames:
final_dir = os.path.relpath(os.path.join(test_root, dirname), ROOT)
# We only copy over 'tests' directories, but not astropy/tests (only
# astropy/tests/tests) since that is not just a directory with tests.
if dirname == "tests" and not root.endswith("astropy"):
shutil.copytree(os.path.join(root, dirname), final_dir, dirs_exist_ok=True)
else:
# Create empty __init__.py files so that 'astropy_tests' still
# behaves like a single package, otherwise pytest gets confused
# by the different conftest.py files.
init_filename = os.path.join(final_dir, "__init__.py")
if not os.path.exists(os.path.join(final_dir, "__init__.py")):
os.makedirs(final_dir, exist_ok=True)
with open(os.path.join(final_dir, "__init__.py"), "w") as f:
f.write("#")
# Copy over all conftest.py files
for file in files:
if file == "conftest.py":
final_file = os.path.relpath(os.path.join(test_root, file), ROOT)
shutil.copy2(os.path.join(root, file), final_file)
# Add the top-level __init__.py file
with open(os.path.join("astropy_tests", "__init__.py"), "w") as f:
f.write("#")
# Remove test file that tries to import all sub-packages at collection time
os.remove(
os.path.join("astropy_tests", "utils", "iers", "tests", "test_leap_second.py")
)
# Remove convolution tests for now as there are issues with the loading of the C extension.
# FIXME: one way to fix this would be to migrate the convolution C extension away from using
# ctypes and using the regular extension mechanism instead.
shutil.rmtree(os.path.join("astropy_tests", "convolution"))
os.remove(os.path.join("astropy_tests", "modeling", "tests", "test_convolution.py"))
os.remove(os.path.join("astropy_tests", "modeling", "tests", "test_core.py"))
os.remove(os.path.join("astropy_tests", "visualization", "tests", "test_lupton_rgb.py"))
# FIXME: PIL minversion check does not work
os.remove(
os.path.join("astropy_tests", "visualization", "wcsaxes", "tests", "test_misc.py")
)
os.remove(
os.path.join("astropy_tests", "visualization", "wcsaxes", "tests", "test_wcsapi.py")
)
# FIXME: The following tests rely on the fully qualified name of classes which
# don't seem to be the same.
os.remove(os.path.join("astropy_tests", "table", "mixins", "tests", "test_registry.py"))
# Copy the top-level conftest.py
shutil.copy2(
os.path.join(ROOT, "astropy", "conftest.py"),
os.path.join("astropy_tests", "conftest.py"),
)
# matplotlib hook in pyinstaller 5.0 and later no longer collects every backend, see
# https://github.com/pyinstaller/pyinstaller/issues/6760
mpl.use("svg")
# We skip a few tests, which are generally ones that rely on explicitly
# checking the name of the current module (which ends up starting with
# astropy_tests rather than astropy).
SKIP_TESTS = [
"test_exception_logging_origin",
"test_log",
"test_configitem",
"test_config_noastropy_fallback",
"test_no_home",
"test_path",
"test_rename_path",
"test_data_name_third_party_package",
"test_pkg_finder",
"test_wcsapi_extension",
"test_find_current_module_bundle",
"test_minversion",
"test_imports",
"test_generate_config",
"test_generate_config2",
"test_create_config_file",
"test_download_parallel_fills_cache",
]
# Run the tests!
sys.exit(
pytest.main(
["astropy_tests", "-k " + " and ".join("not " + test for test in SKIP_TESTS)],
plugins=[
"pytest_astropy.plugin",
"pytest_doctestplus.plugin",
"pytest_remotedata.plugin",
"pytest_astropy_header.display",
],
)
)
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@[email protected]@[email protected]_END.py
|
{
"filename": "admin.py",
"repo_name": "simonsobs/TeleView",
"repo_path": "TeleView_extracted/TeleView-main/tvapi/api/admin.py",
"type": "Python"
}
|
from django.contrib import admin
from .models import StatusModel, SchedulerState, SchedulerQueue
admin.site.register(StatusModel)
admin.site.register(SchedulerState)
admin.site.register(SchedulerQueue)
|
simonsobsREPO_NAMETeleViewPATH_START.@TeleView_extracted@TeleView-main@tvapi@[email protected]@.PATH_END.py
|
{
"filename": "dump.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/sqlite3/dump.py",
"type": "Python"
}
|
# Mimic the sqlite3 console shell's .dump command
# Author: Paul Kippes <[email protected]>
# Every identifier in sql is quoted based on a comment in sqlite
# documentation "SQLite adds new keywords from time to time when it
# takes on new features. So to prevent your code from being broken by
# future enhancements, you should normally quote any identifier that
# is an English language word, even if you do not have to."
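# A minimal usage sketch (assuming an in-memory database; not part of this module):
#
#   import sqlite3
#   con = sqlite3.connect(":memory:")
#   con.execute("CREATE TABLE t(x)")
#   for line in con.iterdump():  # Connection.iterdump() drives _iterdump()
#       print(line)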
def _iterdump(connection):
"""
Returns an iterator to the dump of the database in an SQL text format.
Used to produce an SQL dump of the database. Useful to save an in-memory
database for later restoration. This function should not be called
directly but instead called from the Connection method, iterdump().
"""
writeable_schema = False
cu = connection.cursor()
cu.row_factory = None # Make sure we get predictable results.
yield('BEGIN TRANSACTION;')
# sqlite_master table contains the SQL CREATE statements for the database.
q = """
SELECT "name", "type", "sql"
FROM "sqlite_master"
WHERE "sql" NOT NULL AND
"type" == 'table'
ORDER BY "name"
"""
schema_res = cu.execute(q)
sqlite_sequence = []
for table_name, type, sql in schema_res.fetchall():
if table_name == 'sqlite_sequence':
rows = cu.execute('SELECT * FROM "sqlite_sequence";').fetchall()
sqlite_sequence = ['DELETE FROM "sqlite_sequence"']
sqlite_sequence += [
f'INSERT INTO "sqlite_sequence" VALUES(\'{row[0]}\',{row[1]})'
for row in rows
]
continue
elif table_name == 'sqlite_stat1':
yield('ANALYZE "sqlite_master";')
elif table_name.startswith('sqlite_'):
continue
elif sql.startswith('CREATE VIRTUAL TABLE'):
if not writeable_schema:
writeable_schema = True
yield('PRAGMA writable_schema=ON;')
yield("INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)"
"VALUES('table','{0}','{0}',0,'{1}');".format(
table_name.replace("'", "''"),
sql.replace("'", "''"),
))
else:
yield('{0};'.format(sql))
# Build the insert statement for each row of the current table
table_name_ident = table_name.replace('"', '""')
res = cu.execute('PRAGMA table_info("{0}")'.format(table_name_ident))
column_names = [str(table_info[1]) for table_info in res.fetchall()]
q = """SELECT 'INSERT INTO "{0}" VALUES({1})' FROM "{0}";""".format(
table_name_ident,
",".join("""'||quote("{0}")||'""".format(col.replace('"', '""')) for col in column_names))
query_res = cu.execute(q)
for row in query_res:
yield("{0};".format(row[0]))
# Now when the type is 'index', 'trigger', or 'view'
q = """
SELECT "name", "type", "sql"
FROM "sqlite_master"
WHERE "sql" NOT NULL AND
"type" IN ('index', 'trigger', 'view')
"""
schema_res = cu.execute(q)
for name, type, sql in schema_res.fetchall():
yield('{0};'.format(sql))
if writeable_schema:
yield('PRAGMA writable_schema=OFF;')
# gh-79009: Yield statements concerning the sqlite_sequence table at the
# end of the transaction.
for row in sqlite_sequence:
yield('{0};'.format(row))
yield('COMMIT;')
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/pip/vendor/html5lib/filters/__init__.py",
"type": "Python"
}
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@[email protected]@site-packages@pip@vendor@html5lib@filters@[email protected]_END.py
|
|
{
"filename": "combinations.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/framework/combinations.py",
"type": "Python"
}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module customizes `test_combinations` for Tensorflow.
Additionally it provides `generate()`, `combine()` and `times()` with Tensorflow
customizations as a default.
"""
import functools
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_combinations
from tensorflow.python.util.tf_export import tf_export
class EagerGraphCombination(test_combinations.TestCombination):
"""Run the test in Graph or Eager mode.
The optional `mode` parameter controls the test's execution mode. Its
accepted values are "graph" or "eager" literals.
"""
def context_managers(self, kwargs):
mode = kwargs.pop("mode", None)
if mode is None:
return []
elif mode == "eager":
return [context.eager_mode()]
elif mode == "graph":
return [ops.Graph().as_default(), context.graph_mode()]
else:
raise ValueError(
"Argument 'mode' must be either 'eager' or 'graph'. "
f"Received: {mode}.")
def parameter_modifiers(self):
return [test_combinations.OptionalParameter("mode")]
class TFVersionCombination(test_combinations.TestCombination):
"""Control the execution of the test in TF1.x and TF2.
If TF2 is enabled then a test with TF1 test is going to be skipped and vice
versa.
Test targets continuously run in TF2 thanks to the tensorflow.v2 TAP target.
A test can be run in TF2 with bazel by passing --test_env=TF2_BEHAVIOR=1.
"""
def should_execute_combination(self, kwargs):
tf_api_version = kwargs.pop("tf_api_version", None)
if tf_api_version == 1 and tf2.enabled():
return (False, "Skipping a TF1.x test when TF2 is enabled.")
elif tf_api_version == 2 and not tf2.enabled():
return (False, "Skipping a TF2 test when TF2 is not enabled.")
return (True, None)
def parameter_modifiers(self):
return [test_combinations.OptionalParameter("tf_api_version")]
generate = functools.partial(
test_combinations.generate,
test_combinations=(EagerGraphCombination(), TFVersionCombination()))
combine = test_combinations.combine
times = test_combinations.times
NamedObject = test_combinations.NamedObject
tf_export("__internal__.test.combinations.generate", v1=[])(generate)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@[email protected]@.PATH_END.py
|
{
"filename": "td_mag_likelihood.py",
"repo_name": "sibirrer/hierArc",
"repo_path": "hierArc_extracted/hierArc-main/hierarc/Likelihood/LensLikelihood/td_mag_likelihood.py",
"type": "Python"
}
|
import numpy as np
from lenstronomy.Util import constants as const
from lenstronomy.Util.data_util import magnitude2cps
class TDMagLikelihood(object):
"""Likelihood of time delays and magnification likelihood.
This likelihood uses linear flux units and linear lensing magnifications.
"""
def __init__(
self,
time_delay_measured,
cov_td_measured,
amp_measured,
cov_amp_measured,
fermat_diff,
magnification_model,
cov_model,
magnitude_zero_point=20,
):
"""
:param time_delay_measured: array, relative time delays (relative to the first image) [days]
:param cov_td_measured: 2d array, error covariance matrix of time delay measurement [days^2]
:param amp_measured: array, amplitudes of measured fluxes of image positions
:param cov_amp_measured: 2d array, error covariance matrix of the measured amplitudes
:param fermat_diff: mean Fermat potential differences (relative to the first image) in arcsec^2
:param magnification_model: mean magnification of the model prediction
:param cov_model: 2d array (length relative time delays + image amplitudes); model fermat potential differences
and lensing magnification covariances
:param magnitude_zero_point: magnitude zero point for which the image amplitudes and covariance matrix are
defined
"""
self._data_vector = np.append(time_delay_measured, amp_measured)
self._cov_td_measured = np.array(cov_td_measured)
self._cov_amp_measured = np.array(cov_amp_measured)
# check sizes of covariances matches
n_tot = len(self._data_vector)
self._n_td = len(time_delay_measured)
self._n_amp = len(amp_measured)
assert self._n_td == len(cov_td_measured)
assert self._n_amp == len(cov_amp_measured)
assert n_tot == len(cov_model)
# merge data covariance matrices from time delay and image amplitudes
self._cov_data = np.zeros((n_tot, n_tot))
self._cov_data[: self._n_td, : self._n_td] = self._cov_td_measured
self._cov_data[self._n_td :, self._n_td :] = self._cov_amp_measured
# self._fermat_diff = fermat_diff # in units arcsec^2
self._fermat_unit_conversion = (
const.Mpc / const.c / const.day_s * const.arcsec**2
)
# self._mag_model = mag_model
self._model_tot = np.append(fermat_diff, magnification_model)
self._cov_model = cov_model
self.num_data = n_tot
self._magnitude_zero_point = magnitude_zero_point
def log_likelihood(self, ddt, mu_intrinsic):
"""
:param ddt: time-delay distance (physical Mpc)
:param mu_intrinsic: intrinsic brightness of the source (already incorporating the inverse MST transform)
:return: log likelihood of the measured magnified images given the source brightness
"""
model_vector, cov_tot = self._model_cov(ddt, mu_intrinsic)
# invert matrix
try:
cov_tot_inv = np.linalg.inv(cov_tot)
except np.linalg.LinAlgError:
return -np.inf
# difference to data vector
delta = self._data_vector - model_vector
# evaluate likelihood
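# Gaussian log-likelihood: ln L = -1/2 * [delta^T C^-1 delta + n ln(2 pi) + ln|C|]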
lnlikelihood = -delta.dot(cov_tot_inv.dot(delta)) / 2.0
sign_det, lndet = np.linalg.slogdet(cov_tot)
lnlikelihood -= 1 / 2.0 * (self.num_data * np.log(2 * np.pi) + lndet)
return lnlikelihood
def _model_cov(self, ddt, mu_intrinsic):
"""Combined covariance matrix of the data and model when marginialized over the
Gaussian model uncertainties in the Fermat potential and magnification.
:param ddt: time-delay distance (physical Mpc)
:param mu_intrinsic: intrinsic brightness of the source (already incorporating
the inverse MST transform)
:return: model vector, combined covariance matrix
"""
# compute model predicted magnified image amplitude and time delay
amp_intrinsic = magnitude2cps(
magnitude=mu_intrinsic, magnitude_zero_point=self._magnitude_zero_point
)
model_scale = np.append(
ddt * self._fermat_unit_conversion * np.ones(self._n_td),
amp_intrinsic * np.ones(self._n_amp),
)
model_vector = model_scale * self._model_tot
# scale model covariance matrix with model_scale vector (in quadrature)
cov_model = model_scale * (self._cov_model * model_scale).T
# combine data and model covariance matrix
cov_tot = self._cov_data + cov_model
return model_vector, cov_tot
|
sibirrerREPO_NAMEhierArcPATH_START.@hierArc_extracted@hierArc-main@hierarc@Likelihood@LensLikelihood@[email protected]_END.py
|
{
"filename": "logm0_c1_early.py",
"repo_name": "ArgonneCPAC/diffmah",
"repo_path": "diffmah_extracted/diffmah-main/diffmah/diffmahpop_kernels/bimod_logm0_kernels/logm0_c1_early.py",
"type": "Python"
}
|
"""
"""
from collections import OrderedDict, namedtuple
from jax import jit as jjit
from jax import numpy as jnp
from jax import value_and_grad, vmap
from ...bfgs_wrapper import bfgs_adam_fallback
from ...utils import _inverse_sigmoid, _sig_slope, _sigmoid
DEFAULT_LGM0POP_C1_PDICT = OrderedDict(
lgm0pop_c1_ytp_early=0.002,
lgm0pop_c1_ylo_early=-0.043,
lgm0pop_c1_clip_x0_early=7.185,
lgm0pop_c1_clip_ylo_early=0.140,
lgm0pop_c1_clip_yhi_early=0.002,
lgm0pop_c1_t_obs_x0_early=3.01,
)
LGM0Pop_C1_Params = namedtuple("LGM0Pop_C1_Params", DEFAULT_LGM0POP_C1_PDICT.keys())
DEFAULT_LGM0POP_C1_PARAMS = LGM0Pop_C1_Params(**DEFAULT_LGM0POP_C1_PDICT)
LGM0POP_C1_BOUNDS_DICT = OrderedDict(
lgm0pop_c1_ytp_early=(0.001, 0.1),
lgm0pop_c1_ylo_early=(-0.05, -0.001),
lgm0pop_c1_clip_x0_early=(4.0, 11.0),
lgm0pop_c1_clip_ylo_early=(0.02, 0.15),
lgm0pop_c1_clip_yhi_early=(0.001, 0.05),
lgm0pop_c1_t_obs_x0_early=(3.0, 10.0),
)
LGM0POP_C1_BOUNDS = LGM0Pop_C1_Params(**LGM0POP_C1_BOUNDS_DICT)
_C1_UPNAMES = ["u_" + key for key in LGM0Pop_C1_Params._fields]
LGM0Pop_C1_UParams = namedtuple("LGM0Pop_C1_UParams", _C1_UPNAMES)
XTP = 10.0
GLOBAL_K = 0.25
CLIP_TP_K = 1.0
K_BOUNDING = 0.1
@jjit
def _pred_c1_kern(params, t_obs, t_peak):
pred_c1 = _sig_slope(
t_obs,
XTP,
params.lgm0pop_c1_ytp_early,
params.lgm0pop_c1_t_obs_x0_early,
GLOBAL_K,
params.lgm0pop_c1_ylo_early,
0.0,
)
clip = _sigmoid(
t_peak,
params.lgm0pop_c1_clip_x0_early,
CLIP_TP_K,
params.lgm0pop_c1_clip_ylo_early,
params.lgm0pop_c1_clip_yhi_early,
)
pred_c1 = jnp.clip(pred_c1, min=clip)
return pred_c1
@jjit
def _mse(x, y):
d = y - x
return jnp.mean(d * d)
@jjit
def _loss_kern_scalar(params, loss_data):
t_obs, t_peak, target_c1 = loss_data
pred_c1 = _pred_c1_kern(params, t_obs, t_peak)
return _mse(target_c1, pred_c1)
@jjit
def global_loss_kern(params, global_loss_data):
loss = 0.0
for loss_data in global_loss_data:
loss = loss + _loss_kern_scalar(params, loss_data)
return loss
global_loss_and_grads_kern = jjit(value_and_grad(global_loss_kern))
def fit_global_c1_model(global_loss_data, p_init=DEFAULT_LGM0POP_C1_PARAMS):
_res = bfgs_adam_fallback(global_loss_and_grads_kern, p_init, global_loss_data)
p_best, loss_best, fit_terminates, code_used = _res
return p_best, loss_best, fit_terminates, code_used
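# Bounded <-> unbounded parameter mapping: each parameter is squashed into its
# (lo, hi) bounds with a sigmoid centred on the interval midpoint (slope
# K_BOUNDING), and recovered with the inverse sigmoid, so fits can run in
# unconstrained space.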
@jjit
def _get_bounded_c1_param(u_param, bound):
lo, hi = bound
mid = 0.5 * (lo + hi)
return _sigmoid(u_param, mid, K_BOUNDING, lo, hi)
@jjit
def _get_unbounded_c1_param(param, bound):
lo, hi = bound
mid = 0.5 * (lo + hi)
return _inverse_sigmoid(param, mid, K_BOUNDING, lo, hi)
_C = (0, 0)
_get_bounded_c1_params_kern = jjit(vmap(_get_bounded_c1_param, in_axes=_C))
_get_unbounded_c1_params_kern = jjit(vmap(_get_unbounded_c1_param, in_axes=_C))
@jjit
def get_bounded_c1_params(u_params):
u_params = jnp.array([getattr(u_params, u_pname) for u_pname in _C1_UPNAMES])
params = _get_bounded_c1_params_kern(
jnp.array(u_params), jnp.array(LGM0POP_C1_BOUNDS)
)
params = LGM0Pop_C1_Params(*params)
return params
@jjit
def get_unbounded_c1_params(params):
params = jnp.array([getattr(params, pname) for pname in LGM0Pop_C1_Params._fields])
u_params = _get_unbounded_c1_params_kern(
jnp.array(params), jnp.array(LGM0POP_C1_BOUNDS)
)
u_params = LGM0Pop_C1_UParams(*u_params)
return u_params
DEFAULT_LGM0POP_C1_U_PARAMS = LGM0Pop_C1_UParams(
*get_unbounded_c1_params(DEFAULT_LGM0POP_C1_PARAMS)
)
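# Minimal usage sketch: the bounded/unbounded transforms defined above should
# round-trip the default parameters (up to floating-point precision).
def _c1_param_roundtrip_sketch():
    u_params = get_unbounded_c1_params(DEFAULT_LGM0POP_C1_PARAMS)
    params = get_bounded_c1_params(u_params)
    return params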
|
ArgonneCPACREPO_NAMEdiffmahPATH_START.@diffmah_extracted@diffmah-main@diffmah@diffmahpop_kernels@bimod_logm0_kernels@[email protected]_END.py
|
{
"filename": "batch.py",
"repo_name": "minoshim/qasMHD",
"repo_path": "qasMHD_extracted/qasMHD-main/2D/MPI/python/batch.py",
"type": "Python"
}
|
# Python3 script to load and draw MHD-2D(MPI-merged) data
# Packages Numpy and Matplotlib are required.
# Call the script in command line:
# > python batch.py
# Call the script in Python3 interactive mode:
# >>> exec(open("batch.py").read())
import numpy as np
import matplotlib.pyplot as plt
from python import plt2d
#Read independent variables and parameters
while True:
direc=input("Input data directory (Ctrl-D to exit): ")+"/"
try:
x=np.loadtxt(direc+"merge_x.dat",dtype=float)
y=np.loadtxt(direc+"merge_y.dat",dtype=float)
t=np.atleast_1d(np.loadtxt(direc+"t.dat",dtype=float))
para=np.atleast_1d(np.loadtxt(direc+"params.dat",dtype=float))
break
except:
print("Error during file load.")
gam=para[0]
#Number of elements
nx=np.size(x)
ny=np.size(y)
nt=np.size(t)
nd=8 #Number of dependent variables in MHD-2D
#Read MHD data @ particular time
sst=-1
while ((sst < 0) or (sst >= nt)):
sst=int(input(f"Specity time period (0-{nt-1}): "))
data=np.fromfile(direc+f"merge_outdat_{sst:05d}.dat",dtype=np.float32).reshape((nd,ny,nx))
dx=x[1]-x[0]
dy=y[1]-y[0]
#Primitive variables
ro=data[0,:,:]
vx=data[1,:,:]/ro
vy=data[2,:,:]/ro
vz=data[3,:,:]/ro
bx=data[4,:,:] # @ CT grid (i-1/2,j)
by=data[5,:,:] # @ CT grid (i,j-1/2)
bz=data[6,:,:]
en=data[7,:,:]
pr=(gam-1)*(en-0.5*(ro*(vx**2+vy**2+vz**2)+(bx**2+by**2+bz**2)))
# Current
jz=np.zeros((ny,nx))
for j in range(1,ny):
for i in range(1,nx):
jz[j,i]=(by[j,i]-by[j,i-1])/dx-(bx[j,i]-bx[j-1,i])/dy # @ corner (i-1/2,j-1/2)
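# Vectorized equivalent of the double loop above (same CT-grid finite differences),
# kept as a commented sketch so the script behaviour is unchanged:
# jz[1:,1:]=(by[1:,1:]-by[1:,:-1])/dx-(bx[1:,1:]-bx[:-1,1:])/dy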
#Plot
val=pr/ro
a=plt2d.image(x=x,y=y,val=val,save=0,title=f"t={t[sst]:.2f}",show=1)
|
minoshimREPO_NAMEqasMHDPATH_START.@qasMHD_extracted@qasMHD-main@2D@MPI@[email protected]@.PATH_END.py
|
{
"filename": "lensed_position.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/PointSource/Types/lensed_position.py",
"type": "Python"
}
|
import numpy as np
from lenstronomy.PointSource.Types.base_ps import PSBase, _expand_to_array
__all__ = ["LensedPositions"]
class LensedPositions(PSBase):
"""
class of a lensed point source parameterized as the (multiple) observed image positions
Name within the PointSource module: 'LENSED_POSITION'
parameters: ra_image, dec_image, point_amp
If fixed_magnification=True, then 'source_amp' is a parameter instead of 'point_amp'
"""
# def __init__(self, lens_model=None, fixed_magnification=False, additional_image=False):
# super(LensedPositions, self).__init__(lens_model=lens_model, fixed_magnification=fixed_magnification,
# additional_image=additional_image)
def image_position(
self,
kwargs_ps,
kwargs_lens=None,
magnification_limit=None,
kwargs_lens_eqn_solver=None,
additional_images=False,
):
"""On-sky image positions.
:param kwargs_ps: keyword arguments of the point source model
:param kwargs_lens: keyword argument list of the lens model(s), only used when
requiring the lens equation solver
:param magnification_limit: float >0 or None, if float is set and additional
images are computed, only those images will be computed that exceed the
lensing magnification (absolute value) limit
:param kwargs_lens_eqn_solver: keyword arguments specifying the numerical
settings for the lens equation solver see LensEquationSolver() class for
details
:param additional_images: if True, solves the lens equation for additional
images
:type additional_images: bool
:return: image positions in x, y as arrays
"""
if self.additional_images is True or additional_images:
if kwargs_lens_eqn_solver is None:
kwargs_lens_eqn_solver = {}
ra_source, dec_source = self.source_position(kwargs_ps, kwargs_lens)
# TODO: this solver does not distinguish between different frames/bands with partial lens models
self._solver.change_source_redshift(self._redshift)
ra_image, dec_image = self._solver.image_position_from_source(
ra_source,
dec_source,
kwargs_lens,
magnification_limit=magnification_limit,
**kwargs_lens_eqn_solver
)
else:
ra_image = kwargs_ps["ra_image"]
dec_image = kwargs_ps["dec_image"]
return np.array(ra_image), np.array(dec_image)
def source_position(self, kwargs_ps, kwargs_lens=None):
"""Original source position (prior to lensing)
:param kwargs_ps: point source keyword arguments
:param kwargs_lens: lens model keyword argument list (required to ray-trace back
in the source plane)
:return: x, y position (as numpy arrays)
"""
ra_image = kwargs_ps["ra_image"]
dec_image = kwargs_ps["dec_image"]
self._lens_model.change_source_redshift(self._redshift)
if self.k_list is None:
x_source, y_source = self._lens_model.ray_shooting(
ra_image, dec_image, kwargs_lens
)
else:
x_source, y_source = [], []
for i in range(len(ra_image)):
x, y = self._lens_model.ray_shooting(
ra_image[i], dec_image[i], kwargs_lens, k=self.k_list[i]
)
x_source.append(x)
y_source.append(y)
x_source = np.mean(x_source)
y_source = np.mean(y_source)
return np.array(x_source), np.array(y_source)
def image_amplitude(
self,
kwargs_ps,
kwargs_lens=None,
x_pos=None,
y_pos=None,
magnification_limit=None,
kwargs_lens_eqn_solver=None,
):
"""Image brightness amplitudes.
:param kwargs_ps: keyword arguments of the point source model
:param kwargs_lens: keyword argument list of the lens model(s), only used when
requiring the lens equation solver
:param x_pos: pre-computed image position (no lens equation solver applied)
:param y_pos: pre-computed image position (no lens equation solver applied)
:param magnification_limit: float >0 or None, if float is set and additional
images are computed, only those images will be computed that exceed the
lensing magnification (absolute value) limit
:param kwargs_lens_eqn_solver: keyword arguments specifying the numerical
settings for the lens equation solver see LensEquationSolver() class for
details
:return: array of image amplitudes
"""
self._lens_model.change_source_redshift(self._redshift)
if self._fixed_magnification:
if x_pos is not None and y_pos is not None:
ra_image, dec_image = x_pos, y_pos
else:
ra_image, dec_image = self.image_position(
kwargs_ps,
kwargs_lens,
magnification_limit=magnification_limit,
kwargs_lens_eqn_solver=kwargs_lens_eqn_solver,
)
if self.k_list is None:
mag = self._lens_model.magnification(ra_image, dec_image, kwargs_lens)
else:
mag = []
for i in range(len(ra_image)):
mag.append(
self._lens_model.magnification(
ra_image[i], dec_image[i], kwargs_lens, k=self.k_list[i]
)
)
point_amp = kwargs_ps["source_amp"] * np.abs(mag)
else:
point_amp = kwargs_ps["point_amp"]
if x_pos is not None:
point_amp = _expand_to_array(point_amp, len(x_pos))
return np.array(point_amp)
def source_amplitude(self, kwargs_ps, kwargs_lens=None):
"""Intrinsic brightness amplitude of point source When brightnesses are defined
in magnified on-sky positions, the intrinsic brightness is computed as the mean
in the magnification corrected image position brightnesses.
:param kwargs_ps: keyword arguments of the point source model
:param kwargs_lens: keyword argument list of the lens model(s), used when
brightness are defined in magnified on-sky positions
:return: brightness amplitude (as numpy array)
"""
if self._fixed_magnification:
source_amp = kwargs_ps["source_amp"]
else:
self._lens_model.change_source_redshift(self._redshift)
ra_image, dec_image = kwargs_ps["ra_image"], kwargs_ps["dec_image"]
if self.k_list is None:
mag = self._lens_model.magnification(ra_image, dec_image, kwargs_lens)
else:
mag = []
for i in range(len(ra_image)):
mag.append(
self._lens_model.magnification(
ra_image[i], dec_image[i], kwargs_lens, k=self.k_list[i]
)
)
point_amp = kwargs_ps["point_amp"]
source_amp = np.mean(np.array(point_amp) / np.array(np.abs(mag)))
return np.array(source_amp)
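# Hedged usage sketch: the keyword arguments this point-source type expects.
# The numerical values below are purely illustrative.
def _example_kwargs_ps_sketch(fixed_magnification=False):
    kwargs_ps = {
        "ra_image": np.array([1.1, -0.9]),
        "dec_image": np.array([0.4, -0.3]),
    }
    if fixed_magnification:
        kwargs_ps["source_amp"] = 10.0
    else:
        kwargs_ps["point_amp"] = np.array([10.0, 7.0])
    return kwargs_ps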
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@PointSource@Types@[email protected]_END.py
|
{
"filename": "_textsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/icicle/_textsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="textsrc", parent_name="icicle", **kwargs):
super(TextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@icicle@[email protected]_END.py
|
{
"filename": "ident_result.py",
"repo_name": "yqiuu/spectuner",
"repo_path": "spectuner_extracted/spectuner-master/spectuner/identify/ident_result.py",
"type": "Python"
}
|
from functools import partial
from collections import defaultdict
from dataclasses import dataclass, field
from copy import deepcopy
import numpy as np
import pandas as pd
def compute_T_single_data(mol_store, config, params, freq_data):
param_mgr = mol_store.create_parameter_manager(config)
T_single_data = defaultdict(dict)
for item in mol_store.mol_list:
for mol in item["molecules"]:
params_single = param_mgr.get_subset_params([mol], params)
mol_store_single = mol_store.select_subset([mol])
T_single_data[item["id"]][mol] \
= mol_store_single.compute_T_pred_data(params_single, freq_data, config)
T_single_data = dict(T_single_data)
return T_single_data
def sum_T_single_data(T_single_dict, T_back, key=None):
    """Sum single-molecule spectra above the background T_back, optionally for one id."""
    # Get a test dict to infer the number of spectral segments
for sub_dict in T_single_dict.values():
for T_single_data in sub_dict.values():
break
break
T_ret_data = [None for _ in T_single_data]
def sum_sub(target_dict):
for T_single_data in target_dict.values():
for i_segment, T_single in enumerate(T_single_data):
if T_single is None:
continue
if T_ret_data[i_segment] is None:
T_ret_data[i_segment] = T_back
T_ret_data[i_segment] = T_ret_data[i_segment] + T_single - T_back
if key is not None:
sum_sub(T_single_dict[key])
return T_ret_data
for sub_dict in T_single_dict.values():
sum_sub(sub_dict)
return T_ret_data
def derive_df_mol_master_from_res_dict(res_dict):
df_mol_master = pd.concat([
res.derive_df_mol_master() for res in res_dict.values() if res is not None
])
df_mol_master.sort_values(
["t3_score", "t2_score", "t1_score", "num_tp_i"], ascending=False, inplace=True
)
df_mol_master.reset_index(drop=True, inplace=True)
return df_mol_master
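# LineTable bookkeeping: the array-valued fields (freq, span, loss, score, error,
# norm) hold one entry per spectral line, while the list-valued fields (frac, id,
# name) hold the contributing molecule fractions/ids/names and are None for lines
# without an identification.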
@dataclass
class LineTable:
freq: np.ndarray = field(default_factory=partial(np.zeros, 0))
span: np.ndarray = field(default_factory=partial(np.zeros, (0, 2)))
loss: np.ndarray = field(default_factory=partial(np.zeros, 0))
score: np.ndarray = field(default_factory=partial(np.zeros, 0))
frac: list = field(default_factory=list)
id: list = field(default_factory=list)
name: list = field(default_factory=list)
error: np.ndarray = field(default_factory=partial(np.zeros, 0))
norm: np.ndarray = field(default_factory=partial(np.zeros, 0))
def __len__(self):
return len(self.freq)
def append(self, line_table, sparsity=None):
self.freq = np.append(self.freq, line_table.freq)
self.span = np.vstack([self.span, line_table.span])
for name in ["loss", "score", "error", "norm"]:
if sparsity is None:
arr_new = getattr(line_table, name)
else:
inds, num = sparsity
arr_tmp = getattr(line_table, name)
arr_new = np.full(num, np.nan)
if len(inds) > 0:
arr_new[inds] = arr_tmp
setattr(self, name, np.append(getattr(self, name), arr_new))
for name in ["frac", "id", "name"]:
if sparsity is None:
list_new = getattr(line_table, name)
else:
inds, num = sparsity
list_new = [None for _ in range(num)]
for idx, val in zip(inds, getattr(line_table, name)):
list_new[idx] = val
getattr(self, name).extend(list_new)
def extract(self, inds, is_sparse):
if is_sparse:
line_table_new = deepcopy(self)
inds_c = [idx for idx in range(len(self)) if idx not in inds]
for name in ["loss", "score", "error", "norm"]:
getattr(line_table_new, name)[inds_c] = np.nan
for name in ["frac", "id", "name"]:
for idx in inds_c:
getattr(line_table_new, name)[idx] = None
else:
line_table_new = LineTable()
for name in ["freq", "span", "loss", "score", "error", "norm"]:
setattr(line_table_new, name, getattr(self, name)[inds])
for name in ["frac", "id", "name"]:
tmp = []
for idx in inds:
tmp.append(getattr(self, name)[idx])
setattr(line_table_new, name, tmp)
return line_table_new
def save_line_table(self, fname):
fp = open(fname, "w")
fp.write("# Line ID\n")
fp.write("# Frequency [MHz]\n")
fp.write("# Identified Species\n")
idx = 0
for freq, name_list in zip(self.freq, self.name):
if name_list is None:
continue
for name in name_list:
fp.write("{},{:.2f},{}\n".format(idx, freq, name))
idx += 1
fp.close()
@dataclass
class IdentResult:
mol_data: dict
line_table: LineTable
line_table_fp: LineTable
T_single_dict: dict
freq_data: list
T_back: float
def __post_init__(self):
self._add_score_data()
self._add_count_data()
def __repr__(self):
text = "Molecules:\n"
for key, sub_dict in self.mol_data.items():
for cols in sub_dict.values():
master_name = cols["master_name"]
break
text += "id={}, {}\n".format(key, master_name)
for name in sub_dict:
text += " - {}\n".format(name)
return text
def _add_score_data(self):
def increase_score_dict(line_table, score_dict):
scores = line_table.score
losses = line_table.loss
frac_list = line_table.frac
id_list = line_table.id
name_list = line_table.name
for i_line in range(len(frac_list)):
if id_list[i_line] is None:
continue
for i_blen in range(len(frac_list[i_line])):
key = id_list[i_line][i_blen]
name = name_list[i_line][i_blen]
frac = frac_list[i_line][i_blen]
loss = losses[i_line]*frac
score = scores[i_line]*frac
score_dict[key][name]["loss"] += loss
score_dict[key][name]["score"] += score
dict_factory = lambda: {"loss": 0., "score": 0.}
score_dict = defaultdict(lambda: defaultdict(dict_factory))
increase_score_dict(self.line_table, score_dict)
increase_score_dict(self.line_table_fp, score_dict)
self.update_mol_data(score_dict)
def _add_count_data(self):
def increase_count_dict(line_table, count_dict, target):
for id_list, name_list in zip(line_table.id, line_table.name):
if id_list is None:
continue
for key, name in zip(id_list, name_list):
count_dict[key][name][target] += 1
def increase_count_i_dict(line_table, count_dict, target):
for id_list, name_list in zip(line_table.id, line_table.name):
if id_list is None:
continue
if len(id_list) == 1:
count_dict[id_list[0]][name_list[0]][target] += 1
dict_factory = lambda: {"num_tp": 0, "num_tp_i": 0, "num_fp": 0}
count_dict = defaultdict(lambda: defaultdict(dict_factory))
increase_count_dict(self.line_table, count_dict, "num_tp")
increase_count_dict(self.line_table_fp, count_dict, "num_fp")
increase_count_i_dict(self.line_table, count_dict, "num_tp_i")
self.update_mol_data(count_dict)
def is_empty(self):
return len(self.mol_data) == 0
def update_mol_data(self, data):
for key, sub_dict in self.mol_data.items():
for name, cols in sub_dict.items():
cols.update(data[key][name])
def get_aggregate_prop(self, key, prop_name):
return sum([cols[prop_name] for cols in self.mol_data[key].values()])
def derive_stats_dict(self):
stats_dict = {
"n_master": len(self.mol_data),
"n_mol": sum([len(sub_dict) for sub_dict in self.mol_data.values()])
}
n_idn = 0
for names in self.line_table.name:
if names is not None:
n_idn += 1
n_tot = len(self.line_table.freq)
recall = n_idn/n_tot
stats_dict.update(n_tot=n_tot, n_idn=n_idn, recall=recall)
return stats_dict
def derive_df_mol(self, max_order=3):
tx_score_dict = self.compute_tx_score(max_order, use_id=False)
data = []
for key, sub_dict in self.mol_data.items():
for name, cols in sub_dict.items():
data.append({"id": key, "name": name, **cols, **tx_score_dict[name]})
df = pd.DataFrame.from_dict(data)
df.sort_values(
["id", "num_tp_i", "score"],
ascending=[True, False, False],
inplace=True
)
return df
def derive_df_mol_master(self, max_order=3):
tx_score_dict = self.compute_tx_score(max_order, use_id=True)
data = []
for key, sub_dict in self.mol_data.items():
for cols in sub_dict.values():
master_name = cols["master_name"]
break
cols = {"id": key, "master_name": master_name}
for prop_name in ["loss", "score", "num_tp", "num_tp_i", "num_fp"]:
cols[prop_name] = self.get_aggregate_prop(key, prop_name)
if key in tx_score_dict:
cols.update(tx_score_dict[key])
data.append(cols)
df = pd.DataFrame.from_dict(data)
df.sort_values("id")
return df
def compute_tx_score(self, max_order, use_id):
def compute(score_list, order):
if len(score_list) < order:
return 0.
return score_list[order - 1]
score_list_dict = defaultdict(list)
if use_id:
line_table_key = self.line_table.id
else:
line_table_key = self.line_table.name
iterator = zip(line_table_key, self.line_table.score, self.line_table.frac)
for id_list, score, frac in iterator:
if id_list is None:
continue
for key, score_sub in zip(id_list, score*frac):
score_list_dict[key].append(score_sub)
if use_id:
key_list = self.mol_data.keys()
else:
key_list = set()
for sub_dict in self.mol_data.values():
key_list.update(sub_dict.keys())
score_dict = {}
for key in key_list:
if key in score_list_dict:
score_list = score_list_dict[key]
score_list.sort(reverse=True)
score_dict[key] = {f"t{order}_score": compute(score_list, order)
for order in range(1, max_order + 1)}
else:
score_dict[key] = {f"t{order}_score": 0.
for order in range(1, max_order + 1)}
return score_dict
def extract(self, key):
mol_data_new = {key: deepcopy(self.mol_data[key])}
#
inds = self.filter_name_list(set((key,)), self.line_table.id)
line_table_new = self.line_table.extract(inds, is_sparse=True)
#
inds = self.filter_name_list(set((key,)), self.line_table_fp.id)
line_table_fp_new = self.line_table_fp.extract(inds, is_sparse=False)
#
T_single_dict_new = {key: deepcopy(self.T_single_dict[key])}
return IdentResult(
mol_data=mol_data_new,
line_table=line_table_new,
line_table_fp=line_table_fp_new,
T_single_dict=T_single_dict_new,
freq_data=self.freq_data,
T_back=self.T_back,
)
def filter_name_list(self, target_set, name_list):
inds = []
for idx, names in enumerate(name_list):
if names is None:
continue
if not target_set.isdisjoint(set(names)):
inds.append(idx)
return inds
def get_T_pred(self, key=None, name=None):
if key is not None and name is not None:
return self.T_single_dict[key][name]
return sum_T_single_data(self.T_single_dict, self.T_back, key)
def get_unknown_lines(self):
freqs = []
for freq, names in zip(self.line_table.freq, self.line_table.name):
if names is None:
freqs.append(freq)
freqs = np.asarray(freqs)
return freqs
def get_identified_lines(self):
freqs = []
for freq, names in zip(self.line_table.freq, self.line_table.name):
if names is not None:
freqs.append(freq)
freqs = np.asarray(freqs)
return freqs
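# Minimal sketch (illustrative values only): building a LineTable by hand and
# appending it to an empty one.
def _example_line_table_sketch():
    table = LineTable()
    detected = LineTable(
        freq=np.array([100100.0, 200200.0]),
        span=np.array([[100090.0, 100110.0], [200180.0, 200220.0]]),
        loss=np.array([0.1, 0.2]),
        score=np.array([5.0, 3.0]),
        frac=[[1.0], [1.0]],
        id=[[0], [1]],
        name=[["CH3OH"], ["CO"]],
        error=np.array([0.01, 0.02]),
        norm=np.array([1.0, 1.0]),
    )
    table.append(detected)
    return table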
|
yqiuuREPO_NAMEspectunerPATH_START.@spectuner_extracted@spectuner-master@spectuner@identify@[email protected]_END.py
|
{
"filename": "50c966c5427a_more_antenna_stats.py",
"repo_name": "HERA-Team/hera_mc",
"repo_path": "hera_mc_extracted/hera_mc-main/alembic/versions/50c966c5427a_more_antenna_stats.py",
"type": "Python"
}
|
"""more_antenna_stats
Revision ID: 50c966c5427a
Revises: edecd502cdd8
Create Date: 2019-07-19 19:59:08.371361+00:00
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "50c966c5427a"
down_revision = "edecd502cdd8"
branch_labels = None
depends_on = None
def upgrade():
op.add_column("antenna_status", sa.Column("fem_current", sa.Float(), nullable=True))
op.add_column("antenna_status", sa.Column("fem_id", sa.String(), nullable=True))
op.add_column("antenna_status", sa.Column("fem_temp", sa.Float(), nullable=True))
op.add_column("antenna_status", sa.Column("fem_voltage", sa.Float(), nullable=True))
op.add_column("antenna_status", sa.Column("histogram", sa.String(), nullable=True))
op.add_column(
"antenna_status", sa.Column("histogram_bin_centers", sa.String(), nullable=True)
)
op.add_column("antenna_status", sa.Column("pam_current", sa.Float(), nullable=True))
op.add_column("antenna_status", sa.Column("pam_id", sa.String(), nullable=True))
op.add_column("antenna_status", sa.Column("pam_voltage", sa.Float(), nullable=True))
def downgrade():
op.drop_column("antenna_status", "pam_voltage")
op.drop_column("antenna_status", "pam_id")
op.drop_column("antenna_status", "pam_current")
op.drop_column("antenna_status", "histogram_bin_centers")
op.drop_column("antenna_status", "histogram")
op.drop_column("antenna_status", "fem_voltage")
op.drop_column("antenna_status", "fem_temp")
op.drop_column("antenna_status", "fem_id")
op.drop_column("antenna_status", "fem_current")
|
HERA-TeamREPO_NAMEhera_mcPATH_START.@hera_mc_extracted@hera_mc-main@alembic@versions@[email protected]_END.py
|
{
"filename": "truncnorm.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/scipy/stats/truncnorm.py",
"type": "Python"
}
|
# Copyright 2022 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: import <name> as <name> is required for names to be exported.
# See PEP 484 & https://github.com/jax-ml/jax/issues/7570
from jax._src.scipy.stats.truncnorm import (
cdf as cdf,
logcdf as logcdf,
logpdf as logpdf,
pdf as pdf,
logsf as logsf,
sf as sf
)
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@scipy@[email protected]@.PATH_END.py
|
{
"filename": "test_cosmicray.py",
"repo_name": "astropy/ccdproc",
"repo_path": "ccdproc_extracted/ccdproc-main/ccdproc/tests/test_cosmicray.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.utils import NumpyRNGContext
from astropy.utils.exceptions import AstropyDeprecationWarning
from ccdproc.core import (
background_deviation_box,
background_deviation_filter,
cosmicray_lacosmic,
cosmicray_median,
)
from ccdproc.tests.pytest_fixtures import ccd_data as ccd_data_func
pytest.importorskip("astroscrappy", reason="astroscrappy not installed")
DATA_SCALE = 5.3
NCRAYS = 30
def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS):
size = data.shape[0]
with NumpyRNGContext(125):
crrays = np.random.randint(0, size, size=(ncrays, 2))
# use (threshold + 15) below to make sure cosmic ray is well above the
# threshold no matter what the random number generator returns
crflux = 10 * scale * np.random.random(NCRAYS) + (threshold + 15) * scale
for i in range(ncrays):
y, x = crrays[i]
data.data[y, x] = crflux[i]
def test_cosmicray_lacosmic():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 10
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
_, crarr = cosmicray_lacosmic(ccd_data.data, sigclip=5.9)
# check the number of cosmic rays detected
# Note that to get this to succeed reliably meant tuning
# both sigclip and the threshold
assert crarr.sum() == NCRAYS
def test_cosmicray_lacosmic_ccddata():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9)
# check the number of cosmic rays detected
# Note that to get this to succeed reliably meant tuning
# both sigclip and the threshold
assert nccd_data.mask.sum() == NCRAYS
def test_cosmicray_lacosmic_check_data():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
with pytest.raises(TypeError):
noise = DATA_SCALE * np.ones_like(ccd_data.data)
cosmicray_lacosmic(10, noise)
@pytest.mark.parametrize("array_input", [True, False])
@pytest.mark.parametrize("gain_correct_data", [True, False])
def test_cosmicray_gain_correct(array_input, gain_correct_data):
# Add regression check for #705 and for the new gain_correct
# argument.
# The issue is that cosmicray_lacosmic gain-corrects the
# data and returns that gain corrected data. That is not the
# intent...
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
# No units here on purpose.
gain = 2.0
if array_input:
new_data, cr_mask = cosmicray_lacosmic(
ccd_data.data, gain=gain, gain_apply=gain_correct_data
)
else:
new_ccd = cosmicray_lacosmic(ccd_data, gain=gain, gain_apply=gain_correct_data)
new_data = new_ccd.data
cr_mask = new_ccd.mask
# Fill masked locations with 0 since there is no simple relationship
# between the original value and the corrected value.
orig_data = np.ma.array(ccd_data.data, mask=cr_mask).filled(0)
new_data = np.ma.array(new_data.data, mask=cr_mask).filled(0)
if gain_correct_data:
gain_for_test = gain
else:
gain_for_test = 1.0
np.testing.assert_allclose(gain_for_test * orig_data, new_data)
def test_cosmicray_lacosmic_accepts_quantity_gain():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
# The units below are the point of the test
gain = 2.0 * u.electron / u.adu
_ = cosmicray_lacosmic(ccd_data, gain=gain, gain_apply=True)
def test_cosmicray_lacosmic_accepts_quantity_readnoise():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
gain = 2.0 * u.electron / u.adu
# The units below are the point of this test
readnoise = 6.5 * u.electron
_ = cosmicray_lacosmic(ccd_data, gain=gain, gain_apply=True, readnoise=readnoise)
def test_cosmicray_lacosmic_detects_inconsistent_units():
# This is intended to detect cases like a ccd with units
# of adu, a readnoise in electrons and a gain in adu / electron.
# That is not internally consistent.
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
ccd_data.unit = "adu"
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
readnoise = 6.5 * u.electron
# The units below are deliberately incorrect.
gain = 2.0 * u.adu / u.electron
with pytest.raises(ValueError) as e:
cosmicray_lacosmic(ccd_data, gain=gain, gain_apply=True, readnoise=readnoise)
assert "Inconsistent units" in str(e.value)
def test_cosmicray_lacosmic_warns_on_ccd_in_electrons():
# Check that an input ccd in electrons raises a warning.
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
# The unit below is important for the test; this unit on
# input is supposed to raise a warning.
ccd_data.unit = u.electron
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
# No units here on purpose.
gain = 2.0
# Don't really need to set this (6.5 is the default value) but want to
# make lack of units explicit.
readnoise = 6.5
with pytest.warns(UserWarning, match="Image unit is electron"):
cosmicray_lacosmic(ccd_data, gain=gain, gain_apply=True, readnoise=readnoise)
# The values for inbkg and invar are DELIBERATELY BAD. They are supposed to be
# arrays, so if detect_cosmics is called with these bad values a ValueError
# will be raised, which we can check for.
@pytest.mark.parametrize(
"new_args", [dict(inbkg=5), dict(invar=5), dict(inbkg=5, invar=5)]
)
def test_cosmicray_lacosmic_invar_inbkg(new_args):
# This IS NOT TESTING FUNCTIONALITY; it is simply testing
# that calling with the new keyword arguments to astroscrappy
# 1.1.0 raises no error.
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
with pytest.raises(TypeError):
cosmicray_lacosmic(ccd_data, sigclip=5.9, **new_args)
def test_cosmicray_median_check_data():
with pytest.raises(TypeError):
ndata, crarr = cosmicray_median(10, thresh=5, mbox=11, error_image=DATA_SCALE)
def test_cosmicray_median():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
ndata, crarr = cosmicray_median(
ccd_data.data, thresh=5, mbox=11, error_image=DATA_SCALE
)
# check the number of cosmic rays detected
assert crarr.sum() == NCRAYS
def test_cosmicray_median_ccddata():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
ccd_data.uncertainty = ccd_data.data * 0.0 + DATA_SCALE
nccd = cosmicray_median(ccd_data, thresh=5, mbox=11, error_image=None)
# check the number of cosmic rays detected
assert nccd.mask.sum() == NCRAYS
def test_cosmicray_median_masked():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
data = np.ma.masked_array(ccd_data.data, (ccd_data.data > -1e6))
ndata, crarr = cosmicray_median(data, thresh=5, mbox=11, error_image=DATA_SCALE)
# check the number of cosmic rays detected
assert crarr.sum() == NCRAYS
def test_cosmicray_median_background_None():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
data, crarr = cosmicray_median(ccd_data.data, thresh=5, mbox=11, error_image=None)
# check the number of cosmic rays detected
assert crarr.sum() == NCRAYS
def test_cosmicray_median_gbox():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
scale = DATA_SCALE # yuck. Maybe use pytest.parametrize?
threshold = 5
add_cosmicrays(ccd_data, scale, threshold, ncrays=NCRAYS)
error = ccd_data.data * 0.0 + DATA_SCALE
data, crarr = cosmicray_median(
ccd_data.data, error_image=error, thresh=5, mbox=11, rbox=0, gbox=5
)
data = np.ma.masked_array(data, crarr)
assert crarr.sum() > NCRAYS
assert abs(data.std() - scale) < 0.1
def test_cosmicray_median_rbox():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
scale = DATA_SCALE # yuck. Maybe use pytest.parametrize?
threshold = 5
add_cosmicrays(ccd_data, scale, threshold, ncrays=NCRAYS)
error = ccd_data.data * 0.0 + DATA_SCALE
data, crarr = cosmicray_median(
ccd_data.data, error_image=error, thresh=5, mbox=11, rbox=21, gbox=5
)
assert data[crarr].mean() < ccd_data.data[crarr].mean()
assert crarr.sum() > NCRAYS
def test_cosmicray_median_background_deviation():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
with pytest.raises(TypeError):
cosmicray_median(ccd_data.data, thresh=5, mbox=11, error_image="blank")
def test_background_deviation_box():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
bd = background_deviation_box(cd, 25)
assert abs(bd.mean() - scale) < 0.10
def test_background_deviation_box_fail():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
with pytest.raises(ValueError):
background_deviation_box(cd, 0.5)
def test_background_deviation_filter():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
bd = background_deviation_filter(cd, 25)
assert abs(bd.mean() - scale) < 0.10
def test_background_deviation_filter_fail():
with NumpyRNGContext(123):
scale = 5.3
cd = np.random.normal(loc=0, size=(100, 100), scale=scale)
with pytest.raises(ValueError):
background_deviation_filter(cd, 0.5)
# This test can be removed in ccdproc 3.0 when support for old
# astroscrappy is removed.
def test_cosmicray_lacosmic_pssl_deprecation_warning():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
with pytest.warns(AstropyDeprecationWarning):
cosmicray_lacosmic(ccd_data, pssl=1.0)
def test_cosmicray_lacosmic_pssl_and_inbkg_fails():
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
with pytest.raises(ValueError) as err:
# An error should be raised if both pssl and inbkg are provided
with pytest.warns(AstropyDeprecationWarning):
# The deprecation warning is expected and should be captured
cosmicray_lacosmic(ccd_data, pssl=3, inbkg=ccd_data.data)
assert "pssl and inbkg" in str(err)
def test_cosmicray_lacosmic_pssl_does_not_fail():
# This test is a copy/paste of test_cosmicray_lacosmic_ccddata
# except with pssl=0.0001 as an argument. Subtracting nearly zero from
# the background should have no effect. The test is really
# to make sure that passing in pssl does not lead to an error
# since the new interface does not include pssl.
ccd_data = ccd_data_func(data_scale=DATA_SCALE)
threshold = 5
add_cosmicrays(ccd_data, DATA_SCALE, threshold, ncrays=NCRAYS)
noise = DATA_SCALE * np.ones_like(ccd_data.data)
ccd_data.uncertainty = noise
with pytest.warns(AstropyDeprecationWarning):
# The deprecation warning is expected and should be captured
nccd_data = cosmicray_lacosmic(ccd_data, sigclip=5.9, pssl=0.0001)
# check the number of cosmic rays detected
# Note that to get this to succeed reliably meant tuning
# both sigclip and the threshold
assert nccd_data.mask.sum() == NCRAYS
|
astropyREPO_NAMEccdprocPATH_START.@ccdproc_extracted@ccdproc-main@ccdproc@tests@[email protected]_END.py
|
{
"filename": "_meta.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattercarpet/_meta.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="meta", parent_name="scattercarpet", **kwargs):
super(MetaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattercarpet@[email protected]_END.py
|
{
"filename": "WISPLFDatabaseManager.py",
"repo_name": "HSTWISP/wisp_analysis",
"repo_path": "wisp_analysis_extracted/wisp_analysis-master/WISPLFDatabaseManager.py",
"type": "Python"
}
|
import datetime
import numpy as np
import sqlite3
class WISPLFDatabaseManager:
"""
Utility class to manage persistence of line identification data during the
WISP line-finding process.
"""
validMutableFlags = [
'ZEROTH',
'CONTIN',
'MISC'
]
validFlags = [
'CONTAM',
'REJECT'
] + validMutableFlags
validContamFlags = ['o2',
'hg',
'hb',
'o3',
'ha',
's2',
's31',
's32',
'he1',
'c']
tableNames = ['catalogue',
'annotations',
'flags'
]
lineListHeadings = [
'ParNo',
'ObjID',
'RA',
'Dec',
'Jmagnitude [99.0 denotes no detection]',
'Hmagnitude [99.0 denotes no detection]',
'A_IMAGE',
'B_IMAGE',
'redshift',
'redshift_err',
'dz_oiii',
'dz_oii',
'dz_siii_he1',
'G141_FWHM_Obs [Angs]',
'G141_FWHM_Obs_err',
'oii_flux',
'oii_error',
'oii_EW_obs',
'oii_contam',
'hg_flux',
'hg_err',
'hg_EW_obs',
'hg_contam',
'hb_flux',
'hb_err',
'hb_EW_obs',
'hb_contam',
'oiii_flux [both lines]',
'oiii_err [both lines]',
'oiii_EW_obs [both lines]',
'oiii_contam [both lines]',
'hanii_flux',
'hanii_err',
'hanii_EW_obs',
'hanii_contam',
'sii_flux',
'sii_err',
'sii_EW_obs',
'sii_contam',
'siii_9069_flux',
'siii_9069_err',
'siii_9069_EW_obs',
'siii_9069_contam',
'siii_9532_flux',
'siii_9532_err',
'siii_9532_EW_obs',
'siii_9532_contam',
'he1_10830_flux',
'he1_10830_err',
'he1_10830_EW_obs',
'he1_10830_contam',
]
fitResultKeys = ['redshift',
'redshift_err',
'dz_oiii',
'dz_oii',
'dz_siii_he1',
'fwhm_g141',
'fwhm_g141_err',
'oii_flux',
'oii_error',
'oii_ew_obs',
'hg_flux',
'hg_error',
'hg_ew_obs',
'hb_flux',
'hb_error',
'hb_ew_obs',
'oiii_flux',
'oiii_error',
'oiii_ew_obs',
'hanii_flux',
'hanii_error',
'hanii_ew_obs',
'sii_flux',
'sii_error',
'sii_ew_obs',
'siii_9069_flux',
'siii_9069_error',
'siii_9069_ew_obs',
'siii_9532_flux',
'siii_9532_error',
'siii_9532_ew_obs',
'he1_flux',
'he1_error',
'he1_ew_obs']
def __init__(self, dbFileNamePrefix):
# print('Using sqlite3 version {}'.format(sqlite3.version))
self.dbFileNamePrefix = dbFileNamePrefix
self.dbFileName = '{}_sqlite.db'.format(self.dbFileNamePrefix)
self.dbConnection = sqlite3.connect(self.dbFileName)
self.dbConnection.row_factory = sqlite3.Row
self.dbCursor = self.dbConnection.cursor()
self.checkAndInitTables()
def __del__(self):
self.dbConnection.commit()
self.dbConnection.close()
def checkAndInitTables(self):
self.createCatalogueTable()
self.createAnnotationTable()
self.createFlagTable()
def createCatalogueTable(self):
# print('Creating catalogue table in SQLLite database...')
self.dbCursor.execute('''CREATE TABLE IF NOT EXISTS catalogue (
ParNo int,
ObjID int,
RA real,
Dec real,
Jmagnitude real,
Hmagnitude real,
A_IMAGE real,
B_IMAGE real,
redshift real,
redshift_err real,
dz_oiii real,
dz_oii real,
dz_siii_he1 real,
fwhm_g141 real,
fwhm_g141_err real,
oii_flux real,
oii_error real,
oii_ew_obs real,
hg_flux real,
hg_error real,
hg_ew_obs real,
hb_flux real,
hb_error real,
hb_ew_obs real,
oiii_flux real,
oiii_error real,
oiii_ew_obs real,
hanii_flux real,
hanii_error real,
hanii_ew_obs real,
sii_flux real,
sii_error real,
sii_ew_obs real,
siii_9069_flux real,
siii_9069_error real,
siii_9069_ew_obs real,
siii_9532_flux real,
siii_9532_error real,
siii_9532_ew_obs real,
he1_flux real,
he1_error real,
he1_ew_obs real,
ContamFlag int,
EntryTime text
)''')
self.dbConnection.commit()
# print('Done.' if self.dbCursor.rowcount >
# 0 else 'Table was already present.')
def createAnnotationTable(self):
# print('Creating annotations table in SQLLite database...')
self.dbCursor.execute('''CREATE TABLE IF NOT EXISTS annotations (
ParNo int,
ObjID int,
Comment text
)''')
self.dbConnection.commit()
# print('Done.' if self.dbCursor.rowcount >
# 0 else 'Table was already present.')
def createFlagTable(self):
# print('Creating annotations table in SQLLite database...')
self.dbCursor.execute('''CREATE TABLE IF NOT EXISTS flags (
ParNo int,
ObjID int,
FlagName text,
FlagValue int
)''')
self.dbConnection.commit()
# print('Done.' if self.dbCursor.rowcount >
# 0 else 'Table was already present.')
def saveCatalogueEntry(self, catalogueEntryData):
query = 'INSERT INTO catalogue VALUES ({})'.format(
','.join(['?'] * len(catalogueEntryData))
)
# print(query)
self.dbCursor.execute(query, catalogueEntryData)
self.dbConnection.commit()
def loadCatalogueEntry(self, parNumber, objectId):
query = 'SELECT * FROM catalogue WHERE (ParNo=? AND ObjID=?)'
self.dbCursor.execute(query, (parNumber, objectId))
catalogueEntryData = self.dbCursor.fetchall()
if len(catalogueEntryData) < 1:
return None
# FIXME: For now, just return the last row added
nonFitResults = [catalogueEntryData[-1][key]
for key in catalogueEntryData[-1].keys() if key not in WISPLFDatabaseManager.fitResultKeys]
fitResults = {key: catalogueEntryData[-1][key]
for key in WISPLFDatabaseManager.fitResultKeys}
return tuple(nonFitResults), fitResults
def getMostRecentObject(self, parNumber=None):
query = '''SELECT ParNo, ObjID, EntryTime
FROM catalogue{}
ORDER BY DATETIME(EntryTime)
DESC LIMIT 1'''.format(' WHERE ParNo = ?' if parNumber is not None else '')
if parNumber is not None:
    self.dbCursor.execute(query, (parNumber,))
else:
    self.dbCursor.execute(query)
mostRecentEntry = self.dbCursor.fetchone()
if mostRecentEntry is not None:
return mostRecentEntry['ParNo'], mostRecentEntry['ObjId'], mostRecentEntry['EntryTime']
return None
def layoutCatalogueData(self,
parNumber,
objectId,
ra,
dec,
jMagnitude,
hMagnitude,
aImage,
bImage,
fitResults,
flagContent):
if fitResults is None:
fitResults = {
key: None for key in WISPLFDatabaseManager.fitResultKeys}
return (parNumber, objectId, ra, dec, jMagnitude, hMagnitude, aImage, bImage,
fitResults['redshift'],
fitResults['redshift_err'],
fitResults['dz_oiii'],
fitResults['dz_oii'],
fitResults['dz_siii_he1'],
fitResults['fwhm_g141'],
fitResults['fwhm_g141_err'],
fitResults['oii_flux'],
fitResults['oii_error'],
fitResults['oii_ew_obs'],
fitResults['hg_flux'],
fitResults['hg_error'],
fitResults['hg_ew_obs'],
fitResults['hb_flux'],
fitResults['hb_error'],
fitResults['hb_ew_obs'],
fitResults['oiii_flux'],
fitResults['oiii_error'],
fitResults['oiii_ew_obs'],
fitResults['hanii_flux'],
fitResults['hanii_error'],
fitResults['hanii_ew_obs'],
fitResults['sii_flux'],
fitResults['sii_error'],
fitResults['sii_ew_obs'],
fitResults['siii_9069_flux'],
fitResults['siii_9069_error'],
fitResults['siii_9069_ew_obs'],
fitResults['siii_9532_flux'],
fitResults['siii_9532_error'],
fitResults['siii_9532_ew_obs'],
fitResults['he1_flux'],
fitResults['he1_error'],
fitResults['he1_ew_obs'],
flagContent,
str(datetime.datetime.now().isoformat())
)
def saveAnnotation(self, annotationData):
query = 'INSERT INTO annotations VALUES ({})'.format(
','.join(['?'] * len(annotationData))
)
self.dbCursor.execute(query, annotationData)
self.dbConnection.commit()
def setFlagsFromString(self, parNumber, objId, flagDataString, delimiter=','):
# Assumes that string is a delimiter separated list of flags and values
flagDataTokens = [token.strip()
for token in flagDataString.split(delimiter)]
flagNames = flagDataTokens[::2]
flagValues = flagDataTokens[1::2]
flagData = list(zip(flagNames, flagValues))
self.setFlags(parNumber, objId, flagData)
def setFlags(self, parNumber, objId, flagData):
# Only attempt to set values for valid flags
flagData = [(parNumber, objId, flagDatum[0], flagDatum[1])
for flagDatum in flagData
if flagDatum[0] in WISPLFDatabaseManager.validFlags + WISPLFDatabaseManager.validContamFlags ]
# flagData can be a list of (flagName, flagValue) tuples
query = 'INSERT INTO flags VALUES (?, ?, ?, ?)'
self.dbCursor.executemany(query, flagData)
self.dbConnection.commit()
def resetDatabaseTables(self):
query = 'DROP TABLE IF EXISTS {}'
for tableName in WISPLFDatabaseManager.tableNames :
self.dbCursor.execute(query.format(tableName))
self.dbConnection.commit()
self.checkAndInitTables()
def writeCatalogueTextFile(self):
catalogueQuery = 'SELECT * FROM catalogue'
|
HSTWISPREPO_NAMEwisp_analysisPATH_START.@wisp_analysis_extracted@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/empirical_models/component_model_templates/__init__.py",
"type": "Python"
}
|
from .binary_galprop_models import *
from .scatter_models import *
from .prim_galprop_model import PrimGalpropModel
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@empirical_models@component_model_templates@[email protected]_END.py
|
{
"filename": "misc.py",
"repo_name": "handley-lab/anesthetic",
"repo_path": "anesthetic_extracted/anesthetic-master/anesthetic/plotting/_matplotlib/misc.py",
"type": "Python"
}
|
import pandas.plotting._matplotlib.misc as misc
from anesthetic.plotting._matplotlib.core import _compress_weights
def scatter_matrix(frame, *args, **kwargs):
# noqa: disable=D103
frame = _compress_weights(kwargs, frame)
return misc.scatter_matrix(frame, *args, **kwargs)
def bootstrap_plot(series, *args, **kwargs):
# noqa: disable=D103
series = _compress_weights(kwargs, series)
return misc.bootstrap_plot(series, *args, **kwargs)
|
handley-labREPO_NAMEanestheticPATH_START.@anesthetic_extracted@anesthetic-master@anesthetic@plotting@[email protected]@.PATH_END.py
|
{
"filename": "fpfs_sim.py",
"repo_name": "mr-superonion/FPFS",
"repo_path": "FPFS_extracted/FPFS-master/bin/fpfs_sim.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# FPFS shear estimator
# Copyright 20220312 Xiangchong Li.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
import schwimmbad
from argparse import ArgumentParser
from fpfs.tasks import SimulationTask
if __name__ == "__main__":
parser = ArgumentParser(description="fpfs simulation")
parser.add_argument(
"--min_id",
required=True,
type=int,
help="minimum id number, e.g. 0",
)
parser.add_argument(
"--max_id",
required=True,
type=int,
help="maximum id number, e.g. 4000",
)
parser.add_argument("--config", required=True, type=str, help="configure file name")
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--ncores",
dest="n_cores",
default=1,
type=int,
help="Number of processes (uses multiprocessing).",
)
group.add_argument(
"--mpi",
dest="mpi",
default=False,
action="store_true",
help="Run with MPI.",
)
args = parser.parse_args()
pool = schwimmbad.choose_pool(mpi=args.mpi, processes=args.n_cores)
worker = SimulationTask(args.config)
refs = list(range(args.min_id, args.max_id))
for r in pool.map(worker.run, refs):
pass
pool.close()
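    # Example invocation (file names are hypothetical):
    #   python fpfs_sim.py --config ./sim_config.ini --min_id 0 --max_id 100 --ncores 4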
|
mr-superonionREPO_NAMEFPFSPATH_START.@FPFS_extracted@FPFS-master@bin@[email protected]_END.py
|
{
"filename": "_hovertemplate.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolar/_hovertemplate.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="hovertemplate", parent_name="scatterpolar", **kwargs
):
super(HovertemplateValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolar@[email protected]_END.py
|
{
"filename": "_ticklen.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/polar/radialaxis/_ticklen.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="ticklen", parent_name="layout.polar.radialaxis", **kwargs
):
super(TicklenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@polar@radialaxis@[email protected]_END.py
|
{
"filename": "_value.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/volume/colorbar/tickformatstop/_value.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="value",
parent_name="volume.colorbar.tickformatstop",
**kwargs
):
super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@volume@colorbar@tickformatstop@[email protected]_END.py
|
{
"filename": "FILE_FORMATS.md",
"repo_name": "HITS-AIN/PINK",
"repo_path": "PINK_extracted/PINK-master/FILE_FORMATS.md",
"type": "Markdown"
}
|
# Binary Data Format
Every file may begin with any number of human-readable comment lines, each of which must start with the character `#`.
All indices are stored as 32-bit integers. The file format version is 2. Currently,
only 32-bit floating point numbers are supported as the data type, but the following type codes are reserved for the future:
- 0: float 32
- 1: float 64
- 2: integer 8
- 3: integer 16
- 4: integer 32
- 5: integer 64
- 6: unsigned integer 8
- 7: unsigned integer 16
- 8: unsigned integer 32
- 9: unsigned integer 64
The layout for data, som, and neuron can be
- 0: cartesian
- 1: hexagonal
followed by the dimensionality and the dimensions.
## Cartesian layout
The data layout is row-major (C-style), which means that the strides grow from right to left.
### 2-dim example 10x10
The layout description is: `0 2 10 10`, the strides (in elements) will be `(10, 1)`, and the element `[i, j]` is at memory position `i * 10 + j`.
### 3-dim example 10x10x3
The layout description is: `0 3 10 10 3`, the strides (in elements) will be `(30, 3, 1)`, and the element `[i, j, k]` is at memory position `i * 30 + j * 3 + k`.
## Hexagonal layout
### 2-dim example 11x11
The layout description is: `1 2 11 11`. The row- and column-dimension must be odd and equal (`d`). The memory position `p` can be calculated by
```python
r = (d - 1) // 2
row_size = [d - abs(r - i) for i in range(d)]
row_offset = [sum(row_size[0:i]) for i in range(d + 1)]
p = row_offset[i] + j
if r > i:
    p -= r - i
```
Please see also https://www.redblobgames.com/grids/hexagons/#map-storage
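For convenience, the formula above wrapped into a small helper (a sketch; `d` is the odd diameter, `i`/`j` the row/column index):
```python
def hex_memory_position(d, i, j):
    r = (d - 1) // 2
    row_size = [d - abs(r - k) for k in range(d)]
    row_offset = [sum(row_size[0:k]) for k in range(d + 1)]
    p = row_offset[i] + j
    if r > i:
        p -= r - i
    return p
```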
## Data file for training and mapping
```
<file format version> 0 <data-type> <number of entries> <data layout> <data>
```
Example:
A data file containing 1000 entries of a 2-dimensional image with 128x128 pixels looks like
```
2 0 0 1000 0 2 128 128 <16384000 floating point entries>
```
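A minimal writer sketch for such a file (assuming the header fields are stored as consecutive 32-bit integers, immediately followed by the raw float32 values):
```python
import numpy as np

def write_data_file(path, images):
    """Write `images` (float32 array of shape (n, h, w)) as a version-2 Cartesian data file."""
    n, h, w = images.shape
    header = np.array([2, 0, 0, n, 0, 2, h, w], dtype=np.int32)
    with open(path, "wb") as f:
        header.tofile(f)
        images.astype(np.float32).tofile(f)
```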
## SOM file
```
<file format version> 1 <data-type> <som layout> <neuron layout> <data>
```
## Mapping file
```
<file format version> 2 <data-type> <number of entries> <som layout> <data>
```
## Best rotation and flipping parameter file
```
<file format version> 3 <number of entries> <som layout> <data>
```
The data section contains a bool (is flipped) and a 32-bit float number (angle in radians) for each neuron.
|
HITS-AINREPO_NAMEPINKPATH_START.@PINK_extracted@PINK-master@[email protected]_END.py
|
{
"filename": "train.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/references/segmentation/train.py",
"type": "Python"
}
|
import datetime
import os
import time
import warnings
import presets
import torch
import torch.utils.data
import torchvision
import utils
from coco_utils import get_coco
from torch import nn
from torch.optim.lr_scheduler import PolynomialLR
from torchvision.transforms import functional as F, InterpolationMode
def get_dataset(args, is_train):
def sbd(*args, **kwargs):
kwargs.pop("use_v2")
return torchvision.datasets.SBDataset(*args, mode="segmentation", **kwargs)
def voc(*args, **kwargs):
kwargs.pop("use_v2")
return torchvision.datasets.VOCSegmentation(*args, **kwargs)
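    # Both wrappers drop `use_v2` before forwarding to the torchvision datasets;
    # only the COCO path supports the v2 transforms (main() rejects --use-v2 otherwise).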
paths = {
"voc": (args.data_path, voc, 21),
"voc_aug": (args.data_path, sbd, 21),
"coco": (args.data_path, get_coco, 21),
}
p, ds_fn, num_classes = paths[args.dataset]
image_set = "train" if is_train else "val"
ds = ds_fn(p, image_set=image_set, transforms=get_transform(is_train, args), use_v2=args.use_v2)
return ds, num_classes
def get_transform(is_train, args):
if is_train:
return presets.SegmentationPresetTrain(base_size=520, crop_size=480, backend=args.backend, use_v2=args.use_v2)
elif args.weights and args.test_only:
weights = torchvision.models.get_weight(args.weights)
trans = weights.transforms()
def preprocessing(img, target):
img = trans(img)
size = F.get_dimensions(img)[1:]
target = F.resize(target, size, interpolation=InterpolationMode.NEAREST)
return img, F.pil_to_tensor(target)
return preprocessing
else:
return presets.SegmentationPresetEval(base_size=520, backend=args.backend, use_v2=args.use_v2)
def criterion(inputs, target):
losses = {}
for name, x in inputs.items():
losses[name] = nn.functional.cross_entropy(x, target, ignore_index=255)
if len(losses) == 1:
return losses["out"]
return losses["out"] + 0.5 * losses["aux"]
def evaluate(model, data_loader, device, num_classes):
model.eval()
confmat = utils.ConfusionMatrix(num_classes)
metric_logger = utils.MetricLogger(delimiter=" ")
header = "Test:"
num_processed_samples = 0
with torch.inference_mode():
for image, target in metric_logger.log_every(data_loader, 100, header):
image, target = image.to(device), target.to(device)
output = model(image)
output = output["out"]
confmat.update(target.flatten(), output.argmax(1).flatten())
# FIXME need to take into account that the datasets
# could have been padded in distributed setup
num_processed_samples += image.shape[0]
confmat.reduce_from_all_processes()
num_processed_samples = utils.reduce_across_processes(num_processed_samples)
if (
hasattr(data_loader.dataset, "__len__")
and len(data_loader.dataset) != num_processed_samples
and torch.distributed.get_rank() == 0
):
# See FIXME above
warnings.warn(
f"It looks like the dataset has {len(data_loader.dataset)} samples, but {num_processed_samples} "
"samples were used for the validation, which might bias the results. "
"Try adjusting the batch size and / or the world size. "
"Setting the world size to 1 is always a safe bet."
)
return confmat
def train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, print_freq, scaler=None):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}"))
header = f"Epoch: [{epoch}]"
for image, target in metric_logger.log_every(data_loader, print_freq, header):
image, target = image.to(device), target.to(device)
with torch.cuda.amp.autocast(enabled=scaler is not None):
output = model(image)
loss = criterion(output, target)
optimizer.zero_grad()
if scaler is not None:
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
optimizer.step()
lr_scheduler.step()
metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
def main(args):
if args.backend.lower() != "pil" and not args.use_v2:
# TODO: Support tensor backend in V1?
raise ValueError("Use --use-v2 if you want to use the tv_tensor or tensor backend.")
if args.use_v2 and args.dataset != "coco":
raise ValueError("v2 is only support supported for coco dataset for now.")
if args.output_dir:
utils.mkdir(args.output_dir)
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
if args.use_deterministic_algorithms:
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
else:
torch.backends.cudnn.benchmark = True
dataset, num_classes = get_dataset(args, is_train=True)
dataset_test, _ = get_dataset(args, is_train=False)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test, shuffle=False)
else:
train_sampler = torch.utils.data.RandomSampler(dataset)
test_sampler = torch.utils.data.SequentialSampler(dataset_test)
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
sampler=train_sampler,
num_workers=args.workers,
collate_fn=utils.collate_fn,
drop_last=True,
)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=1, sampler=test_sampler, num_workers=args.workers, collate_fn=utils.collate_fn
)
model = torchvision.models.get_model(
args.model,
weights=args.weights,
weights_backbone=args.weights_backbone,
num_classes=num_classes,
aux_loss=args.aux_loss,
)
model.to(device)
if args.distributed:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
params_to_optimize = [
{"params": [p for p in model_without_ddp.backbone.parameters() if p.requires_grad]},
{"params": [p for p in model_without_ddp.classifier.parameters() if p.requires_grad]},
]
if args.aux_loss:
params = [p for p in model_without_ddp.aux_classifier.parameters() if p.requires_grad]
params_to_optimize.append({"params": params, "lr": args.lr * 10})
optimizer = torch.optim.SGD(params_to_optimize, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scaler = torch.cuda.amp.GradScaler() if args.amp else None
iters_per_epoch = len(data_loader)
main_lr_scheduler = PolynomialLR(
optimizer, total_iters=iters_per_epoch * (args.epochs - args.lr_warmup_epochs), power=0.9
)
if args.lr_warmup_epochs > 0:
warmup_iters = iters_per_epoch * args.lr_warmup_epochs
args.lr_warmup_method = args.lr_warmup_method.lower()
if args.lr_warmup_method == "linear":
warmup_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
optimizer, start_factor=args.lr_warmup_decay, total_iters=warmup_iters
)
elif args.lr_warmup_method == "constant":
warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
optimizer, factor=args.lr_warmup_decay, total_iters=warmup_iters
)
else:
raise RuntimeError(
f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant are supported."
)
lr_scheduler = torch.optim.lr_scheduler.SequentialLR(
optimizer, schedulers=[warmup_lr_scheduler, main_lr_scheduler], milestones=[warmup_iters]
)
else:
lr_scheduler = main_lr_scheduler
if args.resume:
checkpoint = torch.load(args.resume, map_location="cpu", weights_only=True)
model_without_ddp.load_state_dict(checkpoint["model"], strict=not args.test_only)
if not args.test_only:
optimizer.load_state_dict(checkpoint["optimizer"])
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
args.start_epoch = checkpoint["epoch"] + 1
if args.amp:
scaler.load_state_dict(checkpoint["scaler"])
if args.test_only:
# We disable the cudnn benchmarking because it can noticeably affect the accuracy
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes)
print(confmat)
return
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, args.print_freq, scaler)
confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes)
print(confmat)
checkpoint = {
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"args": args,
}
if args.amp:
checkpoint["scaler"] = scaler.state_dict()
utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth"))
utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(f"Training time {total_time_str}")
def get_args_parser(add_help=True):
import argparse
parser = argparse.ArgumentParser(description="PyTorch Segmentation Training", add_help=add_help)
parser.add_argument("--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path")
parser.add_argument("--dataset", default="coco", type=str, help="dataset name")
parser.add_argument("--model", default="fcn_resnet101", type=str, help="model name")
parser.add_argument("--aux-loss", action="store_true", help="auxiliary loss")
parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
parser.add_argument(
"-b", "--batch-size", default=8, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
)
parser.add_argument("--epochs", default=30, type=int, metavar="N", help="number of total epochs to run")
parser.add_argument(
"-j", "--workers", default=16, type=int, metavar="N", help="number of data loading workers (default: 16)"
)
parser.add_argument("--lr", default=0.01, type=float, help="initial learning rate")
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--wd",
"--weight-decay",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
dest="weight_decay",
)
parser.add_argument("--lr-warmup-epochs", default=0, type=int, help="the number of epochs to warmup (default: 0)")
parser.add_argument("--lr-warmup-method", default="linear", type=str, help="the warmup method (default: linear)")
parser.add_argument("--lr-warmup-decay", default=0.01, type=float, help="the decay for lr")
parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
parser.add_argument(
"--test-only",
dest="test_only",
help="Only test the model",
action="store_true",
)
parser.add_argument(
"--use-deterministic-algorithms", action="store_true", help="Forces the use of deterministic algorithms only."
)
# distributed training parameters
parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load")
parser.add_argument("--weights-backbone", default=None, type=str, help="the backbone weights enum name to load")
# Mixed precision training parameters
parser.add_argument("--amp", action="store_true", help="Use torch.cuda.amp for mixed precision training")
parser.add_argument("--backend", default="PIL", type=str.lower, help="PIL or tensor - case insensitive")
parser.add_argument("--use-v2", action="store_true", help="Use V2 transforms")
return parser
if __name__ == "__main__":
args = get_args_parser().parse_args()
main(args)
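# A minimal launch sketch (paths, GPU count and hyperparameters are illustrative,
# not a recommended recipe); every flag used below is defined in get_args_parser:
#
#   torchrun --nproc_per_node=8 train.py --dataset coco --data-path /path/to/coco \
#       --model fcn_resnet101 --aux-loss --lr 0.02 -b 4 --amp
#
# Single-process debugging run (assumes the dataset is already available locally):
#
#   python train.py --dataset coco --data-path /path/to/coco --model deeplabv3_resnet50 --aux-loss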
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@references@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astrosmili/smili",
"repo_path": "smili_extracted/smili-master/smili/geomodel/__init__.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'''
This is a sub-module of smili handling geometric models.
'''
__author__ = "Smili Developer Team"
from . import geomodel
from .geomodel import GeoModel, Gaussian
|
astrosmiliREPO_NAMEsmiliPATH_START.@smili_extracted@smili-master@smili@geomodel@[email protected]_END.py
|
{
"filename": "find_redshift_fix.py",
"repo_name": "sdss/mangadap",
"repo_path": "mangadap_extracted/mangadap-main/dev/find_redshift_fix.py",
"type": "Python"
}
|
import numpy
from astropy.io import fits
from matplotlib import pyplot
from mangadap.util.bitmask import BitMask
from mangadap.config import defaults
sdssbits_file = defaults.sdss_maskbits_file()
targ1bm = BitMask.from_par_file(sdssbits_file, 'MANGA_TARGET1')
targ2bm = BitMask.from_par_file(sdssbits_file, 'MANGA_TARGET2')
targ3bm = BitMask.from_par_file(sdssbits_file, 'MANGA_TARGET3')
hdu = fits.open('drpall-v3_1_1.fits')
#print(hdu['MANGA'].columns.names)
indx = hdu['MANGA'].data['z'] < 0
mngtarg1 = hdu['MANGA'].data['mngtarg1'][indx]
mngtarg2 = hdu['MANGA'].data['mngtarg2'][indx]
mngtarg3 = hdu['MANGA'].data['mngtarg3'][indx]
print('MANGA_TARGET1')
for b in numpy.unique(mngtarg1):
print(b, targ1bm.flagged_bits(b))
print('MANGA_TARGET2')
for b in numpy.unique(mngtarg2):
print(b, targ2bm.flagged_bits(b))
print('MANGA_TARGET3')
for b in numpy.unique(mngtarg3):
print(b, targ3bm.flagged_bits(b))
indx = numpy.where(indx)[0]
for i in indx:
print('{0:>5} {1:>5} {2:12.5f} {3:8.2f} {4:7.1f} {5:7.1f} {6:>15} {7:>15} {8:>15}'.format(
hdu['MANGA'].data['plate'][i], hdu['MANGA'].data['ifudsgn'][i],
hdu['MANGA'].data['z'][i], hdu['MANGA'].data['nsa_elpetro_ba'][i],
hdu['MANGA'].data['nsa_elpetro_phi'][i], hdu['MANGA'].data['nsa_elpetro_th50_r'][i],
','.join(targ1bm.flagged_bits(hdu['MANGA'].data['mngtarg1'][i])),
','.join(targ2bm.flagged_bits(hdu['MANGA'].data['mngtarg2'][i])),
','.join(targ3bm.flagged_bits(hdu['MANGA'].data['mngtarg3'][i]))))
print(numpy.sum(indx))
exit()
indx = numpy.where(hdu['MANGA'].data['PLATEIFU'] == '8261-12705')[0][0]
print(hdu['MANGA'].data['z'][indx])
print(hdu['MANGA'].data['nsa_sersic_ba'][indx], hdu['MANGA'].data['nsa_elpetro_ba'][indx])
print(hdu['MANGA'].data['nsa_sersic_phi'][indx], hdu['MANGA'].data['nsa_elpetro_phi'][indx])
print(hdu['MANGA'].data['nsa_sersic_th50'][indx], hdu['MANGA'].data['nsa_elpetro_th50_r'][indx])
exit()
pyplot.scatter(hdu['MANGA'].data['nsa_sersic_ba'], hdu['MANGA'].data['nsa_elpetro_ba'],
marker='.', s=30, lw=0, color='k')
pyplot.show()
|
sdssREPO_NAMEmangadapPATH_START.@mangadap_extracted@mangadap-main@dev@[email protected]_END.py
|
{
"filename": "_z.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/isosurface/_z.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="z", parent_name="isosurface", **kwargs):
super(ZValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
role=kwargs.pop("role", "data"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@isosurface@[email protected]_END.py
|
{
"filename": "_family.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcats/tickfont/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="family", parent_name="parcats.tickfont", **kwargs):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@parcats@tickfont@[email protected]_END.py
|
{
"filename": "_variant.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcoords/line/colorbar/tickfont/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="variant",
parent_name="parcoords.line.colorbar.tickfont",
**kwargs,
):
super(VariantValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop(
"values",
[
"normal",
"small-caps",
"all-small-caps",
"all-petite-caps",
"petite-caps",
"unicase",
],
),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@parcoords@line@colorbar@tickfont@[email protected]_END.py
|
{
"filename": "_geometry.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/torchvision/prototype/transforms/_geometry.py",
"type": "Python"
}
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.prototype.tv_tensors import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import (
_FillType,
_get_fill,
_setup_fill_arg,
_setup_size,
get_bounding_boxes,
has_any,
is_pure_tensor,
query_size,
)
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
tv_tensors.Image,
is_pure_tensor,
tv_tensors.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, tv_tensors.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_bounding_box_format(
bounding_boxes, old_format=format, new_format=tv_tensors.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, tv_tensors.Mask)):
inpt = tv_tensors.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, tv_tensors.BoundingBoxes):
inpt = tv_tensors.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
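# A minimal usage sketch, assuming a detection-style sample with an image, bounding
# boxes and a label (the names, sizes and values below are illustrative only):
#
# >>> import torch
# >>> from torchvision import tv_tensors
# >>> from torchvision.prototype.tv_tensors import Label
# >>> image = tv_tensors.Image(torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8))
# >>> boxes = tv_tensors.BoundingBoxes(
# ...     [[10, 10, 100, 100]], format="XYXY", canvas_size=(480, 640)
# ... )
# >>> sample = {"image": image, "boxes": boxes, "label": Label([1])}
# >>> out = FixedSizeCrop(size=(512, 512))(sample)  # crops if larger, pads up to 512x512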
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@torchvision@prototype@transforms@[email protected]_END.py
|
{
"filename": "terra.py",
"repo_name": "ramstojh/terra",
"repo_path": "terra_extracted/terra-master/terra/terra.py",
"type": "Python"
}
|
"""
A script for reproducing the observed abundances of a star from the solar abundances of Asplund + 2021.
"""
import pandas as pd
import numpy as np
from astropy import constants as const
import matplotlib.pyplot as plt
from tqdm import trange, tqdm
import pkg_resources
import os
pd.options.display.float_format = "{:,.2f}".format
Msun = const.M_sun.value # solar mass (1.9884099×10^30 kg)
Mearth = const.M_earth.value # terrestrial mass (5.9721679×10^24 kg)
HH = 1.00794*pow(10, 12) # atomic weight of Hydrogen
HHe = 4.00260*pow(10, 10.93) # atomic weight of Helium
#configuring plot
plotpar = {'axes.labelsize': 25,
'xtick.labelsize': 20,
'ytick.labelsize': 20,
'font.family': 'serif',
'axes.linewidth': 2,
'text.usetex': True}
plt.rcParams.update(plotpar)
#data path
this_dir, this_filename = os.path.split(__file__)
class pacha(object):
def __init__(self):
self.feh = None
self.mass = None
self.model = None
self.chondrites = None
self.data_input = None
self.Mcon = None
self.data_output = None
@classmethod
def cvmass(self, feh, mass, model=None):
        '''
        Calculate the convective mass of a star given a known [Fe/H] and mass.
        Note that Z is scaled to [Fe/H] in the model tables.
        The output is in solar mass units.
        Args
        feh : stellar metallicity [Fe/H]
        mass : stellar mass
        model : stellar evolution model ('yale' or 'lionel')
        '''
#choosing model
if not model:
model = 'yale.txt'
if model == 'yale':
model = 'yale.txt'
if model == 'lionel':
model = 'model-lionel.txt'
#reading data
PATH_model = os.path.join(this_dir, 'data', model)
data = np.loadtxt(PATH_model)
#double interpolation between mass and metallicity
for i in range(len(data[0])):
if feh == data[0][i]:
Z1 = feh
Z2 = 0.0
xp = i
break
elif data[0][i-1] < feh < data[0][i]:
Z1 = data[0][i-1]
Z2 = data[0][i]
xp = i
for j in range(10):
if mass == data[j][0]:
M1 = mass
M2 = 0.0
yp = j
break
elif data[j-1][0] < mass < data[j][0]:
M1 = data[j-1][0]
M2 = data[j][0]
yp = j
m1 = (Z2 - feh)/(Z2 - Z1)
m2 = (feh - Z1)/(Z2 - Z1)
b1 = (M2 - mass)/(M2 - M1)
b2 = (mass - M1)/(M2 - M1)
if Z1 == feh and M1 == mass:
CM = data[yp][xp]
elif Z1 == feh and M1 == data[yp-1][0]:
CM11 = data[yp-1][xp]
CM12 = 0
CM21 = data[yp][xp]
CM22 = 0
CM = (m1*CM11 + m2*CM12)*b1 + (m1*CM21 + m2*CM22)*b2
elif Z1 == data[0][xp-1] and M1 == mass:
CM11 = data[yp][xp-1]
CM12 = data[yp][xp]
CM21 = 0
CM22 = 0
CM = (m1*CM11 + m2*CM12)*b1 + (m1*CM21 + m2*CM22)*b2
elif Z1 == data[0][xp-1] and M1 == data[yp-1][0]:
CM11 = data[yp-1][xp-1]
CM12 = data[yp-1][xp]
CM21 = data[yp][xp-1]
CM22 = data[yp][xp]
CM = (m1*CM11 + m2*CM12)*b1 + (m1*CM21 + m2*CM22)*b2
return CM
@classmethod
def mod_abd(self, feh, mass, data_input=None, model=None, Mcon=None, chondrites=None, data_output=None):
        '''
        Args
        feh : stellar metallicity [Fe/H]
        mass : stellar mass
        data_input : CSV table containing the observed abundances
        model : stellar evolution model used to compute the convective mass
        Mcon : convective mass (computed from feh, mass and model if not given)
        chondrites : chondrite composition type (CI, CM, CO, CV, H, L, LL, EH or EL)
        data_output : name of the star, used to label the output files
        '''
if not Mcon:
Mcon = self.cvmass(feh, mass, model)
else:
Mcon = Mcon
if not chondrites:
chondrites = 'CM'
else:
chondrites = chondrites
if not data_output:
data_output = ''
else:
data_output = data_output
#solar and earth abundances
PATH_seab = os.path.join(this_dir, 'data', 'seab_data.csv')
seab = pd.read_csv(PATH_seab)
#chondrites
#the user can choose the chondrites type: CI, CM, CO, CV, H, L, LL, EH, EL
PATH_met = os.path.join(this_dir, 'data', 'Chondrites.csv')
met = pd.read_csv(PATH_met, usecols=[chondrites])
#observed abundances
tab_ab = pd.read_csv(data_input)
carbon = tab_ab[tab_ab.element == 'C']
tab_ab['[C/H]'] = tab_ab['[X/H]'] - carbon['[X/H]'].values
#determining abundances of Asplund21
seab['wat'] = seab['A']*10**(seab['Asplund21']+feh)
#seab['wat'] = seab['A']*10**(seab['Asplund09']+feh)
seab.at[0,'wat'] = HH
seab.at[1,'wat'] = HHe
#convective mass
seab['CMS'] = Mcon*Msun*seab['wat']/np.sum(seab['wat'])
        #meteoritic mass
met['Mass_CM'] = Mearth*met/np.sum(met)
#terrestrial mass
seab['mass_earth'] = Mearth*seab['Earth']/np.sum(seab['Earth'])
seab['Mass_CM'] = met['Mass_CM']
#getting observed and predicted elements
merged_tab = pd.merge(tab_ab, seab, how="inner", on=['element'])
total = []
for i in trange(len(merged_tab['Mass_CM']), desc='finding the best solution'):
eacm1 = []
TME = [] # Nmet + Nearth
for Nmet in np.arange(0., 20, 0.1):
for Nearth in np.arange(0., 20, 0.1):
#equation A.5 in Yana Galarza et al. 2016
EACM = np.log10(1 + (Nmet*merged_tab['Mass_CM'][i] + Nearth*merged_tab['mass_earth'][i])/merged_tab['CMS'][i])
eacm1.append(EACM)
TME.append([Nmet, Nearth])
#chi2
chi = (merged_tab['[C/H]'][i] - eacm1)**2/merged_tab['err_[X/H]'][i]**2
total.append(chi)
chisq = []
number = []
for i in range(len(total[0])):
soma = []
for j in range(len(merged_tab['Mass_CM'])):
soma.append(total[j][i])
chi = np.sum(soma)
dof = len(merged_tab['[C/H]']) - 1
Xred = chi / dof
chisq.append(Xred)
#print (Xred)
TTME = []
alpha = []
beta = []
for i in range(len(TME)):
TTME.append(np.sum(TME[i]))
alpha.append(TME[i][0])
beta.append(TME[i][1])
#placing all in data frame
data = {'chi2':chisq, 'Nmet': alpha, 'Nearth': beta, 'Total_mass': TTME}
df = pd.DataFrame(data)
#getting the best value given the minimum chi2
conv = df[df.chi2 == df['chi2'].min()]
if data_output == '':
conv.to_csv('chi2_convolution.csv', index=False)
print (conv.to_string(index=False))
else:
conv.to_csv('chi2_convolution_'+data_output+'.csv', index=False)
print (conv.to_string(index=False))
#plotting results
plt.figure(figsize=(7, 6))
plt.scatter(conv['Total_mass'], conv['chi2'], facecolors='none', edgecolors='r')
plt.axhline(y=conv['chi2'].values, c='r', alpha=0.5)
plt.axvline(x=conv['Total_mass'].values, c='r', alpha=0.5)
plt.scatter(df['Total_mass'], df['chi2'], s=2)
plt.xlabel("Total rocky mass (M$_{\oplus}$)")
plt.ylabel("Chi2")
plt.ylim(conv['chi2'].values-10, conv['chi2'].values+50)
plt.xlim(conv['Total_mass'].values-5, conv['Total_mass'].values+5)
plt.tight_layout()
if data_output == '':
plt.savefig('chi2_test.png', dpi=200)
else:
plt.savefig('chi2_test_'+data_output+'.png', dpi=200)
#getting abundances with the best solution
merged_tab['model [C/H]'] = np.log10(1 + (conv['Nmet'].values*merged_tab['Mass_CM'] + conv['Nearth'].values*merged_tab['mass_earth'])/merged_tab['CMS'])
merged_tab['model [X/H]'] = merged_tab['model [C/H]'] + tab_ab['[X/H]'].values
header = ['Z', 'Tcond', 'element', '[X/H]', 'err_[X/H]', '[C/H]', 'model [C/H]', 'model [X/H]']
if data_output == '':
merged_tab.to_csv('results.csv', columns=header, index=False)
else:
merged_tab.to_csv('results_'+data_output+'.csv', columns=header, index=False)
#plotting model vs observed abundances
plt.figure(figsize=(10, 5))
plt.scatter(merged_tab['Tcond'], merged_tab['[C/H]'], edgecolors='blue', s=170, marker="s", facecolors='none', linewidths=3., alpha=0.8, zorder= 1, label='Observed abundance')
plt.errorbar(merged_tab['Tcond'], merged_tab['[C/H]'], yerr=merged_tab['err_[X/H]'], linestyle='None', marker='None', color='blue', elinewidth=2.5, alpha=0.7)
plt.scatter(merged_tab['Tcond'], merged_tab['model [C/H]'], s=180, c='red', linewidths=1., alpha=0.5, zorder=1, label='Predicted abundance')
plt.legend(loc=2, numpoints=1, prop={'size':12}, shadow=True)
plt.xlabel(r'$\mathrm{Condensation\ Temperature (K)}$', color='black')
plt.ylabel(r'$\Delta \mathrm{[X/C]} \mathrm{(dex)}$')
plt.tight_layout()
if data_output == '':
plt.savefig('model_vs_observed_abundances.png', dpi=200)
else:
plt.savefig('model_vs_observed_abundances_'+data_output+'.png', dpi=200)
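# A minimal usage sketch (hedged): the stellar parameters and file names below are
# illustrative, and the input CSV is assumed to provide at least the 'element',
# '[X/H]' and 'err_[X/H]' columns used above.
#
# >>> from terra.terra import pacha
# >>> pacha.cvmass(feh=0.05, mass=1.0, model='yale')  # convective mass in solar masses
# >>> pacha.mod_abd(feh=0.05, mass=1.0, data_input='abundances.csv',
# ...               model='yale', chondrites='CM', data_output='HIP12345')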
|
ramstojhREPO_NAMEterraPATH_START.@terra_extracted@terra-master@[email protected]@.PATH_END.py
|
{
"filename": "test_array.py",
"repo_name": "rapidsai/cuml",
"repo_path": "cuml_extracted/cuml-main/python/cuml/cuml/tests/test_array.py",
"type": "Python"
}
|
#
# Copyright (c) 2020-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gc
import operator
import pickle
import pytest
from copy import deepcopy
from cuml.internals.array import (
CumlArray,
_order_to_strides,
array_to_memory_order,
)
from cuml import global_settings
from cuml.internals.mem_type import MemoryType
from cuml.internals.memory_utils import (
_get_size_from_shape,
determine_array_memtype,
using_memory_type,
)
# Temporarily disabled due to CUDA 11.0 issue
# https://github.com/rapidsai/cuml/issues/4332
# from rmm import DeviceBuffer
from cuml.internals.safe_imports import (
cpu_only_import,
cpu_only_import_from,
gpu_only_import,
gpu_only_import_from,
)
from cuml.testing.strategies import (
UNSUPPORTED_CUDF_DTYPES,
create_cuml_array_input,
cuml_array_dtypes,
cuml_array_input_types,
cuml_array_inputs,
cuml_array_orders,
cuml_array_output_types,
cuml_array_shapes,
cuml_array_mem_types,
)
from cuml.testing.utils import (
normalized_shape,
series_squeezed_shape,
squeezed_shape,
to_nparray,
)
from hypothesis import assume, given, settings
from hypothesis import strategies as st
cp = gpu_only_import("cupy")
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
cuda = gpu_only_import_from("numba", "cuda")
CudfDataFrame = gpu_only_import_from("cudf", "DataFrame")
CudfSeries = gpu_only_import_from("cudf", "Series")
PandasSeries = cpu_only_import_from("pandas", "Series")
PandasDataFrame = cpu_only_import_from("pandas", "DataFrame")
cp_array = gpu_only_import_from("cupy", "ndarray")
np_array = gpu_only_import_from("numpy", "ndarray")
numba_array = gpu_only_import_from(
"numba.cuda.cudadrv.devicearray", "DeviceNDArray"
)
test_input_types = ["numpy", "numba", "cupy", "series", None]
test_output_types = (
"cupy",
"numpy",
"cudf",
"pandas",
"array",
"numba",
"dataframe",
"series",
"df_obj",
)
_OUTPUT_TYPES_MAPPING = {
"cupy": cp.ndarray,
"numpy": np.ndarray,
"cudf": (CudfDataFrame, CudfSeries),
"pandas": (PandasDataFrame, PandasSeries),
"dataframe": (CudfDataFrame, PandasDataFrame),
"series": (CudfSeries, PandasSeries),
}
def _multidimensional(shape):
return len(squeezed_shape(normalized_shape(shape))) > 1
def _get_owner(curr):
if isinstance(curr, CumlArray):
return curr._owner
elif isinstance(curr, cp.ndarray):
return curr.data.mem._owner
else:
return None
def _assert_equal(array_like, cuml_array):
"""Check whether array-like data and cuml array data are equal."""
assert cp.array_equal(
cp.asarray(array_like),
cuml_array.to_output("cupy"),
equal_nan=True,
)
@given(
input_type=cuml_array_input_types(),
dtype=cuml_array_dtypes(),
shape=cuml_array_shapes(),
order=cuml_array_orders(),
mem_type=cuml_array_mem_types(),
force_gc=st.booleans(),
)
@settings(deadline=None)
def test_array_init(input_type, dtype, shape, order, mem_type, force_gc):
input_array = create_cuml_array_input(input_type, dtype, shape, order)
with using_memory_type(mem_type):
cuml_array = CumlArray(data=input_array)
# Test basic array properties
assert cuml_array.dtype == dtype
if input_type == "series":
assert cuml_array.shape == series_squeezed_shape(shape)
else:
assert cuml_array.shape == normalized_shape(shape)
# Order is only well-defined (and preserved) for multidimensional arrays.
md = isinstance(shape, tuple) and len([d for d in shape if d != 1]) > 1
assert cuml_array.order == order if md else "C"
# Check input array and array equality.
_assert_equal(input_array, cuml_array)
# Check that data is kept in memory even when the input_array reference
# is deleted.
input_array_copy = deepcopy(cp.asarray(input_array))
del input_array
if force_gc:
gc.collect()
_assert_equal(input_array_copy, cuml_array)
@given(
data_type=st.sampled_from([bytes, bytearray, memoryview]),
dtype=cuml_array_dtypes(),
shape=cuml_array_shapes(),
order=cuml_array_orders(),
mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_array_init_from_bytes(data_type, dtype, shape, order, mem_type):
dtype = np.dtype(dtype)
values = bytes(_get_size_from_shape(shape, dtype)[0])
# Convert to data_type to be tested if needed.
if data_type != bytes:
values = data_type(values)
array = CumlArray(
values, dtype=dtype, shape=shape, order=order, mem_type=mem_type
)
assert array.order == order
assert array.shape in (shape, (shape,))
assert array.dtype == dtype
array_copy = cp.zeros(shape, dtype=dtype)
assert cp.all(cp.asarray(array_copy) == array_copy)
@given(
input_type=cuml_array_input_types(),
dtype=cuml_array_dtypes(),
shape=cuml_array_shapes(),
order=cuml_array_orders(),
mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_array_mem_type(input_type, dtype, shape, order, mem_type):
"""
Test whether we can create CumlArray from all supported types and array
shapes on all supported mem types.
"""
mem_type = MemoryType.from_str(mem_type)
with using_memory_type(mem_type):
input_array = create_cuml_array_input(input_type, dtype, shape, order)
# Ensure the array is creatable
array = CumlArray(input_array)
input_mem_type = determine_array_memtype(input_array)
if input_mem_type.is_device_accessible:
joint_mem_type = input_mem_type
else:
joint_mem_type = mem_type
assert joint_mem_type.xpy.all(
joint_mem_type.xpy.asarray(input_array)
== joint_mem_type.xpy.asarray(array)
)
@given(
inp=cuml_array_inputs(),
indices=st.slices(10), # TODO: should be basic_indices() as shown below
# indices=basic_indices((10, 10)),
mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_get_set_item(inp, indices, mem_type):
mem_type = MemoryType.from_str(mem_type)
with using_memory_type(mem_type):
ary = CumlArray(data=inp)
# Assumption required due to limitation on step size for F-order.
assume(ary.order != "F" or (indices.step in (None, 1)))
# Check equality of array views.
inp_view = inp[indices]
        # We must assume that the resulting view has at least one element to avoid
        # triggering an UnownedMemory exception.
assume(mem_type.xpy.isscalar(inp_view) or inp_view.size > 0)
_assert_equal(inp_view, ary[indices])
# Check equality after assigning to array slice.
ary[indices] = inp.dtype.type(1.0)
inp[indices] = inp.dtype.type(1.0)
# We need to assume that inp is not a cudf.Series here, otherwise
# ary.to_output("cupy") called by equal() will trigger a
# CUDARuntimeError: cudaErrorInvalidDevice: invalid device ordinal
# error.
assume(not isinstance(inp, cudf.Series))
_assert_equal(inp, ary)
@given(
shape=cuml_array_shapes(),
dtype=cuml_array_dtypes(),
order=cuml_array_orders(),
mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_create_empty(shape, dtype, order, mem_type):
with using_memory_type(mem_type):
ary = CumlArray.empty(shape=shape, dtype=dtype, order=order)
assert isinstance(ary.ptr, int)
assert ary.shape == normalized_shape(shape)
assert ary.dtype == np.dtype(dtype)
@given(
shape=cuml_array_shapes(),
dtype=cuml_array_dtypes(),
order=cuml_array_orders(),
mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_create_zeros(shape, dtype, order, mem_type):
mem_type = MemoryType.from_str(mem_type)
with using_memory_type(mem_type):
ary = CumlArray.zeros(shape=shape, dtype=dtype, order=order)
test = mem_type.xpy.zeros(shape).astype(dtype)
assert mem_type.xpy.all(test == mem_type.xpy.asarray(ary))
@given(
shape=cuml_array_shapes(),
dtype=cuml_array_dtypes(),
order=cuml_array_orders(),
mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_create_ones(shape, dtype, order, mem_type):
mem_type = MemoryType.from_str(mem_type)
with using_memory_type(mem_type):
ary = CumlArray.ones(shape=shape, dtype=dtype, order=order)
test = mem_type.xpy.ones(shape).astype(dtype)
assert mem_type.xpy.all(test == mem_type.xpy.asarray(ary))
@given(
shape=cuml_array_shapes(),
dtype=cuml_array_dtypes(),
order=cuml_array_orders(),
mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_create_full(shape, dtype, order, mem_type):
mem_type = MemoryType.from_str(mem_type)
with using_memory_type(mem_type):
value = mem_type.xpy.array([mem_type.xpy.random.randint(100)]).astype(
dtype
)
ary = CumlArray.full(
value=value[0], shape=shape, dtype=dtype, order=order
)
test = mem_type.xpy.zeros(shape).astype(dtype) + value[0]
assert mem_type.xpy.all(test == mem_type.xpy.asarray(ary))
def cudf_compatible_dtypes(dtype):
return dtype not in UNSUPPORTED_CUDF_DTYPES
@given(
inp=cuml_array_inputs(),
input_mem_type=cuml_array_mem_types(),
output_type=cuml_array_output_types(),
)
@settings(deadline=None)
def test_output(inp, input_mem_type, output_type):
# Required assumptions for cudf outputs:
if output_type in ("cudf", "dataframe", "series"):
assume(inp.dtype not in UNSUPPORTED_CUDF_DTYPES)
if output_type == "series":
assume(not _multidimensional(inp.shape))
# Generate CumlArray from input and perform conversion.
with using_memory_type(input_mem_type):
arr = CumlArray(inp)
res = arr.to_output(output_type)
# Check output type
if output_type == "numba": # TODO: is this still needed?
# using correct numba ndarray check
assert cuda.devicearray.is_cuda_ndarray(res)
elif output_type == "cudf":
assert isinstance(
res, CudfDataFrame if _multidimensional(inp.shape) else CudfSeries
)
elif output_type == "pandas":
assert isinstance(
res,
PandasDataFrame if _multidimensional(inp.shape) else PandasSeries,
)
else:
assert isinstance(res, _OUTPUT_TYPES_MAPPING[output_type])
def assert_data_equal_(res):
# Check output data equality
if isinstance(res, CudfSeries):
# A simple equality check `assert cudf.Series(inp).equals(res)`
# does not work for with multi-dimensional data.
assert CudfSeries(np.ravel(inp)).equals(res)
elif isinstance(res, PandasSeries):
assert PandasSeries(np.ravel(inp)).equals(res)
elif isinstance(res, CudfDataFrame):
# Assumption required because of:
# https://github.com/rapidsai/cudf/issues/12266
assume(not np.isnan(res.to_numpy()).any())
assert CudfDataFrame(inp).equals(res)
elif isinstance(res, PandasDataFrame):
assert PandasDataFrame(inp).equals(res)
else:
assert np.array_equal(
to_nparray(inp), to_nparray(res), equal_nan=True
)
assert_data_equal_(res)
@given(
inp=cuml_array_inputs(),
output_type=cuml_array_output_types(),
mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_end_to_end_conversion_via_intermediate(inp, output_type, mem_type):
mem_type = MemoryType.from_str(mem_type)
# This test requires a lot of assumptions in combination with cuDF
# intermediates.
# Assumptions required for cuDF limitations:
assume(
# Not all dtypes are supported by cuDF.
not (
output_type in ("cudf", "pandas", "dataframe", "series")
and inp.dtype in UNSUPPORTED_CUDF_DTYPES
)
)
assume(
# Can't convert multidimensional arrays to a Series.
not (output_type == "series" and len(inp.shape) > 1)
)
assume(
        # Cannot convert from DataFrame to CumlArray without explicitly
# specifying shape, dtype, and order.
not (
output_type == "dataframe"
or (output_type == "cudf" and len(inp.shape) > 1)
or (output_type == "pandas" and len(inp.shape) > 1)
)
)
with using_memory_type(mem_type):
# First conversion:
array = CumlArray(data=inp)
_assert_equal(inp, array)
# Second conversion via intermediate
intermediate = array.to_output(output_type)
# Cupy does not support masked arrays.
cai = getattr(intermediate, "__cuda_array_interface__", dict())
assume(cai.get("mask") is None)
array2 = CumlArray(data=intermediate)
_assert_equal(inp, array2)
@given(
output_type=cuml_array_output_types(),
shape=cuml_array_shapes(),
dtype=cuml_array_dtypes(),
order=cuml_array_orders(),
out_dtype=cuml_array_dtypes(),
mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_output_dtype(output_type, shape, dtype, order, out_dtype, mem_type):
with using_memory_type(mem_type):
# Required assumptions for cudf outputs:
if output_type in ("cudf", "dataframe", "series"):
assume(dtype not in UNSUPPORTED_CUDF_DTYPES)
assume(out_dtype not in UNSUPPORTED_CUDF_DTYPES)
if output_type == "series":
assume(not _multidimensional(shape))
# Perform conversion
inp = create_cuml_array_input("numpy", dtype, shape, order)
ary = CumlArray(inp)
res = ary.to_output(output_type=output_type, output_dtype=out_dtype)
# Check output dtype
if isinstance(res, (CudfDataFrame, PandasDataFrame)):
            assert res.values.dtype == out_dtype
        else:
            assert res.dtype == out_dtype
@given(inp=cuml_array_inputs(), mem_type=cuml_array_mem_types())
@settings(deadline=None)
def test_array_interface(inp, mem_type):
mem_type = MemoryType.from_str(mem_type)
with using_memory_type(mem_type):
ary = CumlArray(inp)
in_mem_type = determine_array_memtype(inp)
if isinstance(inp, PandasSeries):
converted_inp = inp.to_numpy()
elif isinstance(inp, CudfSeries):
converted_inp = cp.asnumpy(inp.to_cupy())
else:
converted_inp = inp
try:
inp_ai = converted_inp.__cuda_array_interface__
except AttributeError:
inp_ai = converted_inp.__array_interface__
ary_ai = ary._array_interface
# Check Array Interface equality.
assert inp_ai["shape"] == ary_ai["shape"]
assert inp_ai["typestr"] == ary_ai["typestr"]
if (
not isinstance(inp, (PandasSeries, CudfSeries))
and determine_array_memtype(inp) is global_settings.memory_type
):
assert inp_ai["data"] == ary_ai["data"]
# Mismatch for one-dimensional arrays:
if inp_ai.get("strides", None) is not None:
assert inp_ai["strides"] == ary_ai["strides"]
if in_mem_type.is_device_accessible:
joint_mem_type = in_mem_type
else:
joint_mem_type = mem_type
# Check equality
inp_arr = joint_mem_type.xpy.asarray(converted_inp)
out_arr = joint_mem_type.xpy.asarray(ary)
assert joint_mem_type.xpy.all(
inp_arr == out_arr
) or joint_mem_type.xpy.all(
joint_mem_type.xpy.isnan(inp_arr)
== joint_mem_type.xpy.isnan(out_arr)
)
@given(
inp=cuml_array_inputs(),
to_serialize_mem_type=cuml_array_mem_types(),
from_serialize_mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_serialize(inp, to_serialize_mem_type, from_serialize_mem_type):
with using_memory_type(to_serialize_mem_type):
ary = CumlArray(data=inp)
header, frames = ary.serialize()
with using_memory_type(from_serialize_mem_type):
ary2 = CumlArray.deserialize(header, frames)
assert pickle.loads(header["type-serialized"]) is CumlArray
_assert_equal(inp, ary2)
assert ary._array_interface["shape"] == ary2._array_interface["shape"]
# Restricting the strides check due to
# https://github.com/cupy/cupy/issues/5897
if not (
len(ary.shape) > 1
and (
(ary.order == "C" and ary.shape[0] == 1)
or (ary.order == "F" and ary.shape[-1] == 1)
)
):
assert (
ary._array_interface["strides"]
== ary2._array_interface["strides"]
)
assert (
ary._array_interface["typestr"] == ary2._array_interface["typestr"]
)
assert ary2.mem_type is global_settings.memory_type
if isinstance(inp, (cudf.Series, pd.Series)):
assert ary.order == ary2.order
@pytest.mark.parametrize("protocol", [4, 5])
@given(
inp=cuml_array_inputs(),
to_serialize_mem_type=cuml_array_mem_types(),
from_serialize_mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_pickle(protocol, inp, to_serialize_mem_type, from_serialize_mem_type):
with using_memory_type(to_serialize_mem_type):
# Generate CumlArray
ary = CumlArray(data=inp)
# Prepare keyword arguments.
dumps_kwargs = {"protocol": protocol}
loads_kwargs = {}
f = []
len_f = 0
if protocol >= 5:
dumps_kwargs["buffer_callback"] = f.append
loads_kwargs["buffers"] = f
len_f = 1
a = pickle.dumps(ary, **dumps_kwargs)
with using_memory_type(from_serialize_mem_type):
b = pickle.loads(a, **loads_kwargs)
assert ary._array_interface["shape"] == b._array_interface["shape"]
# Restricting the strides check due to
# https://github.com/cupy/cupy/issues/5897
if not (len(ary.shape) > 1 and (ary.shape[0] == 1 or ary.shape[-1] == 1)):
assert ary._array_interface["strides"] == b._array_interface["strides"]
assert ary._array_interface["typestr"] == b._array_interface["typestr"]
# Check equality
assert len(f) == len_f
_assert_equal(inp, b)
if isinstance(inp, (cudf.Series, pd.Series)):
# skipping one dimensional ary order test
assert ary.order == b.order
@given(inp=cuml_array_inputs(), mem_type=cuml_array_mem_types())
@settings(deadline=None)
def test_deepcopy(inp, mem_type):
with using_memory_type(mem_type):
# Generate CumlArray
ary = CumlArray(data=inp)
# Perform deepcopy
b = deepcopy(ary)
# Check equality
_assert_equal(inp, b)
assert ary.ptr != b.ptr
assert ary._array_interface["shape"] == b._array_interface["shape"]
# Restricting the strides check due to
# https://github.com/cupy/cupy/issues/5897
if not (
len(ary.shape) > 1 and (ary.shape[0] == 1 or ary.shape[-1] == 1)
):
assert (
ary._array_interface["strides"]
== b._array_interface["strides"]
)
assert ary._array_interface["typestr"] == b._array_interface["typestr"]
if isinstance(inp, (cudf.Series, pd.Series)):
# skipping one dimensional ary order test
assert ary.order == b.order
@pytest.mark.parametrize("operation", [operator.add, operator.sub])
@given(
a=cuml_array_inputs(),
mem_type=cuml_array_mem_types(),
)
@settings(deadline=None)
def test_cumlary_binops(operation, a, mem_type):
with using_memory_type(mem_type):
b = deepcopy(a)
ary_a = CumlArray(a)
ary_b = CumlArray(b)
c = operation(a, b)
ary_c = operation(ary_a, ary_b)
_assert_equal(c, ary_c)
@pytest.mark.parametrize("order", ["F", "C"])
@given(mem_type=cuml_array_mem_types())
@settings(deadline=None)
def test_sliced_array_owner(order, mem_type):
"""
    When slicing a CumlArray, a new object can be created which
previously had an incorrect owner. This was due to the requirement by
`cudf.core.Buffer` that all data be in "u1" form. CumlArray would satisfy
this requirement by calling
`cp.asarray(data).ravel(order='A').view('u1')`. If the slice is not
contiguous, this would create an intermediate object with no references
that would be cleaned up by GC causing an error when using the memory
"""
mem_type = MemoryType.from_str(mem_type)
xpy = mem_type.xpy
# Create 2 copies of a random array
random_arr = xpy.array(
xpy.random.random((500, 4)), dtype=np.float32, order=order
)
arr = xpy.array(random_arr, copy=True)
with using_memory_type(mem_type):
cuml_array = CumlArray(random_arr)
# Make sure we have 2 pieces of data
if mem_type.is_device_accessible:
assert arr.data.ptr != cuml_array.ptr
else:
assert arr.__array_interface__["data"][0] != cuml_array.ptr
# Since these are C arrays, slice off the first column to ensure they are
# non-contiguous
cuml_slice = cuml_array[1:, 1:]
arr_slice = arr[1:, 1:]
# Delete the input object just to be sure
del random_arr
# Make sure to cleanup any objects. Forces deletion of intermediate owner
# object
gc.collect()
# Calling `to_output` forces use of the pointer. This can fail with a cuda
# error on `cupy.cuda.runtime.pointerGetAttributes(cuml_slice.ptr)` in CUDA
# < 11.0 or cudaErrorInvalidDevice in CUDA > 11.0 (unclear why it changed)
assert xpy.all(
cuml_slice.to_output("array", output_mem_type=mem_type) == arr_slice
)
@given(
input_type=cuml_array_input_types(),
dtype=cuml_array_dtypes(),
shape=cuml_array_shapes(min_dims=1, max_dims=5),
order=cuml_array_orders(),
)
@settings(deadline=None)
def test_array_to_memory_order(input_type, dtype, shape, order):
input_array = create_cuml_array_input(input_type, dtype, shape, order)
assert array_to_memory_order(input_array, default=order) == order
@given(
input_type=st.sampled_from(("cupy", "numpy")),
dtype=cuml_array_dtypes(),
shape=cuml_array_shapes(min_dims=1, max_dims=5),
order=cuml_array_orders(),
)
@settings(deadline=None)
def test_order_to_strides(input_type, dtype, shape, order):
input_array = create_cuml_array_input(input_type, dtype, shape, order)
if isinstance(shape, int):
shape = (shape,)
assert np.all(
np.array(_order_to_strides(order, shape, dtype))
== np.array(input_array.strides)
)
|
rapidsaiREPO_NAMEcumlPATH_START.@cuml_extracted@cuml-main@python@cuml@cuml@tests@[email protected]_END.py
|
{
"filename": "miscellaneous.py",
"repo_name": "shadden/celmech",
"repo_path": "celmech_extracted/celmech-master/celmech/miscellaneous.py",
"type": "Python"
}
|
import numpy as np
from sympy import symbols, series
from scipy.special import k0,k1,p_roots
import warnings
from . import clibcelmech
from .nbody_simulation_utilities import get_canonical_heliocentric_orbits
from ctypes import POINTER,c_int,c_double,c_long
_machine_eps = np.finfo(np.float64).eps
def get_symbol(latex, subscript=None, **kwargs): # i=None, kwargs
"""
    Get a sympy symbol based on an input LaTeX string.
Valid keyword arguments for the function ``sympy.symbols``
can also be passed.
Arguments
---------
latex : string
LaTeX expression to render as a sympy symbol
subscript : string or int, optional
A subscript for the sympy symbol
Returns
-------
sympy symbol
"""
if subscript:
return symbols(r"{0}_{{{1}}}".format(latex, subscript), **kwargs)
else:
return symbols(r"{0}".format(latex), **kwargs)
def get_symbol0(latex, subscript=None, **kwargs): # i=None, kwargs
"""
Same as :func:`get_symbol`, but appends a "0" to the subscript.
"""
if subscript:
return symbols(r"{0}_{{{1}\,0}}".format(latex, subscript), **kwargs)
else:
return symbols(r"{0}_0".format(latex), **kwargs)
def sk(k,y,tol=1.49e-08,rtol=1.49e-08,maxiter=50,miniter=1):
"""
    Approximate disturbing function coefficient described in
`Hadden & Lithwick (2018)`_
.. _Hadden & Lithwick (2018): https://ui.adsabs.harvard.edu/abs/2018AJ....156...95H/abstract
Quadrature routine based on scipy.quadrature.
Arguments
---------
k : int
Order of resonance
y : float
e / ecross; must be y<1
tol, rtol: float, optional
Control absolute and relative tolerance of integration.
Iteration stops when error between last two iterates
is less than `tol` OR the relative change is less than
`rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
miniter : int, optional
Minimum order of Gaussian quadrature
Returns
-------
val : float
Gaussian quadrature approximation of s_k(y)
"""
    if y>1:
        raise ValueError("sk(k,y) called with y={:f}. "
                         "Value of y must be less than 1.".format(y))
maxiter=max(miniter+1,maxiter)
val = np.inf
err = np.inf
    for n in range(miniter,maxiter+1):
newval = _sk_integral_fixed_quad(k,y,n)
err = abs(newval-val)
val = newval
if err<tol or err< rtol*abs(val):
break
else:
warnings.warn("maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err))
return val
def _sk_integral_fixed_quad(k,y,Nquad):
# Get numerical quadrature nodes and weight
nodes,weights = p_roots(Nquad)
# Rescale for integration interval from [-1,1] to [-pi,pi]
nodes = nodes * np.pi
weights = weights * 0.5
arg1 = 2 * k * (1 + y * np.cos(nodes)) / 3
arg2 = k * nodes + 4 * k * y * np.sin(nodes) / 3
integrand = k0(arg1) * np.cos(arg2)
return (2/np.pi) * integrand @ weights
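# A small usage sketch (values are illustrative): for a k=3 resonance at
# y = e/e_cross = 0.3, the adaptive routine sk() and a fixed 40-node
# Gauss-Legendre rule should agree to roughly the requested tolerance.
#
# >>> sk(3, 0.3)
# >>> _sk_integral_fixed_quad(3, 0.3, 40)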
def Dsk(k,y,tol=1.49e-08,rtol=1.49e-08,maxiter=50,miniter=1):
"""
    Derivative of disturbing function coefficient s_k
with respect to argument y. Coefficients are described
in `Hadden & Lithwick (2018)`_
.. _Hadden & Lithwick (2018): https://ui.adsabs.harvard.edu/abs/2018AJ....156...95H/abstract
Quadrature routine based on scipy.quadrature.
Arguments
---------
k : int
Order of resonance
y : float
e / ecross; must be y<1
tol, rtol: float, optional
Control absolute and relative tolerance of integration.
Iteration stops when error between last two iterates
is less than `tol` OR the relative change is less than
`rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
miniter : int, optional
Minimum order of Gaussian quadrature
Returns
-------
val : float
Gaussian quadrature approximation of s_k(y)
"""
    if y>1:
        raise ValueError("Dsk(k,y) called with y={:f}. "
                         "Value of y must be less than 1.".format(y))
maxiter=max(miniter+1,maxiter)
val = np.inf
err = np.inf
    for n in range(miniter,maxiter+1):
newval = _Dsk_integral_fixed_quad(k,y,n)
err = abs(newval-val)
val = newval
if err<tol or err< rtol*abs(val):
break
else:
warnings.warn("maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err))
return val
def _Dsk_integral_fixed_quad(k,y,Nquad):
# Get numerical quadrature nodes and weight
nodes,weights = p_roots(Nquad)
# Rescale for integration interval from [-1,1] to [-pi,pi]
nodes = nodes * np.pi
weights = weights * 0.5
arg1 = 2 * k * (1 + y * np.cos(nodes)) / 3
arg2 = k * nodes + 4 * k * y * np.sin(nodes) / 3
integrand = -2 * k * k1(arg1) * np.cos(nodes) * np.cos(arg2) / 3 - 4 * k * k0(arg1) * np.sin(nodes) * np.sin(arg2) / 3
return (2/np.pi) * integrand @ weights
def getOmegaMatrix(n):
r"""
Get the 2n x 2n skew-symmetric block matrix
.. math::
\begin{pmatrix}
0 & I_n \\
-I_n & 0
\end{pmatrix},
where :math:`I_n` is the :math:`n \times n` identity matrix,
that appears in Hamilton's equations.
Arguments
---------
n : int
Determines matrix dimension
Returns
-------
numpy.array
"""
zeros = np.zeros((n,n),dtype=int)
I = np.eye(n,dtype=int)
return np.vstack(
(
np.concatenate([zeros,I]).T,
np.concatenate([-I,zeros]).T
)
)
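# A quick illustrative check: for n=2 the returned block matrix is
#
# >>> print(getOmegaMatrix(2))
# [[ 0  0  1  0]
#  [ 0  0  0  1]
#  [-1  0  0  0]
#  [ 0 -1  0  0]]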
####################################################
################ Orbit linking #####################
####################################################
def EulerMatrix(Omega,inc,omega):
"""
The Euler 3D rotation matrix for Euler angles (Omega,inc,omega). The (3,1,3)
convention is followed.
Parameters
----------
Omega : float
First Euler angle (ascending node)
inc : float
Second Euler angle (inclination)
omega : float
Third Euler angle (argument of periapsis)
Returns
-------
numpy.array
3 x 3 rotation matrix
"""
R = np.eye(3)
s,c = np.sin(omega),np.cos(omega)
R = np.array([[c,-s,0],[s,c,0],[0,0,1]]) @ R
s,c = np.sin(inc),np.cos(inc)
R = np.array([[1,0,0],[0,c,-s],[0,s,c]]) @ R
s,c = np.sin(Omega),np.cos(Omega)
R = np.array([[c,-s,0],[s,c,0],[0,0,1]]) @ R
return R
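# A quick illustrative check (angles are arbitrary): an Euler rotation matrix is
# orthogonal, so R @ R.T recovers the identity to machine precision.
#
# >>> R = EulerMatrix(0.3, 0.2, 0.1)
# >>> np.allclose(R @ R.T, np.eye(3))
# True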
def linking_l(orbit1,orbit2):
"""
Computes the linking coefficient defined by `Kholshevnikov and Vassiliev
(1999) <https://ui.adsabs.harvard.edu/abs/1999CeMDA..75...67K/abstract>`_
for a pair of Keplerian orbits.
Parameters
----------
orbit1 : rebound.Orbit
First orbit
orbit2 : rebound.Orbit
Second orbit
Returns
-------
float
The linking coefficient, :math:`l_1`, defined by Equation (1) of
Kholshevnikov and Vassiliev (1999)
"""
mtrx1 = EulerMatrix(orbit1.Omega,orbit1.inc,orbit1.omega)
mtrx2 = EulerMatrix(orbit2.Omega,orbit2.inc,orbit2.omega)
P1,Q1,Z1 = mtrx1.T
P2,Q2,Z2 = mtrx2.T
wvec = np.cross(Z1,Z2)
w = np.linalg.norm(wvec)
a1,e1 = orbit1.a,orbit1.e
a2,e2 = orbit2.a,orbit2.e
p1 = a1 * (1 - e1*e1)
p2 = a2 * (1 - e2*e2)
R1 = p1 * w / (w - e1 * np.dot(P1,wvec))
R2 = p2 * w / (w - e2 * np.dot(P2,wvec))
r1 = p1 * w / (w + e1 * np.dot(P1,wvec))
r2 = p2 * w / (w + e2 * np.dot(P2,wvec))
return (r2-r1)*(R2-R1)
######################################################
################ AMD Calculation #####################
######################################################
from scipy.optimize import brenth
def _F(e,alpha,gamma):
"""Equation 35 of Laskar & Petit (2017)"""
denom = np.sqrt(alpha*(1-e*e)+gamma*gamma*e*e)
return alpha*e -1 + alpha + gamma*e / denom
def _F_for_res_overlap(e,alpha,gamma,mutot):
"""Equation 35 of Laskar & Petit (2017)"""
fByg = alpha**(0.825)
ecross = 1/alpha - 1
daBya= 1 - alpha
ecrit = ecross * np.exp(-2.2 * mutot**(1/3) * daBya**(-4/3))
denom = np.sqrt( (1 - e*e) * fByg**2 + e*e*alpha*gamma*gamma )
e1 = gamma * np.sqrt(alpha) * e / denom
return fByg * e + e1 - fByg * ecrit
def critical_relative_AMD(alpha,gamma):
r"""
The critical value of 'relative AMD', :math:`{\cal C} = C/\Lambda_\mathrm{out}`,
of a planet pair above which intersecting orbits are allowed.
See Equation 29 of
`Laskar & Petit (2017) <https://ui.adsabs.harvard.edu/abs/2017A%26A...605A..72L/abstract>`_
Arguments
---------
alpha : float
The semi-major axis ratio, :math:`\alpha=a_\mathrm{in}/a_\mathrm{out}` of the planet pair.
gamma : float
The mass ratio of the planet pair, :math:`\gamma = m_\mathrm{in}/m_\mathrm{out}`.
Returns
-------
Ccrit : float
        The value of the critical AMD
(:math:`C_c(\alpha,\gamma)` in the notation of
`Laskar & Petit (2017)
<https://ui.adsabs.harvard.edu/abs/2017A%26A...605A..72L/abstract>`_
"""
e0 = np.min((1,1/alpha-1))
ec = brenth(_F,0,e0,args=(alpha,gamma))
e1c = np.sin(np.arctan(gamma*ec / np.sqrt(alpha*(1-ec*ec))))
curlyC = gamma*np.sqrt(alpha) * (1-np.sqrt(1-ec*ec)) + (1 - np.sqrt(1-e1c*e1c))
return curlyC
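# A minimal usage sketch (numbers are illustrative): critical relative AMD for an
# equal-mass pair (gamma = 1) with semi-major axis ratio alpha = 0.76, i.e. roughly
# a 3:2 period ratio.
#
# >>> critical_relative_AMD(0.76, 1.0)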
def critical_relative_AMD_resonance_overlap(alpha,gamma,mutot):
r"""
The critical value of 'relative AMD', :math:`{\cal C} = C/\Lambda_\mathrm{out}`,
of a planet pair above which resonance overlap can occur based
on the resonance overlap criterion of Hadden & Lithwick (2018)
Arguments
---------
alpha : float
The semi-major axis ratio, :math:`\alpha=a_\mathrm{in}/a_\mathrm{out}` of the planet pair.
gamma : float
The mass ratio of the planet pair, :math:`\gamma = m_\mathrm{in}/m_\mathrm{out}`.
mutot : float
The total mass of the planet pair relative to the star, i.e.,
:math:`(\mu_\mathrm{in} + \mu_\mathrm{out}) / M_*`
Returns
-------
Ccrit : float
        The value of the critical AMD
(:math:`C_c(\alpha,\gamma)` in the notation of
`Laskar & Petit (2017)
<https://ui.adsabs.harvard.edu/abs/2017A%26A...605A..72L/abstract>`_
"""
e0 = np.min((1,1/alpha-1))
ec = brenth(_F_for_res_overlap,0,e0,args=(alpha,gamma,mutot))
fByg = alpha**(0.825)
denom = np.sqrt( (1 - ec*ec) * fByg**2 + ec*ec*alpha*gamma*gamma )
e1c = gamma * np.sqrt(alpha) * ec / denom
curlyC = gamma*np.sqrt(alpha) * (1-np.sqrt(1-ec*ec)) + (1 - np.sqrt(1-e1c*e1c))
return curlyC
def compute_AMD(sim):
"""
Compute total AMD of a planetary system.
The angular momentum deficit (AMD) of a
planetary system is the difference between
the angular momentum of a hypothetical system
with the same masses and semi-major axes but with
circular, coplanar orbits and the actual
angular momentum of a planetary system.
It is a conserved quantity of the purely
secular dynamics of a system.
Arguments
---------
sim : :class:`rebound.Simulation`
A REBOUND simulation of a planetary system.
Returns
-------
AMD : float
        The value of the system's angular momentum
deficit.
"""
# copy sim and move to center of mass
sim = sim.copy()
sim.move_to_com()
pstar = sim.particles[0]
Mstar = pstar.m
Ltot = pstar.m * np.cross(pstar.xyz,pstar.vxyz)
ps = sim.particles[1:]
Lmbda=np.zeros(len(ps))
G = np.zeros(len(ps))
Lhat = np.zeros((len(ps),3))
ch_orbits = get_canonical_heliocentric_orbits(sim)
for k,p in enumerate(sim.particles[1:]):
orb = ch_orbits[k]
GMi = sim.G * (p.m + Mstar)
mu = p.m*Mstar/(p.m + Mstar)
Lmbda[k] = mu * np.sqrt(GMi * orb.a)
G[k] = Lmbda[k] * np.sqrt(1-orb.e*orb.e)
hvec = np.cross(p.xyz,p.vxyz)
Lhat[k] = hvec / np.linalg.norm(hvec)
Ltot = Ltot + p.m * hvec
cosi = np.array([Lh.dot(Ltot) for Lh in Lhat]) / np.linalg.norm(Ltot)
return np.sum(Lmbda) - np.sum(G * cosi)
def AMD_stable_Q(sim):
"""
Test the AMD-stability of a planetary system.
Returns :code:`True` if a planetary system is AMD-stable
and :code:`False` if not.
Arguments
---------
sim : :class:`rebound.Simulation`
        Simulation object to compute the stability criterion for.
Returns
-------
bool :
        :code:`True` if the system is AMD-stable, otherwise :code:`False`.
"""
AMD = compute_AMD(sim)
pstar = sim.particles[0]
ps = sim.particles[1:]
for i in range(len(ps)-1):
pIn = ps[i]
pOut = ps[i+1]
orbIn = pIn.orbit(pstar)
orbOut = pOut.orbit(pstar)
alpha = orbIn.a / orbOut.a
gamma = pIn.m / pOut.m
# Lambda of outer particle
Mi = pstar.m + pOut.m
mu_i = pOut.m * pstar.m / Mi
LmbdaOut = mu_i * np.sqrt(sim.G * Mi * orbOut.a)
Ccrit = critical_relative_AMD(alpha,gamma)
C = AMD / LmbdaOut
if C>Ccrit:
return False
return True
def AMD_stability_coefficients(sim,overlap=False):
r"""
Compute AMD stability coefficients
of the successive adjacent planet pairs
of a planetary system.
    A planet pair's AMD stability coefficient
is defined as the total planetary system's
AMD divided by the critical AMD required
for the pair's orbits to cross.
(Equation 58 of `Laskar & Petit (2017)
<https://ui.adsabs.harvard.edu/abs/2017A%26A...605A..72L/abstract>`_)
Arguments
---------
sim : rebound.Simulation
        Simulation object to compute AMD coefficients for.
overlap : bool, optional
If True, planet pairs' critical AMD values are computed
as the critical AMD value for resonance overlap. By default
the critical values are computed as the value required
for orbit crossing.
Returns
-------
ndarray :
Values of :math:`\beta = \frac{C}{\Lambda'C_c}`
for planet pairs.
"""
AMD = compute_AMD(sim)
pstar = sim.particles[0]
ps = sim.particles[1:]
coeffs = np.zeros(len(ps)-1)
for i in range(len(ps)-1):
pIn = ps[i]
pOut = ps[i+1]
orbIn = pIn.orbit(pstar)
orbOut = pOut.orbit(pstar)
alpha = orbIn.a / orbOut.a
gamma = pIn.m / pOut.m
# Calculate Lambda of the outer particle
Mi = pstar.m + pOut.m
mu_i = pOut.m * pstar.m / Mi
LmbdaOut = mu_i * np.sqrt(sim.G * Mi * orbOut.a)
if overlap:
mutot = (pIn.m + pOut.m) / pstar.m
Ccrit = critical_relative_AMD_resonance_overlap(alpha,gamma,mutot)
else:
Ccrit = critical_relative_AMD(alpha,gamma)
C = AMD / LmbdaOut
coeffs[i] = C / Ccrit
return coeffs
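# A minimal usage sketch, assuming REBOUND is installed; the masses and orbital
# elements below are purely illustrative.
#
# >>> import rebound
# >>> sim = rebound.Simulation()
# >>> sim.add(m=1.0)                      # star
# >>> sim.add(m=3e-6, a=1.0, e=0.05)      # inner planet
# >>> sim.add(m=3e-6, a=1.3, e=0.05)      # outer planet
# >>> AMD_stable_Q(sim)                   # True/False verdict of the AMD criterion
# >>> AMD_stability_coefficients(sim)     # per-pair stability coefficients beta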
######################################################
######################## FMFT ########################
######################################################
p2d = np.ctypeslib.ndpointer(dtype = np.float64,ndim = 2,flags = 'C')
import os
if not os.getenv('READTHEDOCS'):
_fmft = clibcelmech.fmft_wrapper
_fmft.argtypes =[p2d, c_int, c_double, c_double, c_int, p2d, c_long]
_fmft.restype = c_int
def _check_errors(ret, func, args):
if ret<=0:
raise RuntimeError("FMFT returned error code %d for the given arguments"%ret)
return ret
_fmft.errcheck = _check_errors
else:
_fmft = lambda *args: None
def _nearest_pow2(x):
return int(2**np.floor(np.log2(x)))
def frequency_modified_fourier_transform(time, z, Nfreq, method_flag = 3, min_freq = None, max_freq = None):
"""
    Apply the frequency-modified Fourier transform algorithm of `Šidlichovský & Nesvorný (1996)`_
    to a time series to determine the series' principal Fourier modes. This function simply
    provides a wrapper to the C implementation written by D. Nesvorný available
    at `www-n.oca.eu/nesvorny/programs.html`_.
    .. _Šidlichovský & Nesvorný (1996): https://ui.adsabs.harvard.edu/abs/1996CeMDA..65..137S/abstract
    .. _www-n.oca.eu/nesvorny/programs.html: https://www-n.oca.eu/nesvorny/programs.html
Arguments
---------
time : ndarray, shape (N,)
Times of input data values.
z : complex ndarray, shape (N,)
        Complex-valued input data time series.
Nfreq : int
Number of Fourier modes to determine.
method_flag : int
The FMFT algorithm
Basic Fourier Transform algorithm if flag = 0; not implemented
Modified Fourier Transform if flag = 1;
Frequency Modified Fourier Transform if flag = 2;
FMFT with additional non-linear correction if flag = 3
"""
output_arr = np.empty((Nfreq,3),order='C',dtype=np.float64)
input_arr = np.array(np.vstack((time,np.real(z),np.imag(z))).T,order='C',dtype=np.float64)
Ndata = _nearest_pow2(len(input_arr))
dt = time[1]-time[0]
_Nyq = np.pi / dt
if not min_freq:
min_freq = -1 * _Nyq
if not max_freq:
max_freq = _Nyq
_fmft( output_arr,
Nfreq,
min_freq,
max_freq,
c_int(method_flag),
input_arr,
c_long(Ndata)
)
return {x[0]:x[1]*np.exp(1j*x[2]) for x in output_arr}
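# Minimal usage sketch (illustrative only): recover the two modes of a synthetic
# complex signal. Running it requires the compiled celmech C library wrapped by
# `_fmft` above, so it is defined as a function rather than executed at import.
def _example_fmft():
    t = np.arange(1024, dtype=np.float64)
    z = 1.0 * np.exp(1j * 0.123 * t) + 0.3 * np.exp(1j * 0.456 * t)
    modes = frequency_modified_fourier_transform(t, z, Nfreq=2)
    for freq, amp in modes.items():
        print("frequency = %.6f , |amplitude| = %.3f" % (freq, abs(amp)))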
def holman_weigert_stability_boundary(mu,e,Ptype=True):
r"""
    Compute the critical semi-major axis representing an approximate
stability boundary for circumbinary planets in P- or S-type orbits.
Formulas for critical semi-major axes are taken from `Holman & Wiegert (1999)`_
.. _Holman & Wiegert (1999): https://ui.adsabs.harvard.edu/abs/1999AJ....117..621H/abstract
Arguments
---------
mu : float
The mass-ratio of the binary,
.. math::
\mu = \frac{m_B}{m_A+m_B}
        where :math:`m_A` and :math:`m_B` are the component masses of the binary.
e : float
The eccentricity of the binary.
Ptype : bool, optional
If ``True`` (default) orbit is assumed to be a P-type circumbinary orbit.
        If ``False``, an S-type circum-primary/secondary orbit is considered.
Returns
-------
aC : float
The critical semi-major axis marking the stability boundary
"""
if Ptype:
if mu<0.1 or mu>0.5:
warnings.warn("Input 'mu'={:.2g} is outside range [0.1,0.5] for which the stability boundary has been computed".format(mu))
if e<0.0 or e>0.7:
warnings.warn("Input 'e'={:.2g} is outside range [0.0,0.7] for which the stability boundary has been computed".format(e))
aC = 1.6
aC += 5.1 * e
aC += -2.22 * e * e
aC += 4.12 * mu
aC += -4.27 * mu * e
aC += -5.09 * mu * mu
aC += 4.61 * mu * mu * e * e
else:
        if mu<0.1 or mu>0.9:
            warnings.warn("Input 'mu'={:.2g} is outside range [0.1,0.9] for which the stability boundary has been computed".format(mu))
        if e<0.0 or e>0.8:
            warnings.warn("Input 'e'={:.2g} is outside range [0.0,0.8] for which the stability boundary has been computed".format(e))
aC = 0.464
aC += -0.38 * mu
aC += -0.631 * e
aC += 0.586 * mu * e
aC += 0.150 * e * e
aC += -0.198 * mu * e * e
return aC
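# Minimal usage sketch (illustrative only): critical semi-major axis, in units
# of the binary separation, for a P-type (circumbinary) orbit around an
# equal-mass binary with eccentricity 0.1.
def _example_holman_wiegert():
    aC = holman_weigert_stability_boundary(mu=0.5, e=0.1, Ptype=True)
    print("Critical circumbinary semi-major axis: %.2f binary separations" % aC)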
#######################
from sympy import diff, Matrix
def poisson_bracket(f,g,re_varslist,complex_varslist):
r"""
Calculate the Poisson bracket
.. math::
[f,g] = \sum_{i=1}^N
\frac{\partial f}{\partial q_i}
\frac{\partial g}{\partial p_i}
-
\frac{\partial f}{\partial p_i}
\frac{\partial g}{\partial q_i}
-
i \sum_{j=1}^{M}
\frac{\partial f}{\partial z_j}
\frac{\partial g}{\partial \bar{z}_j}
-
\frac{\partial f}{\partial \bar{z}_j}
        \frac{\partial g}{\partial {z}_j}
where :code:`re_varslist` is :math:`=(q_1,...,q_N,p_1,...,p_N)`
and :code:`complex_varslist` is :math:`=(x_1,...,x_M,\bar{x}_1,...,\bar{x}_M)`.
Arguments
---------
f : sympy expression
Function appearing in Poisson bracket.
g : sympy expression
Other function appearing in Poisson bracket.
re_varslist : list of sympy symbols
List of real canonical variables in the form
:math:`(q_1,...,q_N,p_1,...,p_N)`
complex_varslist : list of sympy symbols
List of complex canonical variables in the form
:math:`(x_1,...,x_M,\bar{x}_1,...,\bar{x}_M)`
Returns
-------
sympy expression
"""
br = 0
if len(complex_varslist)>0:
Omega_c =Matrix(-1j * getOmegaMatrix(len(complex_varslist)//2))
gradf_c = Matrix([diff(f,v) for v in complex_varslist])
gradg_c = Matrix([diff(g,v) for v in complex_varslist])
br += gradf_c.dot(Omega_c * gradg_c)
if len(re_varslist)>0:
Omega_re=Matrix(getOmegaMatrix(len(re_varslist)//2))
gradf_re = Matrix([diff(f,v) for v in re_varslist])
gradg_re = Matrix([diff(g,v) for v in re_varslist])
br+= gradf_re.dot(Omega_re * gradg_re)
return br
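# Minimal usage sketch (illustrative only): check the canonical relations
# [q, p] = 1 and [x, xbar] = -i. The expected values assume `getOmegaMatrix`
# returns the standard symplectic form [[0, I], [-I, 0]].
def _example_poisson_bracket():
    from sympy import symbols
    q, p = symbols("q p")
    x, xbar = symbols("x xbar")
    print(poisson_bracket(q, p, [q, p], []))        # expected: 1
    print(poisson_bracket(x, xbar, [], [x, xbar]))  # expected: -1.0*I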
def truncated_expansion(exprn,order_rules,max_order):
r"""
Expand a sympy expression up to a maximum order in a
small book-keeping parameter after assigning variables
appearing in the expression a given order using the
`order_rules` argument.
Arguments
---------
exprn : sympy expression
        The original expression from which to calculate the
        expansion.
order_rules : dict
A dictionary specifying what order various variables
should be assumed to have in the book-keeping parameter.
Each key-value pair ``{n:[x_1,x_2,..,x_m]}`` in ``order_rules``
specifies that a set of variables
.. math::
(x_1,...,x_m) \sim \mathcal{O}(\epsilon^n)
where :math:`\epsilon` is the book-keeping parameter.
max_order : int
The order at which the resulting series expansion in
the book-keeping parameter :math:`\epsilon` should
be truncated.
Returns
-------
sympy expression
"""
eps = symbols("epsilon")
    assert eps not in exprn.free_symbols, "Epsilon appears as a free symbol in 'exprn'."
rule = dict()
for n,variables in order_rules.items():
rule.update({v:eps**n * v for v in variables})
sexprn = series(exprn.subs(rule),eps,0,max_order+1)
result = sexprn.removeO().subs({eps:1})
return result
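# Minimal usage sketch (illustrative only): treat `x` as a first-order quantity
# and truncate y*cos(x) at second order in the book-keeping parameter.
def _example_truncated_expansion():
    from sympy import symbols, cos
    x, y = symbols("x y")
    result = truncated_expansion(y * cos(x), {1: [x]}, max_order=2)
    print(result)  # expected: y - x**2*y/2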
################################################
########### Levin Method Integration ###########
################################################
from scipy.linalg import lu_factor, lu_solve
def linsolve(A,y):
"""
Solve linear system of equations
.. math::
A \cdot x = y
for y.
"""
return lu_solve(lu_factor(A),y)
def _chebyshev_gauss_lobatto_points(n,a,b):
"""Get Gauss-Lobatto quadrature points for Chebyshev polynomials"""
return 0.5 * (a+b) + 0.5 * (a-b) * np.cos(np.pi*np.arange(n)/(n-1))
def _chebyshev_Dmatrix(n):
"""Chebyshev derivative matrix for pseudo-spectral method"""
x = np.cos(np.pi*np.arange(n)/(n-1))
c = lambda j: 2 if j==0 or j==n-1 else 1
Dkj = lambda k,j: (c(k)/c(j))*(-1)**(k+j+1) / (x[k]-x[j]) if k!=j else 0
Dmtrx = np.array([[Dkj(k,j) for j in range(n)]for k in range(n)])
Dmtrx -= np.diag(np.sum(Dmtrx,axis=1))
return Dmtrx
def levin_method_integrate(fvec_fn,wvec_fn,Amtrx_fns,a,b,N=32):
r"""Evlauate integrals of the form
.. math::
I(a,b) = \int_{a}^{b} \vec{f}(x)\cdot\vec{w}(x) dx
where the functions :math:`\vec{w}(x)` satisfy a linear differential
equation of the form :math:`\frac{d}{dx}\vec{w}(x) = A(x) \cdot
\vec{w}(x)`. Evaluation is done using `Levin's method`_.
.. _Levin's method: https://www.sciencedirect.com/science/article/pii/0377042794001189
Parameters
----------
fvec_fn : function
A function that returns the vector :math:`\vec{f}(x)`.
wvec_fn : function
A function that returns the vector :math:`\vec{w}(x)`.
Amtrx_fns : list of functions
A list of functions giving the entries of matrix :math:`A(x)` appearing
in the differential equation obeyed by :math:`\vec{w}(x)`. Input should
use nested lists to match the structure of the matrix.
a : float
Lower integration limit
b : float
Upper integration limit
N : int
Number of quadrature points to use
Returns
-------
float
"""
chebD = _chebyshev_Dmatrix(N)
wa = wvec_fn(a)
wb = wvec_fn(b)
f = 0.5 * (b-a)
xj = _chebyshev_gauss_lobatto_points(N,a,b)
zeroN = np.zeros((N,N))
M = len(wa)
D = np.block([[chebD if j==i else zeroN for j in range(M)] for i in range(M)])
A_tr_mtrx = np.block([[np.diag(Amtrx_fns[i][j](xj)) for i in range(M)] for j in range(M)])
Fsoln = linsolve(D + f*A_tr_mtrx,f*np.reshape(fvec_fn(xj),-1))
Fa,Fb = Fsoln[::N],Fsoln[N-1::N]
ans=Fb@wb - Fa@wa
return ans
def levin_method_integrate_adaptive(fvec_fn,wvec_fn,Amtrx_fns,a,b,N0=32,Nmax=128,rtol = 1.49e-08,atol = 1.49e-08 ):
r"""Evlauate integrals of the form
.. math::
I(a,b) = \int_{a}^{b} \vec{f}(x)\cdot\vec{w}(x) dx
where the functions :math:`\vec{w}(x)` satisfy a linear differential
equation of the form :math:`\frac{d}{dx}\vec{w}(x) = A(x) \cdot
\vec{w}(x)`. Evaluation is done using `Levin's method`_.
Method is applied adaptively with increasing number of quadrature points
until the estimated error, :math:`\delta` satisfies :math:`\delta <
\epsilon_\mathrm{rel}|I(a,b)| + \epsilon_\mathrm{abs}`.
.. _Levin's method: https://www.sciencedirect.com/science/article/pii/0377042794001189
Parameters
----------
fvec_fn : function
A function that returns the vector :math:`\vec{f}(x)`.
wvec_fn : function
A function that returns the vector :math:`\vec{w}(x)`.
Amtrx_fns : list of functions
A list of functions giving the entries of matrix :math:`A(x)` appearing
in the differential equation obeyed by :math:`\vec{w}(x)`. Input should
use nested lists to match the structure of the matrix.
a : float
Lower integration limit
b : float
Upper integration limit
N0 : int
Initial number of Gauss-Lobatto quadrature points to use.
Nmax : int
Maximum number of quadrature points to use.
rtol : float
Relative tolerance
atol : float
Absolute tolerance
Returns
-------
float
"""
delta = np.inf
ans_old = np.inf
N=N0
while N<=Nmax:
ans = levin_method_integrate(fvec_fn,wvec_fn,Amtrx_fns,a,b,N)
delta = np.abs(ans-ans_old)
ans_old = ans
N*=2
if delta < rtol * np.abs(ans) + atol:
break
else:
msg="Exceeded maximum number of quadruature points without converging.\n"
msg+="N={}, delta = {}, target = {}".format(N,delta,rtol * np.abs(ans) + atol)
warnings.warn(msg)
return ans
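# Minimal usage sketch (illustrative only), relying on the documented contract
# of `levin_method_integrate_adaptive`: evaluate
#   I = \int_0^{10} cos(50 x) / (1 + x^2) dx
# by writing the oscillatory factor as w(x) = (cos(50 x), sin(50 x)), which
# satisfies w' = A.w with A = [[0, -50], [50, 0]].
def _example_levin_integrate():
    omega = 50.0
    fvec_fn = lambda x: np.array([1.0 / (1.0 + x**2), np.zeros_like(x)])
    wvec_fn = lambda x: np.array([np.cos(omega * x), np.sin(omega * x)])
    Amtrx_fns = [
        [lambda x: np.zeros_like(x), lambda x: -omega * np.ones_like(x)],
        [lambda x: omega * np.ones_like(x), lambda x: np.zeros_like(x)],
    ]
    return levin_method_integrate_adaptive(fvec_fn, wvec_fn, Amtrx_fns, 0.0, 10.0)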
|
shaddenREPO_NAMEcelmechPATH_START.@celmech_extracted@celmech-master@[email protected]@.PATH_END.py
|
{
"filename": "input_pipeline.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/examples/wmt/input_pipeline.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for a WMT dataset."""
import os
from typing import Dict, Optional, List, Union
from clu import deterministic_data
import ml_collections
import tensorflow as tf
import tensorflow_datasets as tfds
import tokenizer
AUTOTUNE = tf.data.AUTOTUNE
Features = dict[str, tf.Tensor]
class NormalizeFeatureNamesOp:
"""Normalizes feature names to 'inputs' and 'targets'."""
def __init__(self, ds_info: tfds.core.DatasetInfo, reverse_translation: bool):
self.input_lang, self.target_lang = ds_info.supervised_keys
if reverse_translation:
self.input_lang, self.target_lang = self.target_lang, self.input_lang
def __call__(self, features: Features) -> Features:
features['inputs'] = features.pop(self.input_lang)
features['targets'] = features.pop(self.target_lang)
return features
def get_raw_dataset(
dataset_builder: tfds.core.DatasetBuilder,
split: str,
*,
reverse_translation: bool = False,
) -> tf.data.Dataset:
"""Loads a raw WMT dataset and normalizes feature keys.
Args:
    dataset_builder: TFDS dataset builder that can build `split`.
split: Split to use. This must be the full split. We shard the split across
multiple hosts and currently don't support sharding subsplits.
reverse_translation: bool: whether to reverse the translation direction.
      e.g. for 'de-en' this translates from English to German.
Returns:
Dataset with source and target language features mapped to 'inputs' and
'targets'.
"""
num_examples = dataset_builder.info.splits[split].num_examples
per_host_split = deterministic_data.get_read_instruction_for_host(
split, num_examples, drop_remainder=False
)
ds = dataset_builder.as_dataset(split=per_host_split, shuffle_files=False)
ds = ds.map(
NormalizeFeatureNamesOp(
dataset_builder.info, reverse_translation=reverse_translation
),
num_parallel_calls=AUTOTUNE,
)
return ds
def pack_dataset(
dataset: tf.data.Dataset,
key2length: int | dict[str, int],
keys: list[str] | None = None,
) -> tf.data.Dataset:
"""Creates a 'packed' version of a dataset on-the-fly.
Adapted from the mesh-tf implementation.
This is meant to replace the irritation of having to create a separate
"packed" version of a dataset to train efficiently on TPU.
Each example in the output dataset represents several examples in the
input dataset.
For each key in the input dataset, two additional keys are created:
<key>_segmentation: an int32 tensor identifying the parts
representing the original example.
<key>_position: an int32 tensor identifying the position within the original
example.
Example:
Two input examples get combined to form an output example.
The input examples are:
{"inputs": [8, 7, 1, 0], "targets":[4, 1, 0]}
{"inputs": [2, 3, 4, 1], "targets":[5, 6, 1]}
The output example is:
{
"inputs": [8, 7, 1, 2, 3, 4, 1, 0, 0, 0]
"inputs_segmentation": [1, 1, 1, 2, 2, 2, 2, 0, 0, 0]
"inputs_position": [0, 1, 2, 0, 1, 2, 3, 0, 0, 0]
"targets": [4, 1, 5, 6, 1, 0, 0, 0, 0, 0]
"targets_segmentation": [1, 1, 2, 2, 2, 0, 0, 0, 0, 0]
"targets_position": [0, 1, 0, 1, 2, 0, 0, 0, 0, 0]
}
0 represents padding in both the inputs and the outputs.
Sequences in the incoming examples are truncated to length "length", and the
sequences in the output examples all have fixed (padded) length "length".
Args:
dataset: a tf.data.Dataset
key2length: an integer, or a dict from feature-key to integer
keys: a list of strings (e.g. ["inputs", "targets"])
Returns:
a tf.data.Dataset
"""
shapes = tf.nest.map_structure(lambda spec: spec.shape, dataset.element_spec)
if keys is None:
keys = list(shapes.keys())
for k in keys:
if k not in shapes:
raise ValueError(
'Key %s not found in dataset. Available keys are %s'
% (k, shapes.keys())
)
if not shapes[k].is_compatible_with(tf.TensorShape([None])): # type: ignore[wrong-arg-types]
raise ValueError('Tensors to be packed must be one-dimensional.')
# make sure that the length dictionary contains all keys as well as the
# keys suffixed by "_segmentation" and "_position"
if isinstance(key2length, int):
key2length = {k: key2length for k in keys}
for k in keys:
for suffix in ['_segmentation', '_position']:
key2length[k + suffix] = key2length[k]
# trim to length
dataset = dataset.map(
lambda x: {k: x[k][: key2length[k]] for k in keys},
num_parallel_calls=AUTOTUNE,
)
# Setting batch_size=length ensures that the concatenated sequences (if they
# have length >=1) are sufficient to fill at least one packed example.
batch_size = max(key2length.values())
dataset = dataset.padded_batch(
batch_size, padded_shapes={k: [-1] for k in keys}
)
dataset = _pack_with_tf_ops(dataset, keys, key2length)
# Set the Tensor shapes correctly since they get lost in the process.
def my_fn(x):
return {k: tf.reshape(v, [key2length[k]]) for k, v in x.items()}
return dataset.map(my_fn, num_parallel_calls=AUTOTUNE)
def _pack_with_tf_ops(
dataset: tf.data.Dataset, keys: list[str], key2length: dict[str, int]
) -> tf.data.Dataset:
"""Helper-function for packing a dataset which has already been batched.
Helper for pack_dataset() Uses tf.while_loop.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings
    key2length: a dict from feature-key to integer
Returns:
a dataset.
"""
empty_example = {}
for k in keys:
empty_example[k] = tf.zeros([0], dtype=tf.int32)
empty_example[k + '_position'] = tf.zeros([0], dtype=tf.int32)
keys_etc = empty_example.keys()
def write_packed_example(partial, outputs):
new_partial = empty_example.copy()
new_outputs = {}
for k in keys_etc:
new_outputs[k] = outputs[k].write(
outputs[k].size(),
tf.pad(partial[k], [[0, key2length[k] - tf.size(partial[k])]]),
)
return new_partial, new_outputs
def map_fn(x):
"""Internal function to flat_map over.
Consumes a batch of input examples and produces a variable number of output
examples.
Args:
x: a single example
Returns:
a tf.data.Dataset
"""
partial = empty_example.copy()
i = tf.zeros([], dtype=tf.int32)
dynamic_batch_size = tf.shape(x[keys[0]])[0]
outputs = {}
for k in keys:
outputs[k] = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, element_shape=[key2length[k]]
)
outputs[k + '_position'] = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, element_shape=[key2length[k]]
)
def body_fn(i, partial, outputs):
"""Body function for while_loop.
Args:
i: integer scalar
partial: dictionary of Tensor (partially-constructed example)
outputs: dictionary of TensorArray
Returns:
A triple containing the new values of the inputs.
"""
can_append = True
one_example = {}
for k in keys:
val = tf.cast(x[k][i], tf.int32)
val = val[: tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))]
one_example[k] = val
for k in keys:
can_append = tf.logical_and(
can_append,
tf.less_equal(
tf.size(partial[k]) + tf.size(one_example[k]), key2length[k]
),
)
def false_fn():
return write_packed_example(partial, outputs)
def true_fn():
return partial, outputs
partial, outputs = tf.cond(can_append, true_fn, false_fn)
new_partial = {}
for k in keys:
new_seq = one_example[k][: key2length[k]]
new_seq_len = tf.size(new_seq)
new_partial[k] = tf.concat([partial[k], new_seq], 0)
new_partial[k + '_position'] = tf.concat(
[partial[k + '_position'], tf.range(new_seq_len)], 0
)
partial = new_partial
return i + 1, partial, outputs
# For loop over all examples in the batch.
i, partial, outputs = tf.while_loop(
cond=lambda *_: True,
body=body_fn,
loop_vars=(i, partial, outputs),
shape_invariants=(
tf.TensorShape([]), # type: ignore[wrong-arg-types]
{k: tf.TensorShape([None]) for k in keys_etc}, # type: ignore[wrong-arg-types]
{k: tf.TensorShape(None) for k in keys_etc}, # type: ignore[wrong-arg-types]
),
maximum_iterations=dynamic_batch_size,
)
_, outputs = write_packed_example(partial, outputs)
packed = {k: outputs[k].stack() for k in keys_etc}
for k in keys:
packed[k + '_segmentation'] = tf.cumsum(
tf.cast(tf.equal(packed[k + '_position'], 0), tf.int32), axis=1
) * tf.cast(tf.not_equal(packed[k], 0), tf.int32)
return packed
dataset = dataset.map(map_fn, num_parallel_calls=AUTOTUNE)
return dataset.unbatch()
# -----------------------------------------------------------------------------
# Main dataset prep routines.
# -----------------------------------------------------------------------------
def preprocess_wmt_data(
dataset,
shuffle: bool,
num_epochs: int | None = 1,
pack_examples: bool = True,
shuffle_buffer_size: int = 1024,
max_length: int = 512,
batch_size: int = 256,
drop_remainder: bool = True,
prefetch_size: int = AUTOTUNE,
):
"""Shuffle and batch/pack the given dataset."""
def length_filter(max_len):
def filter_fn(x):
source, target = x['inputs'], x['targets']
l = tf.maximum(tf.shape(source)[0], tf.shape(target)[0])
return tf.less(l, max_len + 1)
return filter_fn
if max_length > 0:
dataset = dataset.filter(length_filter(max_length))
if shuffle:
dataset = dataset.shuffle(shuffle_buffer_size)
dataset = dataset.repeat(num_epochs)
if pack_examples:
dataset = pack_dataset(dataset, max_length)
dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
else: # simple (static-shape) padded batching
dataset = dataset.padded_batch(
batch_size,
padded_shapes={'inputs': max_length, 'targets': max_length},
padding_values={'inputs': 0, 'targets': 0},
drop_remainder=drop_remainder,
)
if prefetch_size:
dataset = dataset.prefetch(prefetch_size)
return dataset
def get_wmt_datasets(
config: ml_collections.ConfigDict,
*,
n_devices: int,
reverse_translation: bool = True,
vocab_path: str | None = None,
):
"""Load and return dataset of batched examples for use during training."""
if vocab_path is None:
vocab_path = os.path.expanduser('~/wmt_sentencepiece_model')
train_ds_builder = tfds.builder(config.dataset_name)
train_data = get_raw_dataset(
train_ds_builder, 'train', reverse_translation=reverse_translation
)
if config.eval_dataset_name:
eval_ds_builder = tfds.builder(config.eval_dataset_name)
else:
eval_ds_builder = train_ds_builder
eval_data = get_raw_dataset(
eval_ds_builder,
config.eval_split,
reverse_translation=reverse_translation,
)
# Tokenize data.
sp_tokenizer = tokenizer.load_or_train_tokenizer(
train_data,
vocab_path=vocab_path,
vocab_size=config.vocab_size,
max_corpus_chars=config.max_corpus_chars,
)
train_data = train_data.map(
tokenizer.TokenizeOp(sp_tokenizer), num_parallel_calls=AUTOTUNE
)
eval_data = eval_data.map(
tokenizer.TokenizeOp(sp_tokenizer), num_parallel_calls=AUTOTUNE
)
batch_size = config.per_device_batch_size * n_devices
train_ds = preprocess_wmt_data(
train_data,
shuffle=True,
num_epochs=None,
pack_examples=True,
batch_size=batch_size,
max_length=config.max_target_length,
)
eval_ds = preprocess_wmt_data(
eval_data,
shuffle=False,
pack_examples=False,
batch_size=batch_size,
max_length=config.max_eval_target_length,
)
predict_ds = preprocess_wmt_data(
eval_data,
shuffle=False,
pack_examples=False,
batch_size=batch_size,
max_length=config.max_predict_length,
drop_remainder=False,
)
return train_ds, eval_ds, predict_ds, sp_tokenizer
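# Minimal usage sketch (illustrative only). The config fields below mirror the
# ones read in `get_wmt_datasets`; the concrete values are placeholders.
def _example_build_wmt_datasets():
  config = ml_collections.ConfigDict()
  config.dataset_name = 'wmt17_translate/de-en'  # placeholder TFDS name
  config.eval_dataset_name = ''
  config.eval_split = 'validation'
  config.vocab_size = 32000
  config.max_corpus_chars = 10**7
  config.per_device_batch_size = 16
  config.max_target_length = 256
  config.max_eval_target_length = 256
  config.max_predict_length = 256
  return get_wmt_datasets(config, n_devices=8)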
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@examples@wmt@[email protected]_END.py
|
{
"filename": "TRES_options.py",
"repo_name": "amusecode/TRES",
"repo_path": "TRES_extracted/TRES-main/TRES_options.py",
"type": "Python"
}
|
import numpy as np
from amuse.units import units
#--------------------------------------------------------------------------------------------------------------------
# TRES general settings
REPORT_USER_WARNINGS = True
REPORT_DEBUG = False
REPORT_DT = False
REPORT_SN_EVOLUTION = False
REPORT_TRIPLE_EVOLUTION = False
MAKE_PLOTS = False
REPORT_BINARY_EVOLUTION = False
REPORT_FUNCTION_NAMES = False
REPORT_MASS_TRANSFER_STABILITY = False
GET_GYRATION_RADIUS_FROM_STELLAR_CODE = False
GET_AMC_FROM_STELLAR_CODE = False
no_stellar_evolution = False
#--------------------------------------------------------------------------------------------------------------------
#TRES constants
time_step_factor_stable_mt = 0.01 #1% mass loss during mass transfer
# lowering this to 0.005 makes the code twice as slow
time_step_factor_ecc = 0.01
#Rl_fraction = 0.8
# 0.01 -> error in the semi-major axis of about 0.5%
maximum_wind_mass_loss_factor = 0.01
error_dm = 0.05
#maximum_radius_change_factor = 0.005
error_dr = 0.05 #0.01
minimum_time_step = 1.e-9 |units.Myr
max_mass = 100 |units.MSun
min_mass = 0.08 |units.MSun # for primary stars
maximum_time_step_factor = 100.
maximum_time_step_factor_after_stable_mt = 5.
time_step_factor_find_RLOF = 0.5
#Rl_fraction = 0.9#1.0-10.*error_dr # ratio or star radius over Roche lobe at which time step is decreased
# radius grows maximally by error_dr
time_step_factor_kozai = 0.025 # 0.2*0.1, 0.2-> for error in kozai timescale, 0.1-> 10 steps per cycle
kozai_type_factor = 10.
maximum_time_step = np.inf|units.Myr
kanonical_neutron_star_mass = 1.4|units.MSun
fall_back_mass = 41 |units.MSun
#--------------------------------------------------------------------------------------------------------------------
#TPS general settings
REPORT_TPS = False
REPORT_USER_WARNINGS_TPS = False
EXCLUDE_SSO = True #in order to not simulate systems with exoplanet or brown dwarf secondaries and tertiaries
#--------------------------------------------------------------------------------------------------------------------
#TPS constants
precision = 1.e-10
absolute_max_mass = 100 |units.MSun
# for secondaries and tertiaries
if EXCLUDE_SSO:
absolute_min_mass = 0.0075|units.MSun
else:
absolute_min_mass = 0.2|units.MJupiter
#--------------------------------------------------------------------------------------------------------------------
|
amusecodeREPO_NAMETRESPATH_START.@TRES_extracted@TRES-main@[email protected]_END.py
|
{
"filename": "setup.py",
"repo_name": "ESA-Datalabs/XAMI-model",
"repo_path": "XAMI-model_extracted/XAMI-model-main/setup.py",
"type": "Python"
}
|
from setuptools import setup, find_packages
setup(
name='xami_model',
version='0.1',
packages=find_packages(),
install_requires=[
],
include_package_data=True,
package_data={
'': ['*.yaml', '*.yml', '*.png', '*.jpg'],
},
description='XAMI: XMM-Newton optical Artefact Mapping for astronomical Instance segmentation',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='Elisabeta-Iulia Dima and ESA contributors',
author_email='[email protected]',
url='https://github.com/ESA-Datalabs/XAMI-model',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires='>=3.6',
)
|
ESA-DatalabsREPO_NAMEXAMI-modelPATH_START.@XAMI-model_extracted@[email protected]@.PATH_END.py
|
{
"filename": "test_photometry.py",
"repo_name": "skypyproject/skypy",
"repo_path": "skypy_extracted/skypy-main/skypy/utils/tests/test_photometry.py",
"type": "Python"
}
|
import numpy as np
import pytest
from skypy.utils.photometry import HAS_SPECLITE
def test_magnitude_functions():
from skypy.utils.photometry import (luminosity_in_band,
luminosity_from_absolute_magnitude,
absolute_magnitude_from_luminosity)
# convert between absolute luminosity and magnitude
assert np.isclose(luminosity_from_absolute_magnitude(-22), 630957344.5)
assert np.isclose(absolute_magnitude_from_luminosity(630957344.5), -22)
# convert with standard luminosities
for ref, mag in luminosity_in_band.items():
assert np.isclose(luminosity_from_absolute_magnitude(mag, ref), 1.0)
assert np.isclose(absolute_magnitude_from_luminosity(1.0, ref), mag)
# error when unknown reference is used
with pytest.raises(KeyError):
luminosity_from_absolute_magnitude(0., 'unknown')
with pytest.raises(KeyError):
absolute_magnitude_from_luminosity(1., 'unknown')
@pytest.mark.skipif(not HAS_SPECLITE, reason='test requires speclite')
def test_mag_ab_standard_source():
from astropy import units
from speclite.filters import FilterResponse
from skypy.utils.photometry import mag_ab
# create a filter
filt_lam = np.logspace(0, 4, 1000)*units.AA
filt_tx = np.exp(-((filt_lam - 1000*units.AA)/(100*units.AA))**2)
filt_tx[[0, -1]] = 0
FilterResponse(wavelength=filt_lam, response=filt_tx,
meta=dict(group_name='test', band_name='filt'))
# test that the AB standard source has zero magnitude
lam = filt_lam # same grid to prevent interpolation issues
flam = 0.10885464149979998*units.Unit('erg s-1 cm-2 AA')/lam**2
m = mag_ab(lam, flam, 'test-filt')
assert np.isclose(m, 0)
@pytest.mark.skipif(not HAS_SPECLITE, reason='test requires speclite')
def test_mag_ab_redshift_dependence():
from astropy import units
from speclite.filters import FilterResponse
from skypy.utils.photometry import mag_ab
# make a wide tophat bandpass
filt_lam = [1.0e-10, 1.1e-10, 1.0e0, 0.9e10, 1.0e10]
filt_tx = [0., 1., 1., 1., 0.]
FilterResponse(wavelength=filt_lam, response=filt_tx,
meta=dict(group_name='test', band_name='filt'))
# create a narrow gaussian source
lam = np.logspace(-11, 11, 1000)*units.AA
flam = np.exp(-((lam - 100*units.AA)/(10*units.AA))**2)*units.Unit('erg s-1 cm-2 AA-1')
# array of redshifts
z = np.linspace(0, 1, 11)
# compute the AB magnitude at different redshifts
m = mag_ab(lam, flam, 'test-filt', redshift=z)
# compare with expected redshift dependence
np.testing.assert_allclose(m, m[0] - 2.5*np.log10(1 + z))
@pytest.mark.skipif(not HAS_SPECLITE, reason='test requires speclite')
def test_mag_ab_multi():
from astropy import units
from skypy.utils.photometry import mag_ab
from speclite.filters import FilterResponse
# 5 redshifts
z = np.linspace(0, 1, 5)
# 2 Gaussian bandpasses
filt_lam = np.logspace(0, 4, 1000) * units.AA
filt_mean = np.array([[1000], [2000]]) * units.AA
filt_width = np.array([[100], [10]]) * units.AA
filt_tx = np.exp(-((filt_lam-filt_mean)/filt_width)**2)
filt_tx[:, [0, -1]] = 0
FilterResponse(wavelength=filt_lam, response=filt_tx[0],
meta=dict(group_name='test', band_name='filt0'))
FilterResponse(wavelength=filt_lam, response=filt_tx[1],
meta=dict(group_name='test', band_name='filt1'))
# 3 Flat Spectra
# to prevent issues with interpolation, collect all redshifted filt_lam
lam = []
for z_ in z:
lam = np.union1d(lam, filt_lam.value/(1+z_))
lam = lam*filt_lam.unit
A = np.array([[2], [3], [4]])
flam = A * 0.10885464149979998*units.Unit('erg s-1 cm-2 AA')/lam**2
# Compare calculated magnitudes with truth
magnitudes = mag_ab(lam, flam, ['test-filt0', 'test-filt1'], redshift=z)
truth = -2.5 * np.log10(A * (1+z)).T[:, :, np.newaxis]
assert magnitudes.shape == (5, 3, 2)
np.testing.assert_allclose(*np.broadcast_arrays(magnitudes, truth), rtol=1e-4)
@pytest.mark.skipif(not HAS_SPECLITE, reason='test requires speclite')
def test_template_spectra():
from astropy import units
from skypy.utils.photometry import mag_ab, SpectrumTemplates
from astropy.cosmology import Planck15
from speclite.filters import FilterResponse
class TestTemplates(SpectrumTemplates):
'''Three flat templates'''
def __init__(self):
self.wavelength = np.logspace(-1, 4, 1000)*units.AA
A = np.array([[2], [3], [4]]) * 0.10885464149979998
self.templates = A * units.Unit('erg s-1 cm-2 AA') / self.wavelength**2
test_templates = TestTemplates()
lam, flam = test_templates.wavelength, test_templates.templates
# Gaussian bandpass
filt_lam = np.logspace(0, 4, 1000)*units.AA
filt_tx = np.exp(-((filt_lam - 1000*units.AA)/(100*units.AA))**2)
filt_tx[[0, -1]] = 0
FilterResponse(wavelength=filt_lam, response=filt_tx,
meta=dict(group_name='test', band_name='filt'))
# Each test galaxy is exactly one of the templates
coefficients = np.eye(3)
# Test absolute magnitudes
mt = test_templates.absolute_magnitudes(coefficients, 'test-filt')
m = mag_ab(lam, flam, 'test-filt')
np.testing.assert_allclose(mt, m)
# Test apparent magnitudes
redshift = np.array([1, 2, 3])
dm = Planck15.distmod(redshift).value
mt = test_templates.apparent_magnitudes(coefficients, redshift, 'test-filt', Planck15)
np.testing.assert_allclose(mt, m - 2.5*np.log10(1+redshift) + dm)
# Redshift interpolation test; linear interpolation sufficient over a small
# redshift range at low relative tolerance
z = np.linspace(0.1, 0.2, 3)
m_true = test_templates.apparent_magnitudes(coefficients, z, 'test-filt',
Planck15, resolution=4)
m_interp = test_templates.apparent_magnitudes(coefficients, z, 'test-filt',
Planck15, resolution=2)
np.testing.assert_allclose(m_true, m_interp, rtol=1e-5)
assert not np.all(m_true == m_interp)
@pytest.mark.skipif(HAS_SPECLITE, reason='test requires that speclite is not installed')
def test_speclite_not_installed():
"""
Regression test for #436
    Test that mag_ab raises the correct exception if speclite is not installed.
"""
from skypy.utils.photometry import mag_ab
wavelength = np.linspace(1, 10, 100)
spectrum = np.ones(10)
filter = 'bessell-B'
with pytest.raises(ImportError):
mag_ab(wavelength, spectrum, filter)
def test_magnitude_error_rykoff():
from skypy.utils.photometry import magnitude_error_rykoff
# Test broadcasting to same shape given array for each parameter and
# test for correct result.
magnitude = np.full((2, 1, 1, 1, 1), 21)
magnitude_limit = np.full((3, 1, 1, 1), 21)
magnitude_zp = np.full((5, 1, 1), 21)
a = np.full((7, 1), np.log(200))
b = np.zeros(11)
error = magnitude_error_rykoff(magnitude, magnitude_limit, magnitude_zp, a, b)
# test result
assert np.allclose(error, 0.25 / np.log(10))
# test shape
assert error.shape == (2, 3, 5, 7, 11)
# second test for result
magnitude = 20
magnitude_limit = 22.5
magnitude_zp = 25
b = 2
a = np.log(10) - 1.5 * b
error = magnitude_error_rykoff(magnitude, magnitude_limit, magnitude_zp, a, b)
assert np.isclose(error, 0.25 / np.log(10) / np.sqrt(10))
# test that error limit is returned if error is larger than error_limit
# The following set-up would give a value larger than 10
magnitude = 30
magnitude_limit = 25
magnitude_zp = 30
a = 0.5
b = 1.0
error_limit = 1
error = magnitude_error_rykoff(magnitude, magnitude_limit, magnitude_zp, a, b, error_limit)
assert error == error_limit
def test_logistic_completeness_function():
from skypy.utils.photometry import logistic_completeness_function
# Test that arguments broadcast correctly
m = np.full((2, 1, 1), 21)
m95 = np.full((3, 1), 22)
m50 = np.full(5, 23)
p = logistic_completeness_function(m, m95, m50)
assert p.shape == np.broadcast(m, m95, m50).shape
# Test result of completeness function for different given magnitudes
m95 = 24
m50 = 25
m = [np.finfo(np.float64).min, m95, m50, 2*m50-m95, np.finfo(np.float64).max]
p = logistic_completeness_function(m, m95, m50)
assert np.allclose(p, [1, 0.95, 0.5, 0.05, 0])
|
skypyprojectREPO_NAMEskypyPATH_START.@skypy_extracted@skypy-main@skypy@utils@tests@[email protected]_END.py
|
{
"filename": "testutils.py",
"repo_name": "aewallin/allantools",
"repo_path": "allantools_extracted/allantools-master/tests/testutils.py",
"type": "Python"
}
|
"""
Useful collection of functions for the allantools test-suite
"""
import sys
import gzip
import numpy
# read a simple data-file with phase or frequency numbers on each line
def read_datafile(filename):
p = []
if filename[-2:]=='gz':
with gzip.open(filename,mode='rt') as f:
for line in f:
if not line.startswith("#"): # skip comments
p.append(float(line))
else:
with open(filename) as f:
for line in f:
if not line.startswith("#"): # skip comments
p.append(float(line))
return p
# read a result-file, produced by copy/paste from Stable32
# note: header-lines need to be manually commented-out with "#"
def read_resultfile(filename):
rows = []
row = []
with open(filename) as f:
for line in f:
if not line.startswith("#"): # skip comments
row = []
l2 = line.split(" ")
l2 = [_f for _f in l2 if _f]
for n in range(len(l2)):
row.append(float(l2[n]))
rows.append(row)
return rows
# parse numbers from a Stable32 result-file
# the columns are:
# AF Tau # Alpha Min Sigma Mod Totdev Max Sigma
# AF = m, averaging factor i.e. tau=m*tau0
# # = n, number of pairs in the dev calculation
# alpha = noise PSD coefficient
def read_stable32(resultfile, datarate):
devresults = read_resultfile(resultfile)
print("Read ", len(devresults), " rows from ", resultfile)
rows=[]
# parse textfile produced by Stable32
for row in devresults:
if len(row) == 7: # typical ADEV result file has 7 columns of data
d={}
d['m']= row[0]
d['tau']= row[0] * (1 / float(datarate))
d['n']=row[2]
d['alpha']=row[3]
d['dev_min']=row[4]
d['dev']=row[5]
d['dev_max']=row[6]
rows.append(d)
elif len(row) == 4: # the MTIE/TIErms results are formatted slightly differently
d={}
d['m']= row[0]
d['tau']= row[0] * (1 / float(datarate))
d['n']=row[2]
d['dev']=row[3]
rows.append(d)
return rows
def to_fractional(data):
mu = numpy.mean(data)
return [(x-mu)/mu for x in data]
# test one tau-value (i.e. one row in the result file) at a time
# test a deviation function by:
# - running the function on the datafile
# - reading the correct answers from the resultfile
# - checking that tau, n, and dev are correct
def test_row_by_row(function, datafile, datarate, resultfile, verbose=False, tolerance=1e-4, frequency=False, normalize=False):
# if Stable32 results were given with more digits we could decrease tolerance
data = read_datafile(datafile)
if normalize: # convert frequencies in Hz to fractional frequencies
data = to_fractional(data)
print("Read ", len(data), " values from ", datafile)
s32rows = read_stable32(resultfile, datarate)
print("test of function ", function )
if verbose:
print("Tau N \t DEV(Stable32) \t DEV(allantools) \t rel.error\t bias")
n_errors=0
# run allantools algorithm, row by row
for s32data in s32rows:
if frequency:
(taus2, devs2, errs2, ns2) = function(data, rate=datarate,
data_type="freq",
taus=[s32data['tau']])
else:
(taus2, devs2, errs2, ns2) = function(data, rate=datarate,
taus=[s32data['tau']])
n_errors += check_equal( s32data['n'], ns2[0] )
n_errors += check_equal( s32data['tau'], taus2[0] )
n_errors += check_approx_equal( s32data['dev'], devs2[0], tolerance=tolerance, verbose=verbose )
if verbose:
rel_error = (devs2[0] - s32data['dev']) / s32data['dev']
bias = pow(s32data['dev']/devs2[0],2)
print("%.1f %d %0.6g \t %0.6g \t %0.6f \t %0.4f OK!" % ( s32data['tau'], s32data['n'], s32data['dev'], devs2[0], rel_error,bias))
def check_equal(a,b):
try:
assert ( a == b )
return 0
except:
print("ERROR a=", a, " b=", b)
assert(0)
return 1
def check_approx_equal(a1,a2, tolerance=1e-4, verbose=False):
# check the DEV result, with a given relative error tolerance
rel_error = (a2 - a1) / a1
bias = pow(a2/a1,2)
# tolerance = 1e-4 # if Stable32 results were given with more digits we could decrease tol
try:
assert ( abs(rel_error) < tolerance )
return 0
except:
print("ERROR %0.6g \t %0.6g \t rel_err = %0.6f \t %0.4f" % ( a1, a2, rel_error, bias))
assert(0)
return 1
|
aewallinREPO_NAMEallantoolsPATH_START.@allantools_extracted@allantools-master@[email protected]@.PATH_END.py
|
{
"filename": "NoiseScale.py",
"repo_name": "dokester/BayesicFitting",
"repo_path": "BayesicFitting_extracted/BayesicFitting-master/BayesicFitting/source/NoiseScale.py",
"type": "Python"
}
|
import numpy as numpy
from astropy import units
import math
from . import Tools
from .HyperParameter import HyperParameter
from .JeffreysPrior import JeffreysPrior
__author__ = "Do Kester"
__year__ = 2020
__license__ = "GPL3"
__version__ = "2.5.3"
__url__ = "https://www.bayesicfitting.nl"
__status__ = "Perpetual Beta"
# * This file is part of the BayesicFitting package.
# *
# * BayesicFitting is free software: you can redistribute it and/or modify
# * it under the terms of the GNU Lesser General Public License as
# * published by the Free Software Foundation, either version 3 of
# * the License, or ( at your option ) any later version.
# *
# * BayesicFitting is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU Lesser General Public License for more details.
# *
# * The GPL3 license can be found at <http://www.gnu.org/licenses/>.
# *
# * A JAVA version of this code was part of the Herschel Common
# * Science System (HCSS), also under GPL3.
# *
# * 2011 - 2014 Do Kester, SRON (JAVA code)
# * 2016 - 2020 Do Kester
class NoiseScale( HyperParameter ):
"""
Hyperparameter for the scale of a ScaledErrorDistribution
it is a measure of the noise.
    Information about the scale of the noise is stored in this class.
    It is either in the form of a fixed number, when the noise scale
    is known, or in the form of a Prior with limits.
    By default this prior is a JeffreysPrior.
    The full use of priors is reserved for Bayesian calculations as
    in NestedSampler.
Attributes
----------
scale : float
the value of the noiseScale. Default: 1.0
stdev : float
the standard deviation of the noise scale. Default: None
prior : Prior
the prior for the noiseScale. Default: JeffreysPrior
fixed : boolean
keep the noise scale fixed at the value given by scale.
default: True
minimum : boolean
automatic noise scaling with a minimum. default: False
"""
# *********CONSTRUCTORS***************************************************
def __init__( self, scale=1.0, isFixed=True, prior=None, limits=None,
copy=None ):
"""
Constructor.
Parameters
----------
scale : float
float value of the noise scale
isFixed : bool
True: Consider the hyperparameter as fixed
False: Optimize the parameter too (when relevant)
It might need a prior and/or limits to be set
The default prior is JeffreysPrior
prior : None or Prior
None : no prior set
Prior : the prior probability on scale
limits : None or list of 2 floats
None : no limits set
[lo,hi] : limits to be passed to the Prior.
If limits are set, the default for Prior is JeffreysPrior
copy : NoiseScale
NoiseScale to copy
"""
if limits is not None and prior is None :
prior = JeffreysPrior()
super( NoiseScale, self ).__init__( hypar=scale, isFixed=isFixed,
prior=prior, limits=limits )
self.minimum = False
if copy is not None :
self.minimum = copy.minimum
def copy( self ):
""" Return a copy. """
return NoiseScale( scale=self.scale, copy=self )
def __setattr__( self, name, value ) :
"""
Rename scale to hypar and stdevScale to stdev.
"""
if name == "scale" :
self.hypar = value
elif name == "stdevScale" :
self.stdev = value
else :
object.__setattr__( self, name, value )
def __getattr__( self, name ) :
if name == "scale" :
return self.hypar
elif name == "stdevScale" :
return self.stdev
else :
raise AttributeError( "Unknown attribute " + name )
return None
def minimumScale( self, scale=None ) :
"""
Fit the noise scale with a minimum value.
Parameters
----------
scale : float
the value of the noise scale. Default: noiseScale.scale
"""
if scale is not None : self.hypar = scale
self.minimum = True
def __str__( self ) :
return str( "Noise scale. value = %f" % self.hypar )
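# Minimal usage sketch (illustrative only): a fixed noise scale versus a
# fittable one with limits (which gets a JeffreysPrior by default).
def _exampleNoiseScale() :
    fixed = NoiseScale( scale=0.5 )
    fitted = NoiseScale( scale=1.0, isFixed=False, limits=[0.01,100] )
    print( fixed, fitted.scale )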
|
dokesterREPO_NAMEBayesicFittingPATH_START.@BayesicFitting_extracted@BayesicFitting-master@BayesicFitting@[email protected]@.PATH_END.py
|
{
"filename": "_textcase.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choroplethmapbox/hoverlabel/font/_textcase.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcaseValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="textcase",
parent_name="choroplethmapbox.hoverlabel.font",
**kwargs,
):
super(TextcaseValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
values=kwargs.pop("values", ["normal", "word caps", "upper", "lower"]),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@choroplethmapbox@hoverlabel@font@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "astronomyk/SimCADO",
"repo_path": "SimCADO_extracted/SimCADO-master/simcado/tests/tests_legacy_simcado/__init__.py",
"type": "Python"
}
|
astronomykREPO_NAMESimCADOPATH_START.@SimCADO_extracted@SimCADO-master@simcado@tests@tests_legacy_simcado@[email protected]_END.py
|
|
{
"filename": "_opacitysrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/marker/_opacitysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpacitysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="opacitysrc", parent_name="bar.marker", **kwargs):
super(OpacitysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@bar@marker@[email protected]_END.py
|
{
"filename": "StepPotSetup.py",
"repo_name": "jronayne/PyTransport",
"repo_path": "PyTransport_extracted/PyTransport-master/Examples/SingleField/StepPotSetup.py",
"type": "Python"
}
|
####################################### Setup file for the Step Potential example of Chen et al. ###########################################
import sympy as sym
import numpy as np
import math
import sys
############################################################################################################################################
location = "/Users/mulryne/Dropbox/PyTransportDist/PyTransport/" # this should be the location of the PyTransport folder
sys.path.append(location) # we add this location to the python path
import PyTransSetup
### Sets potential and compiles PyTransport, users may prefer to do this only once in a separate file (or comment after running below once) ###
nF=1
nP=4
f=sym.symarray('f',nF)
p=sym.symarray('p',nP)
## example step
V = 1.0/2.0 *p[0]**2*f[0]**2*(1.0 + p[1]*(sym.tanh((f[0]-p[2])/p[3])))
PyTransSetup.potential(V,nF,nP) # differentiates this potential and writes this potential and derivatives into c file when run (can be a
                                # little slow, and so one may not wish to run if recompiling to alter other properties such as tols)
PyTransSetup.compileName("Step") # this compiles the module with the new potential and places it in the location folder, and adds this folder to the path ready for use
############################################################################################################################################
|
jronayneREPO_NAMEPyTransportPATH_START.@PyTransport_extracted@PyTransport-master@Examples@[email protected]@.PATH_END.py
|
{
"filename": "_weight.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/scene/yaxis/tickfont/_weight.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="weight", parent_name="layout.scene.yaxis.tickfont", **kwargs
):
super(WeightValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
extras=kwargs.pop("extras", ["normal", "bold"]),
max=kwargs.pop("max", 1000),
min=kwargs.pop("min", 1),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@scene@yaxis@tickfont@[email protected]_END.py
|
{
"filename": "test.py",
"repo_name": "mlujnie/simple",
"repo_path": "simple_extracted/simple-main/tests/test.py",
"type": "Python"
}
|
import unittest
class TestStringMethods(unittest.TestCase):
def test_import_simple(self):
from simple.simple import LognormalIntensityMock
lim = LognormalIntensityMock("./tests/test_lim_input.yaml")
print(lim.sigma_beam)
lim.run()
self.assertEqual(lim.N_mesh[0], 16)
self.assertTrue(len(lim.cat['Position']) > 0)
lim.run(skip_lognormal=True)
lim.downsample_all_meshes([8,8,8])
lim.run(skip_lognormal=True)
def test_import_pk3dmodel(self):
from simple.pk_3d_model import Power_Spectrum_Model
from simple.simple import LognormalIntensityMock
lim = LognormalIntensityMock("./tests/test_lim_input.yaml")
pk3d = Power_Spectrum_Model(
"./tests/test_lim_input.yaml", do_model_shot_noise=False)
self.assertEqual(lim.N_mesh[0], pk3d.N_mesh[0])
print(pk3d.do_model_shot_noise)
if __name__ == '__main__':
unittest.main()
|
mlujnieREPO_NAMEsimplePATH_START.@simple_extracted@simple-main@[email protected]@.PATH_END.py
|
{
"filename": "_len.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmap/colorbar/_len.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="len", parent_name="choroplethmap.colorbar", **kwargs
):
super(LenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmap@colorbar@[email protected]_END.py
|
{
"filename": "_namelengthsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sankey/node/hoverlabel/_namelengthsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="namelengthsrc",
parent_name="sankey.node.hoverlabel",
**kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sankey@node@hoverlabel@[email protected]_END.py
|
{
"filename": "_sizesrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/hoverlabel/font/_sizesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="sizesrc", parent_name="funnel.hoverlabel.font", **kwargs
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@funnel@hoverlabel@font@[email protected]_END.py
|
{
"filename": "_cobyqa_py.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/optimize/_cobyqa_py.py",
"type": "Python"
}
|
import numpy as np
from threading import Lock
from ._optimize import _check_unknown_options
COBYQA_LOCK = Lock()
def _minimize_cobyqa(fun, x0, args=(), bounds=None, constraints=(),
callback=None, disp=False, maxfev=None, maxiter=None,
f_target=-np.inf, feasibility_tol=1e-8,
initial_tr_radius=1.0, final_tr_radius=1e-6, scale=False,
**unknown_options):
"""
Minimize a scalar function of one or more variables using the
Constrained Optimization BY Quadratic Approximations (COBYQA) algorithm [1]_.
.. versionadded:: 1.14.0
Options
-------
disp : bool
Set to True to print information about the optimization procedure.
Default is ``False``.
maxfev : int
Maximum number of function evaluations. Default is ``500 * n``, where
``n`` is the number of variables.
maxiter : int
Maximum number of iterations. Default is ``1000 * n``, where ``n`` is
the number of variables.
f_target : float
Target value for the objective function. The optimization procedure is
terminated when the objective function value of a feasible point (see
`feasibility_tol` below) is less than or equal to this target. Default
is ``-numpy.inf``.
feasibility_tol : float
Absolute tolerance for the constraint violation. Default is ``1e-8``.
initial_tr_radius : float
Initial trust-region radius. Typically, this value should be in the
order of one tenth of the greatest expected change to the variables.
Default is ``1.0``.
final_tr_radius : float
Final trust-region radius. It should indicate the accuracy required in
the final values of the variables. If provided, this option overrides
the value of `tol` in the `minimize` function. Default is ``1e-6``.
scale : bool
Set to True to scale the variables according to the bounds. If True and
if all the lower and upper bounds are finite, the variables are scaled
to be within the range :math:`[-1, 1]`. If any of the lower or upper
bounds is infinite, the variables are not scaled. Default is ``False``.
References
----------
.. [1] COBYQA
https://www.cobyqa.com/stable/
"""
from .._lib.cobyqa import minimize # import here to avoid circular imports
_check_unknown_options(unknown_options)
options = {
'disp': bool(disp),
'maxfev': int(maxfev) if maxfev is not None else 500 * len(x0),
'maxiter': int(maxiter) if maxiter is not None else 1000 * len(x0),
'target': float(f_target),
'feasibility_tol': float(feasibility_tol),
'radius_init': float(initial_tr_radius),
'radius_final': float(final_tr_radius),
'scale': bool(scale),
}
with COBYQA_LOCK:
return minimize(fun, x0, args, bounds, constraints, callback, options)
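# Minimal usage sketch (illustrative only): COBYQA is normally reached through
# `scipy.optimize.minimize` rather than by calling this helper directly.
def _example_cobyqa_usage():
    from scipy.optimize import minimize, rosen
    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    return minimize(rosen, x0, method='cobyqa', options={'maxfev': 5000})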
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@optimize@[email protected]_END.py
|
{
"filename": "repackage_great3_cut_info.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/devel/external/repackage_great3_cut_info.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
"""
The purpose of this script is to take data from assorted GREAT3-related files that store data about
the COSMOS galaxies and their parametric fits, and combine them into a single file that GalSim will
use to make cuts in the COSMOSCatalog class.
Set up to point to data that live in a particular spot, so likely nobody else can actually run this
script.
"""
import numpy as np
import pyfits
import os
data_dir = '/Users/rmandelb/great3/data-23.5'
dmag_file = 'real_galaxy_deltamag_info.fits'
sn_file = 'real_galaxy_image_selection_info.fits'
mask_file = 'real_galaxy_mask_info.fits'
out_file = 'real_galaxy_catalog_23.5_selection.fits'
# Load the appropriate data from each file.
dat = pyfits.getdata(os.path.join(data_dir, dmag_file))
ident = dat['IDENT']
dmag = dat['delta_mag']
dat = pyfits.getdata(os.path.join(data_dir, sn_file))
sn_ellip_gauss = dat['sn_ellip_gauss']
dat = pyfits.getdata(os.path.join(data_dir, mask_file))
min_mask_dist_pixels = dat['min_mask_dist_pixels']
average_mask_adjacent_pixel_count = dat['average_mask_adjacent_pixel_count']
peak_image_pixel_count = dat['peak_image_pixel_count']
# Stick them together into a single FITS table.
tbhdu = pyfits.new_table(pyfits.ColDefs([
pyfits.Column(name='IDENT',
format='J',
array=ident),
pyfits.Column(name='dmag',
format='D',
array=dmag),
pyfits.Column(name='sn_ellip_gauss',
format='D',
array=sn_ellip_gauss),
pyfits.Column(name='min_mask_dist_pixels',
format='D',
array=min_mask_dist_pixels),
pyfits.Column(name='average_mask_adjacent_pixel_count',
format='D',
array=average_mask_adjacent_pixel_count),
pyfits.Column(name='peak_image_pixel_count',
format='D',
array=peak_image_pixel_count)]
))
# Output to file.
out_file = os.path.join(data_dir, out_file)
print "Writing to file ",out_file
tbhdu.writeto(out_file)
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@devel@external@[email protected]_END.py
|
{
"filename": "test_stretch_state_mixin.py",
"repo_name": "glue-viz/glue",
"repo_path": "glue_extracted/glue-main/glue/viewers/common/tests/test_stretch_state_mixin.py",
"type": "Python"
}
|
import pytest
from astropy.visualization import LinearStretch, LogStretch
from glue.core.state_objects import State
from glue.viewers.common.stretch_state_mixin import StretchStateMixin
class ExampleStateWithStretch(State, StretchStateMixin):
pass
def test_not_set_up():
state = ExampleStateWithStretch()
with pytest.raises(Exception, match="setup_stretch_callback has not been called"):
state.stretch_object
class TestStretchStateMixin:
def setup_method(self, method):
self.state = ExampleStateWithStretch()
self.state.setup_stretch_callback()
def test_defaults(self):
assert self.state.stretch == "linear"
assert len(self.state.stretch_parameters) == 0
assert isinstance(self.state.stretch_object, LinearStretch)
def test_change_stretch(self):
self.state.stretch = "log"
assert self.state.stretch == "log"
assert len(self.state.stretch_parameters) == 0
assert isinstance(self.state.stretch_object, LogStretch)
def test_invalid_parameter(self):
with pytest.raises(
ValueError, match="Stretch object LinearStretch has no attribute foo"
):
self.state.stretch_parameters["foo"] = 1
def test_set_parameter(self):
pytest.importorskip('astropy', minversion='6.0')
self.state.stretch = "log"
assert self.state.stretch_object.a == 1000
        # Setting the stretch parameter 'a' is synced with the stretch object attribute
self.state.stretch_parameters["a"] = 200
assert self.state.stretch_object.a == 200
# Changing stretch resets the stretch parameter dictionary
self.state.stretch = "linear"
assert len(self.state.stretch_parameters) == 0
# And there is no memory of previous parameters
self.state.stretch = "log"
assert self.state.stretch_object.a == 1000
|
glue-vizREPO_NAMEgluePATH_START.@glue_extracted@glue-main@glue@viewers@common@tests@[email protected]_END.py
|
{
"filename": "_templateitemname.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/updatemenu/button/_templateitemname.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="templateitemname",
parent_name="layout.updatemenu.button",
**kwargs,
):
super(TemplateitemnameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@layout@updatemenu@button@[email protected]_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolargl/marker/line/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="scatterpolargl.marker.line", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
colorscale_path=kwargs.pop(
"colorscale_path", "scatterpolargl.marker.line.colorscale"
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolargl@marker@line@[email protected]_END.py
|
{
"filename": "_bordercolorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sankey/link/hoverlabel/_bordercolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="bordercolorsrc",
parent_name="sankey.link.hoverlabel",
**kwargs
):
super(BordercolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sankey@link@hoverlabel@[email protected]_END.py
|
{
"filename": "RI_imaging_QuantifAI_MAP_estimation.ipynb",
"repo_name": "astro-informatics/QuantifAI",
"repo_path": "QuantifAI_extracted/QuantifAI-main/examples/RI_imaging_QuantifAI_MAP_estimation.ipynb",
"type": "Jupyter Notebook"
}
|
# Compute the MAP estimation example with the `QuantifAI` model
In this notebook we:
- set hyperparameters,
- prepare the synthetic observations,
- define the model, likelihood and prior,
- estimate the MAP reconstruction through a convex optimisation algorithm,
- plot the MAP estimation result and the error.
```python
import os
import numpy as np
import time as time
# Import torch and select GPU
import torch
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
print(torch.cuda.is_available())
print(torch.cuda.device_count())
print(torch.cuda.current_device())
print(torch.cuda.get_device_name(torch.cuda.current_device()))
# Plot functions
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Radio and convex reg functions
import quantifai as qai
from quantifai.utils import to_numpy, to_tensor
from convex_reg import utils as utils_cvx_reg
```
True
1
0
NVIDIA A100-PCIE-40GB
## Set hyperparameters
```python
# Parameters
# Optimisation options for the MAP estimation
options = {"tol": 1e-5, "iter": 15000, "update_iter": 100, "record_iters": False}
# Save param
repo_dir = "./.."
# Test image name from ['M31', 'W28', 'CYN', '3c288']
img_name = "M31"
# Input noise level
input_snr = 30.0
# Define my torch types (CRR requires torch.float32)
myType = torch.float32
myComplexType = torch.complex64
# CRR load parameters
sigma_training = 5
t_model = 5
CRR_dir_name = "./../trained_models/"
# CRR parameters
lmbd = 5e4 # lambda parameter
mu = 20
```
# Prepare observations
```python
# Load image and mask
img, mat_mask = qai.helpers.load_imgs(img_name, repo_dir)
# Aliases
x = img
ground_truth = img
torch_img = torch.tensor(np.copy(img), dtype=myType, device=device).reshape(
(1, 1) + img.shape
)
phi = qai.operators.MaskedFourier_torch(
shape=img.shape, ratio=0.5, mask=mat_mask, norm="ortho", device=device
)
y = phi.dir_op(torch_img).detach().cpu().squeeze().numpy()
# Define noise level
eff_sigma = qai.helpers.compute_complex_sigma_noise(y, input_snr)
sigma = eff_sigma * np.sqrt(2)
# Generate noise
rng = np.random.default_rng(seed=0)
n_re = rng.normal(0, eff_sigma, y[y != 0].shape)
n_im = rng.normal(0, eff_sigma, y[y != 0].shape)
# Add noise
y[y != 0] += n_re + 1.0j * n_im
# Observation
torch_y = torch.tensor(np.copy(y), device=device, dtype=myComplexType).reshape(
(1,) + img.shape
)
# Generate first guess
x_init = torch.abs(phi.adj_op(torch_y))
```
WARNING: The following header keyword is invalid or follows an unrecognized non-standard convention:
INSTRUME [astropy.io.fits.card]
# Define likelihood and prior
```python
# Define the likelihood
likelihood = qai.operators.L2Norm_torch(
sigma=sigma,
data=torch_y,
Phi=phi,
)
# Lipschitz constant computed automatically by g, stored in g.beta
# Define real prox
prox_op = qai.operators.RealProx_torch()
```
```python
# Load CRR model
torch.set_grad_enabled(False)
# torch.set_num_threads(4)
exp_name = f"Sigma_{sigma_training}_t_{t_model}/"
if device.type == "cpu":
CRR_model = utils_cvx_reg.load_model(
CRR_dir_name + exp_name, "cpu", device_type="cpu"
)
elif device.type == "cuda":
CRR_model = utils_cvx_reg.load_model(
CRR_dir_name + exp_name, "cuda", device_type="gpu"
)
print(f"Number of parameters before pruning: {CRR_model.num_params}")
CRR_model.prune()
print(f"Number of parameters after pruning: {CRR_model.num_params}")
```
--- loading checkpoint from epoch 10 ---
---------------------
Building a CRR-NN model with
- [1, 8, 32] channels
- linear_spline activation functions
(LinearSpline(mode=conv, num_activations=32, init=zero, size=21, grid=0.010, monotonic_constraint=True.))
---------------------
Number of parameters before pruning: 13610
---------------------
PRUNNING
Found 22 filters with non-vanishing potential functions
---------------------
Number of parameters after pruning: 4183
```python
# [not required] initialize the eigenvector of dimension (size, size) associated with the largest eigenvalue
CRR_model.initializeEigen(size=100)
# compute bound via a power iteration which couples the activations and the convolutions
L_CRR = CRR_model.precise_lipschitz_bound(n_iter=100)
# the bound is stored in the model
# L_CRR = model.L.data.item()
print(f"Lipschitz bound {L_CRR:.3f}")
```
Lipschitz bound 0.781
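For intuition, here is a minimal NumPy sketch of a power iteration for the spectral norm of an ordinary matrix. It is not the CRR model's own `precise_lipschitz_bound` routine (which couples the activations and the convolutions); it only illustrates the underlying idea.
```python
import numpy as np

# Power iteration on A^T A converges to its largest eigenvalue,
# i.e. the squared spectral norm of A.
rng = np.random.default_rng(0)
A = rng.normal(size=(50, 50))
x = rng.normal(size=50)
for _ in range(200):
    x = A.T @ (A @ x)
    x /= np.linalg.norm(x)
sigma_max_sq = x @ (A.T @ (A @ x))  # Rayleigh quotient, ||x|| = 1
print(np.sqrt(sigma_max_sq), np.linalg.norm(A, 2))  # the two agree closely
```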
# Run the optimisation algorithm and compute the MAP reconstruction
```python
# Compute stepsize
alpha = 0.98 / (likelihood.beta + mu * lmbd * L_CRR)
x_hat = qai.optim.FISTA_CRR_torch(
x_init=x_init,
options=options,
likelihood=likelihood,
prox_op=prox_op,
CRR_model=CRR_model,
alpha=alpha,
lmbd=lmbd,
mu=mu,
)
```
[GD] 0 out of 15000 iterations, tol = 0.017048
[GD] 100 out of 15000 iterations, tol = 0.002369
[GD] 200 out of 15000 iterations, tol = 0.000744
[GD] 300 out of 15000 iterations, tol = 0.000317
[GD] 400 out of 15000 iterations, tol = 0.000190
[GD] 500 out of 15000 iterations, tol = 0.000064
[GD] 600 out of 15000 iterations, tol = 0.000080
[GD] 700 out of 15000 iterations, tol = 0.000037
[GD] 800 out of 15000 iterations, tol = 0.000037
[GD] 900 out of 15000 iterations, tol = 0.000024
[GD] 1000 out of 15000 iterations, tol = 0.000012
[GD] converged in 1005 iterations
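For reference, here is a minimal NumPy sketch of the generic FISTA (accelerated proximal-gradient) iteration that `FISTA_CRR_torch` follows in spirit; the library routine itself differs in its details. The toy LASSO problem below is an assumption used purely for illustration.
```python
import numpy as np

def fista(grad_f, prox_g, x0, step, n_iter=500):
    """Generic FISTA for min_x f(x) + g(x), with grad_f Lipschitz and prox_g known."""
    x = x0.copy()
    z = x0.copy()
    t = 1.0
    for _ in range(n_iter):
        x_new = prox_g(z - step * grad_f(z))           # proximal gradient step
        t_new = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t * t))
        z = x_new + ((t - 1.0) / t_new) * (x_new - x)  # momentum extrapolation
        x, t = x_new, t_new
    return x

# Toy problem: f(x) = 0.5*||Ax - b||^2, g(x) = lam*||x||_1 (soft-threshold prox).
rng = np.random.default_rng(0)
A, b, lam = rng.normal(size=(30, 60)), rng.normal(size=30), 0.1
L = np.linalg.norm(A, 2) ** 2                          # Lipschitz constant of grad f
x_demo = fista(lambda v: A.T @ (A @ v - b),
               lambda v: np.sign(v) * np.maximum(np.abs(v) - lam / L, 0.0),
               np.zeros(60), step=1.0 / L)
```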
# Plot MAP reconstruction
```python
# Convert to numpy
np_x_init = to_numpy(x_init)
x_map = x_hat.clone()
x_gt = np.copy(x)
np_x_gt = np.copy(x)
np_x_map = to_numpy(x_map)
# Need to replace zero values with very small numbers for the log plots
np_x_gt[np_x_gt == 0] = np.random.rand(np.sum(np_x_gt == 0)) * 1e-7
images = [np_x_gt, np_x_init, np_x_map, np_x_gt - np.abs(np_x_map)]
labels = ["Ground truth", "Dirty", "MAP reconstruction", "Residual"]
vmin_log = [-2.0, -2.0, -2.0, -3.0]
cmap = "cubehelix"
fig, axs = plt.subplots(1, 4, figsize=(26, 8), dpi=200)
for i in range(4):
im = axs[i].imshow(np.log10(np.abs(images[i])), cmap=cmap, vmax=0, vmin=vmin_log[i])
divider = make_axes_locatable(axs[i])
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig.colorbar(im, cax=cax, orientation="vertical")
cbar.ax.tick_params(labelsize=14)
if i > 0:
stats_str = "\n(SNR: {})".format(
round(qai.utils.eval_snr(x, images[i]), 2),
)
labels[i] += stats_str
axs[i].set_title(labels[i], fontsize=20)
axs[i].axis("off")
plt.show()
```

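The SNR quoted in the panel titles is computed by `qai.utils.eval_snr`; a common definition (assumed here, not verified against the library) is $\mathrm{SNR} = 20\log_{10}\big(\lVert x \rVert_2 / \lVert x - \hat{x} \rVert_2\big)$ dB, where $x$ is the ground truth and $\hat{x}$ the reconstruction.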
|
astro-informaticsREPO_NAMEQuantifAIPATH_START.@QuantifAI_extracted@QuantifAI-main@examples@[email protected]_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/funnel/marker/_line.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="line", parent_name="funnel.marker", **kwargs):
super(LineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Line"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
effect only if `marker.line.color` is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax`. Has
an effect only if `marker.line.color` is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
point. Has an effect only if
`marker.line.color` is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
Sets the colorscale. Has an effect only if
`marker.line.color` is set to a numerical array.
The colorscale must be an array containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.line.cmin` and
`marker.line.cmax`. Alternatively, `colorscale`
may be a palette name string of the following
list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
body,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color.
reversescale
Reverses the color mapping if true. Has an
effect only if `marker.line.color` is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
widthsrc
Sets the source reference on Chart Studio Cloud
for width.
""",
),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@funnel@marker@[email protected]_END.py
|
{
"filename": "executor.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/core/_numba/executor.py",
"type": "Python"
}
|
from __future__ import annotations
import functools
from typing import (
TYPE_CHECKING,
Any,
)
if TYPE_CHECKING:
from collections.abc import Callable
from pandas._typing import Scalar
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.util.numba_ import jit_user_function
@functools.cache
def generate_apply_looper(func, nopython=True, nogil=True, parallel=False):
if TYPE_CHECKING:
import numba
else:
numba = import_optional_dependency("numba")
nb_compat_func = jit_user_function(func)
@numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
def nb_looper(values, axis, *args):
# Operate on the first row/col in order to get
# the output shape
if axis == 0:
first_elem = values[:, 0]
dim0 = values.shape[1]
else:
first_elem = values[0]
dim0 = values.shape[0]
res0 = nb_compat_func(first_elem, *args)
# Use np.asarray to get shape for
# https://github.com/numba/numba/issues/4202#issuecomment-1185981507
buf_shape = (dim0,) + np.atleast_1d(np.asarray(res0)).shape
if axis == 0:
buf_shape = buf_shape[::-1]
buff = np.empty(buf_shape)
if axis == 1:
buff[0] = res0
for i in numba.prange(1, values.shape[0]):
buff[i] = nb_compat_func(values[i], *args)
else:
buff[:, 0] = res0
for j in numba.prange(1, values.shape[1]):
buff[:, j] = nb_compat_func(values[:, j], *args)
return buff
return nb_looper
@functools.cache
def make_looper(func, result_dtype, is_grouped_kernel, nopython, nogil, parallel):
if TYPE_CHECKING:
import numba
else:
numba = import_optional_dependency("numba")
if is_grouped_kernel:
@numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
def column_looper(
values: np.ndarray,
labels: np.ndarray,
ngroups: int,
min_periods: int,
*args,
):
result = np.empty((values.shape[0], ngroups), dtype=result_dtype)
na_positions = {}
for i in numba.prange(values.shape[0]):
output, na_pos = func(
values[i], result_dtype, labels, ngroups, min_periods, *args
)
result[i] = output
if len(na_pos) > 0:
na_positions[i] = np.array(na_pos)
return result, na_positions
else:
@numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
def column_looper(
values: np.ndarray,
start: np.ndarray,
end: np.ndarray,
min_periods: int,
*args,
):
result = np.empty((values.shape[0], len(start)), dtype=result_dtype)
na_positions = {}
for i in numba.prange(values.shape[0]):
output, na_pos = func(
values[i], result_dtype, start, end, min_periods, *args
)
result[i] = output
if len(na_pos) > 0:
na_positions[i] = np.array(na_pos)
return result, na_positions
return column_looper
default_dtype_mapping: dict[np.dtype, Any] = {
np.dtype("int8"): np.int64,
np.dtype("int16"): np.int64,
np.dtype("int32"): np.int64,
np.dtype("int64"): np.int64,
np.dtype("uint8"): np.uint64,
np.dtype("uint16"): np.uint64,
np.dtype("uint32"): np.uint64,
np.dtype("uint64"): np.uint64,
np.dtype("float32"): np.float64,
np.dtype("float64"): np.float64,
np.dtype("complex64"): np.complex128,
np.dtype("complex128"): np.complex128,
}
# TODO: Preserve complex dtypes
float_dtype_mapping: dict[np.dtype, Any] = {
np.dtype("int8"): np.float64,
np.dtype("int16"): np.float64,
np.dtype("int32"): np.float64,
np.dtype("int64"): np.float64,
np.dtype("uint8"): np.float64,
np.dtype("uint16"): np.float64,
np.dtype("uint32"): np.float64,
np.dtype("uint64"): np.float64,
np.dtype("float32"): np.float64,
np.dtype("float64"): np.float64,
np.dtype("complex64"): np.float64,
np.dtype("complex128"): np.float64,
}
identity_dtype_mapping: dict[np.dtype, Any] = {
np.dtype("int8"): np.int8,
np.dtype("int16"): np.int16,
np.dtype("int32"): np.int32,
np.dtype("int64"): np.int64,
np.dtype("uint8"): np.uint8,
np.dtype("uint16"): np.uint16,
np.dtype("uint32"): np.uint32,
np.dtype("uint64"): np.uint64,
np.dtype("float32"): np.float32,
np.dtype("float64"): np.float64,
np.dtype("complex64"): np.complex64,
np.dtype("complex128"): np.complex128,
}
def generate_shared_aggregator(
func: Callable[..., Scalar],
dtype_mapping: dict[np.dtype, np.dtype],
is_grouped_kernel: bool,
nopython: bool,
nogil: bool,
parallel: bool,
):
"""
Generate a Numba function that loops over the columns of a 2D object and applies
a 1D numba kernel over each column.
Parameters
----------
func : function
aggregation function to be applied to each column
dtype_mapping: dict or None
If not None, maps a dtype to a result dtype.
Otherwise, will fall back to default mapping.
is_grouped_kernel: bool, default False
Whether func operates using the group labels (True)
or using starts/ends arrays
If true, you also need to pass the number of groups to this function
nopython : bool
nopython to be passed into numba.jit
nogil : bool
nogil to be passed into numba.jit
parallel : bool
parallel to be passed into numba.jit
Returns
-------
Numba function
"""
# A wrapper around the looper function,
# to dispatch based on dtype since numba is unable to do that in nopython mode
# It also post-processes the values by inserting nans where number of observations
# is less than min_periods
# Cannot do this in numba nopython mode
# (you'll run into type-unification error when you cast int -> float)
def looper_wrapper(
values,
start=None,
end=None,
labels=None,
ngroups=None,
min_periods: int = 0,
**kwargs,
):
result_dtype = dtype_mapping[values.dtype]
column_looper = make_looper(
func, result_dtype, is_grouped_kernel, nopython, nogil, parallel
)
# Need to unpack kwargs since numba only supports *args
if is_grouped_kernel:
result, na_positions = column_looper(
values, labels, ngroups, min_periods, *kwargs.values()
)
else:
result, na_positions = column_looper(
values, start, end, min_periods, *kwargs.values()
)
if result.dtype.kind == "i":
# Look if na_positions is not empty
# If so, convert the whole block
# This is OK since int dtype cannot hold nan,
# so if min_periods not satisfied for 1 col, it is not satisfied for
# all columns at that index
for na_pos in na_positions.values():
if len(na_pos) > 0:
result = result.astype("float64")
break
# TODO: Optimize this
for i, na_pos in na_positions.items():
if len(na_pos) > 0:
result[i, na_pos] = np.nan
return result
return looper_wrapper
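# Illustration (added for clarity, not part of pandas): conceptually, the
# generated looper applies a 1D kernel column-by-column over a 2D block and
# patches NaNs where a window had fewer than min_periods observations.
# A plain-Python/NumPy analogue of the windowed (non-grouped) case:
#
#   def naive_looper(values, start, end, min_periods, kernel):
#       out = np.empty((values.shape[0], len(start)), dtype=np.float64)
#       for i in range(values.shape[0]):
#           for j, (s, e) in enumerate(zip(start, end)):
#               window = values[i, s:e]
#               out[i, j] = kernel(window) if window.size >= min_periods else np.nan
#       return out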
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@core@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "andizq/sf3dmodels",
"repo_path": "sf3dmodels_extracted/sf3dmodels-master/sf3dmodels/tools/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package provides varied useful tools (Under development).
"""
# Packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from .._astropy_init import *
# ----------------------------------------------------------------------------
if not _ASTROPY_SETUP_:
# For egg_info test builds to pass, put package imports here.
#from .example_mod import *
from . import transform
from .core import formatter
__all__ = ['transform', 'formatter']
|
andizqREPO_NAMEsf3dmodelsPATH_START.@sf3dmodels_extracted@sf3dmodels-master@sf3dmodels@tools@[email protected]_END.py
|
{
"filename": "_rangebreaks.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/yaxis/_rangebreaks.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class RangebreaksValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(self, plotly_name="rangebreaks", parent_name="layout.yaxis", **kwargs):
super(RangebreaksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Rangebreak"),
data_docs=kwargs.pop(
"data_docs",
"""
bounds
Sets the lower and upper bounds of this axis
rangebreak. Can be used with `pattern`.
dvalue
Sets the size of each `values` item. The
default is one day in milliseconds.
enabled
Determines whether this axis rangebreak is
enabled or disabled. Please note that
`rangebreaks` only work for "date" axis type.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
pattern
Determines a pattern on the time line that
generates breaks. If *day of week* - days of
the week in English e.g. 'Sunday' or `sun`
(matching is case-insensitive and considers
only the first three characters), as well as
Sunday-based integers between 0 and 6. If
"hour" - hour (24-hour clock) as decimal
numbers between 0 and 24.
Examples: - { pattern: 'day of week', bounds:
[6, 1] } or simply { bounds: ['sat', 'mon'] }
breaks from Saturday to Monday (i.e. skips the
weekends). - { pattern: 'hour', bounds: [17, 8]
} breaks from 5pm to 8am (i.e. skips non-work
hours).
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
values
Sets the coordinate values corresponding to the
rangebreaks. An alternative to `bounds`. Use
`dvalue` to set the size of the values along
the axis.
""",
),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@layout@yaxis@[email protected]_END.py
|
{
"filename": "records.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py2/numpy/core/records.py",
"type": "Python"
}
|
"""
Record Arrays
=============
Record arrays expose the fields of structured arrays as properties.
Most commonly, ndarrays contain elements of a single type, e.g. floats,
integers, bools etc. However, it is possible for elements to be combinations
of these using structured types, such as::
>>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])
>>> a
array([(1, 2.0), (1, 2.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Here, each element consists of two fields: x (an int), and y (a float).
This is known as a structured array. The different fields are analogous
to columns in a spread-sheet. The different fields can be accessed as
one would a dictionary::
>>> a['x']
array([1, 1])
>>> a['y']
array([ 2., 2.])
Record arrays allow us to access fields as properties::
>>> ar = np.rec.array(a)
>>> ar.x
array([1, 1])
>>> ar.y
array([ 2., 2.])
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import warnings
from . import numeric as sb
from . import numerictypes as nt
from numpy.compat import isfileobj, bytes, long, unicode, os_fspath
from numpy.core.overrides import set_module
from .arrayprint import get_printoptions
# All of the functions allow formats to be a dtype
__all__ = ['record', 'recarray', 'format_parser']
ndarray = sb.ndarray
_byteorderconv = {'b':'>',
'l':'<',
'n':'=',
'B':'>',
'L':'<',
'N':'=',
'S':'s',
's':'s',
'>':'>',
'<':'<',
'=':'=',
'|':'|',
'I':'|',
'i':'|'}
# formats regular expression
# allows multidimension spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
numfmt = nt.typeDict
def find_duplicate(list):
"""Find duplication in a list, return a list of duplicated elements"""
dup = []
for i in range(len(list)):
if (list[i] in list[i + 1:]):
if (list[i] not in dup):
dup.append(list[i])
return dup
@set_module('numpy')
class format_parser(object):
"""
Class to convert formats, names, titles description to a dtype.
After constructing the format_parser object, the dtype attribute is
the converted data-type:
``dtype = format_parser(formats, names, titles).dtype``
Attributes
----------
dtype : dtype
The converted data-type.
Parameters
----------
formats : str or list of str
The format description, either specified as a string with
comma-separated format descriptions in the form ``'f8, i4, a5'``, or
a list of format description strings in the form
``['f8', 'i4', 'a5']``.
names : str or list/tuple of str
The field names, either specified as a comma-separated string in the
form ``'col1, col2, col3'``, or as a list or tuple of strings in the
form ``['col1', 'col2', 'col3']``.
An empty list can be used, in that case default field names
('f0', 'f1', ...) are used.
titles : sequence
Sequence of title strings. An empty list can be used to leave titles
out.
aligned : bool, optional
If True, align the fields by padding as the C-compiler would.
Default is False.
byteorder : str, optional
If specified, all the fields will be changed to the
provided byte-order. Otherwise, the default byte-order is
used. For all available string specifiers, see `dtype.newbyteorder`.
See Also
--------
dtype, typename, sctype2char
Examples
--------
>>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
... ['T1', 'T2', 'T3']).dtype
dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'),
(('T3', 'col3'), '|S5')])
`names` and/or `titles` can be empty lists. If `titles` is an empty list,
titles will simply not appear. If `names` is empty, default field names
will be used.
>>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
... []).dtype
dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '|S5')])
>>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype
dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')])
"""
def __init__(self, formats, names, titles, aligned=False, byteorder=None):
self._parseFormats(formats, aligned)
self._setfieldnames(names, titles)
self._createdescr(byteorder)
self.dtype = self._descr
def _parseFormats(self, formats, aligned=0):
""" Parse the field formats """
if formats is None:
raise ValueError("Need formats argument")
if isinstance(formats, list):
if len(formats) < 2:
formats.append('')
formats = ','.join(formats)
dtype = sb.dtype(formats, aligned)
fields = dtype.fields
if fields is None:
dtype = sb.dtype([('f1', dtype)], aligned)
fields = dtype.fields
keys = dtype.names
self._f_formats = [fields[key][0] for key in keys]
self._offsets = [fields[key][1] for key in keys]
self._nfields = len(keys)
def _setfieldnames(self, names, titles):
"""convert input field names into a list and assign to the _names
attribute """
if (names):
if (type(names) in [list, tuple]):
pass
elif isinstance(names, (str, unicode)):
names = names.split(',')
else:
raise NameError("illegal input names %s" % repr(names))
self._names = [n.strip() for n in names[:self._nfields]]
else:
self._names = []
# if the names are not specified, they will be assigned as
# "f0, f1, f2,..."
# if not enough names are specified, they will be assigned as "f[n],
# f[n+1],..." etc. where n is the number of specified names..."
self._names += ['f%d' % i for i in range(len(self._names),
self._nfields)]
# check for redundant names
_dup = find_duplicate(self._names)
if _dup:
raise ValueError("Duplicate field names: %s" % _dup)
if (titles):
self._titles = [n.strip() for n in titles[:self._nfields]]
else:
self._titles = []
titles = []
if (self._nfields > len(titles)):
self._titles += [None] * (self._nfields - len(titles))
def _createdescr(self, byteorder):
descr = sb.dtype({'names':self._names,
'formats':self._f_formats,
'offsets':self._offsets,
'titles':self._titles})
if (byteorder is not None):
byteorder = _byteorderconv[byteorder[0]]
descr = descr.newbyteorder(byteorder)
self._descr = descr
class record(nt.void):
"""A data-type scalar that allows field access as attribute lookup.
"""
# manually set name and module so that this class's type shows up
# as numpy.record when printed
__name__ = 'record'
__module__ = 'numpy'
def __repr__(self):
if get_printoptions()['legacy'] == '1.13':
return self.__str__()
return super(record, self).__repr__()
def __str__(self):
if get_printoptions()['legacy'] == '1.13':
return str(self.item())
return super(record, self).__str__()
def __getattribute__(self, attr):
if attr in ['setfield', 'getfield', 'dtype']:
return nt.void.__getattribute__(self, attr)
try:
return nt.void.__getattribute__(self, attr)
except AttributeError:
pass
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
obj = self.getfield(*res[:2])
# if it has fields return a record,
# otherwise return the object
try:
dt = obj.dtype
except AttributeError:
#happens if field is Object type
return obj
if dt.names is not None:
return obj.view((self.__class__, obj.dtype))
return obj
else:
raise AttributeError("'record' object has no "
"attribute '%s'" % attr)
def __setattr__(self, attr, val):
if attr in ['setfield', 'getfield', 'dtype']:
raise AttributeError("Cannot set '%s' attribute" % attr)
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
return self.setfield(val, *res[:2])
else:
if getattr(self, attr, None):
return nt.void.__setattr__(self, attr, val)
else:
raise AttributeError("'record' object has no "
"attribute '%s'" % attr)
def __getitem__(self, indx):
obj = nt.void.__getitem__(self, indx)
# copy behavior of record.__getattribute__,
if isinstance(obj, nt.void) and obj.dtype.names is not None:
return obj.view((self.__class__, obj.dtype))
else:
# return a single element
return obj
def pprint(self):
"""Pretty-print all fields."""
# pretty-print all fields
names = self.dtype.names
maxlen = max(len(name) for name in names)
fmt = '%% %ds: %%s' % maxlen
rows = [fmt % (name, getattr(self, name)) for name in names]
return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
# named fields already) The biggest difference is that it can use
# attribute-lookup to find the fields and it is constructed using
# a record.
# If byteorder is given it forces a particular byteorder on all
# the fields (and any subfields)
class recarray(ndarray):
"""Construct an ndarray that allows field access using attributes.
Arrays may have a data-types containing fields, analogous
to columns in a spread sheet. An example is ``[(x, int), (y, float)]``,
where each entry in the array is a pair of ``(int, float)``. Normally,
these attributes are accessed using dictionary lookups such as ``arr['x']``
and ``arr['y']``. Record arrays allow the fields to be accessed as members
of the array, using ``arr.x`` and ``arr.y``.
Parameters
----------
shape : tuple
Shape of output array.
dtype : data-type, optional
The desired data-type. By default, the data-type is determined
from `formats`, `names`, `titles`, `aligned` and `byteorder`.
formats : list of data-types, optional
A list containing the data-types for the different columns, e.g.
``['i4', 'f8', 'i4']``. `formats` does *not* support the new
convention of using types directly, i.e. ``(int, float, int)``.
Note that `formats` must be a list, not a tuple.
Given that `formats` is somewhat limited, we recommend specifying
`dtype` instead.
names : tuple of str, optional
The name of each column, e.g. ``('x', 'y', 'z')``.
buf : buffer, optional
By default, a new array is created of the given shape and data-type.
If `buf` is specified and is an object exposing the buffer interface,
the array will use the memory from the existing buffer. In this case,
the `offset` and `strides` keywords are available.
Other Parameters
----------------
titles : tuple of str, optional
Aliases for column names. For example, if `names` were
``('x', 'y', 'z')`` and `titles` is
``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
byteorder : {'<', '>', '='}, optional
Byte-order for all fields.
aligned : bool, optional
Align the fields in memory as the C-compiler would.
strides : tuple of ints, optional
Buffer (`buf`) is interpreted according to these strides (strides
define how many bytes each array element, row, column, etc.
occupy in memory).
offset : int, optional
Start reading buffer (`buf`) from this offset onwards.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Returns
-------
rec : recarray
Empty array of the given shape and type.
See Also
--------
rec.fromrecords : Construct a record array from data.
record : fundamental data-type for `recarray`.
format_parser : determine a data-type from formats, names, titles.
Notes
-----
This constructor can be compared to ``empty``: it creates a new record
array but does not fill it with data. To create a record array from data,
use one of the following methods:
1. Create a standard ndarray and convert it to a record array,
using ``arr.view(np.recarray)``
2. Use the `buf` keyword.
3. Use `np.rec.fromrecords`.
Examples
--------
Create an array with two fields, ``x`` and ``y``:
>>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
>>> x
array([(1.0, 2), (3.0, 4)],
dtype=[('x', '<f8'), ('y', '<i4')])
>>> x['x']
array([ 1., 3.])
View the array as a record array:
>>> x = x.view(np.recarray)
>>> x.x
array([ 1., 3.])
>>> x.y
array([2, 4])
Create a new, empty record array:
>>> np.recarray((2,),
... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
(3471280, 1.2134086255804012e-316, 0)],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
"""
# manually set name and module so that this class's type shows
# up as "numpy.recarray" when printed
__name__ = 'recarray'
__module__ = 'numpy'
def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False, order='C'):
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
if buf is None:
self = ndarray.__new__(subtype, shape, (record, descr), order=order)
else:
self = ndarray.__new__(subtype, shape, (record, descr),
buffer=buf, offset=offset,
strides=strides, order=order)
return self
def __array_finalize__(self, obj):
if self.dtype.type is not record and self.dtype.names is not None:
# if self.dtype is not np.record, invoke __setattr__ which will
# convert it to a record if it is a void dtype.
self.dtype = self.dtype
def __getattribute__(self, attr):
# See if ndarray has this attr, and return it if so. (note that this
# means a field with the same name as an ndarray attr cannot be
# accessed by attribute).
try:
return object.__getattribute__(self, attr)
except AttributeError: # attr must be a fieldname
pass
# look for a field with this name
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("recarray has no attribute %s" % attr)
obj = self.getfield(*res)
# At this point obj will always be a recarray, since (see
# PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is
# non-structured, convert it to an ndarray. Then if obj is structured
# with void type convert it to the same dtype.type (eg to preserve
# numpy.record type if present), since nested structured fields do not
# inherit type. Don't do this for non-void structures though.
if obj.dtype.names is not None:
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
else:
return obj.view(ndarray)
# Save the dictionary.
# If the attr is a field name and not in the saved dictionary
# Undo any "setting" of the attribute and do a setfield
# Thus, you can't create attributes on-the-fly that are field names.
def __setattr__(self, attr, val):
# Automatically convert (void) structured types to records
# (but not non-void structures, subarrays, or non-structured voids)
if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None:
val = sb.dtype((record, val))
newattr = attr not in self.__dict__
try:
ret = object.__setattr__(self, attr, val)
except Exception:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
exctype, value = sys.exc_info()[:2]
raise exctype(value)
else:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
return ret
if newattr:
# We just added this one or this setattr worked on an
# internal attribute.
try:
object.__delattr__(self, attr)
except Exception:
return ret
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
return self.setfield(val, *res)
def __getitem__(self, indx):
obj = super(recarray, self).__getitem__(indx)
# copy behavior of getattr, except that here
# we might also be returning a single element
if isinstance(obj, ndarray):
if obj.dtype.names is not None:
obj = obj.view(type(self))
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
else:
return obj.view(type=ndarray)
else:
# return a single element
return obj
def __repr__(self):
repr_dtype = self.dtype
if (self.dtype.type is record
or (not issubclass(self.dtype.type, nt.void))):
# If this is a full record array (has numpy.record dtype),
# or if it has a scalar (non-void) dtype with no records,
# represent it using the rec.array function. Since rec.array
# converts dtype to a numpy.record for us, convert back
# to non-record before printing
if repr_dtype.type is record:
repr_dtype = sb.dtype((nt.void, repr_dtype))
prefix = "rec.array("
fmt = 'rec.array(%s,%sdtype=%s)'
else:
# otherwise represent it using np.array plus a view
# This should only happen if the user is playing
# strange games with dtypes.
prefix = "array("
fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)'
# get data/shape string. logic taken from numeric.array_repr
if self.size > 0 or self.shape == (0,):
lst = sb.array2string(
self, separator=', ', prefix=prefix, suffix=',')
else:
# show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(self.shape),)
lf = '\n'+' '*len(prefix)
if get_printoptions()['legacy'] == '1.13':
lf = ' ' + lf # trailing space
return fmt % (lst, lf, repr_dtype)
def field(self, attr, val=None):
if isinstance(attr, int):
names = ndarray.__getattribute__(self, 'dtype').names
attr = names[attr]
fielddict = ndarray.__getattribute__(self, 'dtype').fields
res = fielddict[attr][:2]
if val is None:
obj = self.getfield(*res)
if obj.dtype.names is not None:
return obj
return obj.view(ndarray)
else:
return self.setfield(val, *res)
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
""" create a record array from a (flat) list of arrays
>>> x1=np.array([1,2,3,4])
>>> x2=np.array(['a','dd','xyz','12'])
>>> x3=np.array([1.1,2,3,4])
>>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
>>> print(r[1])
(2, 'dd', 2.0)
>>> x1[1]=34
>>> r.a
array([1, 2, 3, 4])
"""
arrayList = [sb.asarray(x) for x in arrayList]
if shape is None or shape == 0:
shape = arrayList[0].shape
if isinstance(shape, int):
shape = (shape,)
if formats is None and dtype is None:
# go through each object in the list to see if it is an ndarray
# and determine the formats.
formats = []
for obj in arrayList:
if not isinstance(obj, ndarray):
raise ValueError("item in the array list must be an ndarray.")
formats.append(obj.dtype.str)
formats = ','.join(formats)
if dtype is not None:
descr = sb.dtype(dtype)
_names = descr.names
else:
parsed = format_parser(formats, names, titles, aligned, byteorder)
_names = parsed._names
descr = parsed._descr
# Determine shape from data-type.
if len(descr) != len(arrayList):
raise ValueError("mismatch between the number of fields "
"and the number of arrays")
d0 = descr[0].shape
nn = len(d0)
if nn > 0:
shape = shape[:-nn]
for k, obj in enumerate(arrayList):
nn = descr[k].ndim
testshape = obj.shape[:obj.ndim - nn]
if testshape != shape:
raise ValueError("array-shape mismatch in array %d" % k)
_array = recarray(shape, descr)
# populate the record array (makes a copy)
for i in range(len(arrayList)):
_array[_names[i]] = arrayList[i]
return _array
def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None):
""" create a recarray from a list of records in text form
The data in the same field can be heterogeneous; they will be promoted
to the highest data type. This method is intended for creating
smaller record arrays. If used to create a large array without formats
defined, e.g.
r=fromrecords([(2,3.,'abc')]*100000)
it can be slow.
If formats is None, then this will auto-detect formats. Use list of
tuples rather than list of lists for faster processing.
>>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
... names='col1,col2,col3')
>>> print(r[0])
(456, 'dbe', 1.2)
>>> r.col1
array([456, 2])
>>> r.col2
array(['dbe', 'de'],
dtype='|S3')
>>> import pickle
>>> print(pickle.loads(pickle.dumps(r)))
[(456, 'dbe', 1.2) (2, 'de', 1.3)]
"""
if formats is None and dtype is None: # slower
obj = sb.array(recList, dtype=object)
arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])]
return fromarrays(arrlist, formats=formats, shape=shape, names=names,
titles=titles, aligned=aligned, byteorder=byteorder)
if dtype is not None:
descr = sb.dtype((record, dtype))
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
try:
retval = sb.array(recList, dtype=descr)
except (TypeError, ValueError):
if (shape is None or shape == 0):
shape = len(recList)
if isinstance(shape, (int, long)):
shape = (shape,)
if len(shape) > 1:
raise ValueError("Can only deal with 1-d array.")
_array = recarray(shape, descr)
for k in range(_array.size):
_array[k] = tuple(recList[k])
# list of lists instead of list of tuples ?
# 2018-02-07, 1.14.1
warnings.warn(
"fromrecords expected a list of tuples, may have received a list "
"of lists instead. In the future that will raise an error",
FutureWarning, stacklevel=2)
return _array
else:
if shape is not None and retval.shape != shape:
retval.shape = shape
res = retval.view(recarray)
return res
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
""" create a (read-only) record array from binary data contained in
a string"""
if dtype is None and formats is None:
raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
itemsize = descr.itemsize
if (shape is None or shape == 0 or shape == -1):
shape = (len(datastring) - offset) // itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
return _array
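# Example (illustrative, not part of the original module): two little-endian
# (int32, float64) records packed into a byte string and read back:
#   >>> import struct
#   >>> buf = struct.pack('<idid', 1, 2.0, 3, 4.0)
#   >>> r = fromstring(buf, formats='i4,f8', byteorder='<')
#   >>> (r.f0, r.f1)
#   (array([1, 3]), array([2., 4.]))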
def get_remaining_size(fd):
try:
fn = fd.fileno()
except AttributeError:
return os.path.getsize(fd.name) - fd.tell()
st = os.fstat(fn)
size = st.st_size - fd.tell()
return size
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create an array from binary file data
If file is a string or a path-like object then that file is opened,
else it is assumed to be a file object. The file object must
support random access (i.e. it must have tell and seek methods).
>>> from tempfile import TemporaryFile
>>> a = np.empty(10,dtype='f8,i4,a5')
>>> a[5] = (0.5,10,'abcde')
>>>
>>> fd=TemporaryFile()
>>> a = a.newbyteorder('<')
>>> a.tofile(fd)
>>>
>>> fd.seek(0)
>>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
... byteorder='<')
>>> print(r[5])
(0.5, 10, 'abcde')
>>> r.shape
(10,)
"""
if dtype is None and formats is None:
raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")
if (shape is None or shape == 0):
shape = (-1,)
elif isinstance(shape, (int, long)):
shape = (shape,)
if isfileobj(fd):
# file already opened
name = 0
else:
# open file
fd = open(os_fspath(fd), 'rb')
name = 1
if (offset > 0):
fd.seek(offset, 1)
size = get_remaining_size(fd)
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
itemsize = descr.itemsize
shapeprod = sb.array(shape).prod(dtype=nt.intp)
shapesize = shapeprod * itemsize
if shapesize < 0:
shape = list(shape)
shape[shape.index(-1)] = size // -shapesize
shape = tuple(shape)
shapeprod = sb.array(shape).prod(dtype=nt.intp)
nbytes = shapeprod * itemsize
if nbytes > size:
raise ValueError(
"Not enough bytes left in file for specified shape and type")
# create the array
_array = recarray(shape, descr)
nbytesread = fd.readinto(_array.data)
if nbytesread != nbytes:
raise IOError("Didn't read as many bytes as expected")
if name:
fd.close()
return _array
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None, copy=True):
"""Construct a record array from a wide-variety of objects.
"""
if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and
(formats is None) and (dtype is None)):
raise ValueError("Must define formats (or dtype) if object is "
"None, string, or an open file")
kwds = {}
if dtype is not None:
dtype = sb.dtype(dtype)
elif formats is not None:
dtype = format_parser(formats, names, titles,
aligned, byteorder)._descr
else:
kwds = {'formats': formats,
'names': names,
'titles': titles,
'aligned': aligned,
'byteorder': byteorder
}
if obj is None:
if shape is None:
raise ValueError("Must define a shape if obj is None")
return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
elif isinstance(obj, bytes):
return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
elif isinstance(obj, (list, tuple)):
if isinstance(obj[0], (tuple, list)):
return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
else:
return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
elif isinstance(obj, recarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
return new
elif isfileobj(obj):
return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
elif isinstance(obj, ndarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
return new.view(recarray)
else:
interface = getattr(obj, "__array_interface__", None)
if interface is None or not isinstance(interface, dict):
raise ValueError("Unknown input type")
obj = sb.array(obj)
if dtype is not None and (obj.dtype != dtype):
obj = obj.view(dtype)
return obj.view(recarray)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py2@numpy@[email protected]@.PATH_END.py
|
{
"filename": "create_draft_message.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/tools/office365/create_draft_message.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import O365CreateDraftMessage
from langchain_community.tools.office365.create_draft_message import (
CreateDraftMessageSchema,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CreateDraftMessageSchema": (
"langchain_community.tools.office365.create_draft_message"
),
"O365CreateDraftMessage": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CreateDraftMessageSchema",
"O365CreateDraftMessage",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@tools@office365@[email protected]_END.py
|
{
"filename": "prop_define_entrance.py",
"repo_name": "ajeldorado/falco-python",
"repo_path": "falco-python_extracted/falco-python-master/falco/proper/prop_define_entrance.py",
"type": "Python"
}
|
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
import falco.proper as proper
import numpy as np
def prop_define_entrance(wf):
"""Establish entrance aperture function for later use by pyPROPER routines.
The input image describes the entrance aperture amplitude (0 to 1). This
routine then normalizes the wavefront to have a total intensity of one.
Parameters
----------
wf : obj
WaveFront class object
Returns
-------
None
"""
total_original_pupil = np.sum(np.abs(wf.wfarr)**2)
proper.total_original_pupil = total_original_pupil
wf.wfarr /= np.sqrt(total_original_pupil)
return
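# Illustration (added, not part of PROPER): the normalization above makes the
# total intensity sum(|wfarr|**2) equal to one, e.g.
#   a = np.ones((4, 4), dtype=complex)   # toy aperture amplitude
#   a /= np.sqrt(np.sum(np.abs(a)**2))
#   np.sum(np.abs(a)**2)                 # -> 1.0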
|
ajeldoradoREPO_NAMEfalco-pythonPATH_START.@falco-python_extracted@falco-python-master@falco@proper@[email protected]_END.py
|
{
"filename": "setup.py",
"repo_name": "ryanhausen/fitsmap",
"repo_path": "fitsmap_extracted/fitsmap-master/setup.py",
"type": "Python"
}
|
"""
MIT License
Copyright 2023 Ryan Hausen and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# https://pythonhosted.org/an_example_pypi_project/setuptools.html
import os
from setuptools import setup, find_packages
def read(fname):
"""Helper for README file."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
with open("./fitsmap/__version__.py", "r") as f:
version = f.readlines()[0].strip().replace('"', "")
REQUIRES = [
"astropy",
"cbor2",
"numpy",
"matplotlib",
"pillow",
"ray",
"scikit-image",
"tqdm",
"click",
"protobuf<4.21", # mapbox-vector-tile requires protobuf<4.21,>=3.0.0
"mapbox_vector_tile",
]
setup(
name="fitsmap",
version=version,
author="Ryan Hausen",
author_email="[email protected]",
description=("Turn fits files/catalogs into a leafletjs map"),
license="MIT",
keywords="tools fits leaflet",
url="https://github.com/ryanhausen/fitsmap",
packages=find_packages(exclude="fitsmap.tests"),
include_package_data=True,
install_requires=REQUIRES,
entry_points={"console_scripts": ["fitsmap=fitsmap.__main__:cli"]},
long_description=read("README.rst"),
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: PyPy",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
"Development Status :: 4 - Beta",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Topic :: Scientific/Engineering",
],
)
|
ryanhausenREPO_NAMEfitsmapPATH_START.@fitsmap_extracted@[email protected]@.PATH_END.py
|
{
"filename": "rotations.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/modeling/rotations.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Implements rotations, including spherical rotations as defined in WCS Paper II [1]_.
`RotateNative2Celestial` and `RotateCelestial2Native` follow the convention in
WCS Paper II to rotate to/from a native sphere and the celestial sphere.
The implementation uses `EulerAngleRotation`. The model parameters are
three angles: the longitude (``lon``) and latitude (``lat``) of the fiducial point
in the celestial system (``CRVAL`` keywords in FITS), and the longitude of the celestial
pole in the native system (``lon_pole``). The Euler angles are ``lon+90``, ``90-lat``
and ``-(lon_pole-90)``.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
# pylint: disable=invalid-name, too-many-arguments, no-member
import math
from functools import reduce
import numpy as np
from astropy import units as u
from astropy.coordinates.matrix_utilities import rotation_matrix
from .core import Model
from .parameters import Parameter
from .utils import _to_orig_unit, _to_radian
__all__ = [
"EulerAngleRotation",
"RotateCelestial2Native",
"RotateNative2Celestial",
"Rotation2D",
"RotationSequence3D",
"SphericalRotationSequence",
]
def _create_matrix(angles, axes_order):
matrices = []
for angle, axis in zip(angles, axes_order):
if isinstance(angle, u.Quantity):
angle = angle.value
angle = angle.item()
matrices.append(rotation_matrix(angle, axis, unit=u.rad))
return reduce(np.matmul, matrices[::-1])
def spherical2cartesian(alpha, delta):
alpha = np.deg2rad(alpha)
delta = np.deg2rad(delta)
x = np.cos(alpha) * np.cos(delta)
y = np.cos(delta) * np.sin(alpha)
z = np.sin(delta)
return np.array([x, y, z])
def cartesian2spherical(x, y, z):
h = np.hypot(x, y)
alpha = np.rad2deg(np.arctan2(y, x))
delta = np.rad2deg(np.arctan2(z, h))
return alpha, delta
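# Round-trip check (illustrative, not part of astropy): converting spherical
# angles (deg) to Cartesian and back recovers the input, e.g.
#   alpha, delta = cartesian2spherical(*spherical2cartesian(30.0, 60.0))
#   # alpha ~= 30.0, delta ~= 60.0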
class RotationSequence3D(Model):
"""
Perform a series of rotations about different axes in 3D space.
Positive angles represent a counter-clockwise rotation.
Parameters
----------
angles : array-like
Angles of rotation in deg in the order of axes_order.
axes_order : str
A sequence of 'x', 'y', 'z' corresponding to axis of rotation.
Examples
--------
>>> model = RotationSequence3D([1.1, 2.1, 3.1, 4.1], axes_order='xyzx')
"""
standard_broadcasting = False
_separable = False
n_inputs = 3
n_outputs = 3
angles = Parameter(
default=[],
getter=_to_orig_unit,
setter=_to_radian,
description="Angles of rotation in deg in the order of axes_order",
)
def __init__(self, angles, axes_order, name=None):
self.axes = ["x", "y", "z"]
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError(
f"Unrecognized axis label {unrecognized}; should be one of {self.axes} "
)
self.axes_order = axes_order
if len(angles) != len(axes_order):
raise ValueError(
f"The number of angles {len(angles)} should match "
f"the number of axes {len(axes_order)}."
)
super().__init__(angles, name=name)
self._inputs = ("x", "y", "z")
self._outputs = ("x", "y", "z")
@property
def inverse(self):
"""Inverse rotation."""
angles = self.angles.value[::-1] * -1
return self.__class__(angles, axes_order=self.axes_order[::-1])
def evaluate(self, x, y, z, angles):
"""
Apply the rotation to a set of 3D Cartesian coordinates.
"""
if x.shape != y.shape or x.shape != z.shape:
raise ValueError("Expected input arrays to have the same shape")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.ravel(), y.ravel(), z.ravel()])
result = np.dot(_create_matrix(angles[0], self.axes_order), inarr)
x, y, z = result[0], result[1], result[2]
x.shape = y.shape = z.shape = orig_shape
return x, y, z
class SphericalRotationSequence(RotationSequence3D):
"""
Perform a sequence of rotations about an arbitrary number of axes
in spherical coordinates.
Parameters
----------
angles : list
A sequence of angles (in deg).
axes_order : str
A sequence of characters ('x', 'y', or 'z') corresponding to the
axis of rotation and matching the order in ``angles``.
"""
def __init__(self, angles, axes_order, name=None, **kwargs):
self._n_inputs = 2
self._n_outputs = 2
super().__init__(angles, axes_order=axes_order, name=name, **kwargs)
self._inputs = ("lon", "lat")
self._outputs = ("lon", "lat")
@property
def n_inputs(self):
return self._n_inputs
@property
def n_outputs(self):
return self._n_outputs
def evaluate(self, lon, lat, angles):
x, y, z = spherical2cartesian(lon, lat)
x1, y1, z1 = super().evaluate(x, y, z, angles)
lon, lat = cartesian2spherical(x1, y1, z1)
return lon, lat
class _EulerRotation:
"""
Base class which does the actual computation.
"""
_separable = False
def evaluate(self, alpha, delta, phi, theta, psi, axes_order):
shape = None
if isinstance(alpha, np.ndarray):
alpha = alpha.ravel()
delta = delta.ravel()
shape = alpha.shape
inp = spherical2cartesian(alpha, delta)
matrix = _create_matrix([phi, theta, psi], axes_order)
result = np.dot(matrix, inp)
a, b = cartesian2spherical(*result)
if shape is not None:
a.shape = shape
b.shape = shape
return a, b
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
"""Input units."""
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
"""Output units."""
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
class EulerAngleRotation(_EulerRotation, Model):
"""
Implements Euler angle intrinsic rotations.
Rotates one coordinate system into another (fixed) coordinate system.
All coordinate systems are right-handed. The sign of the angles is
    determined by the right-hand rule.
Parameters
----------
phi, theta, psi : float or `~astropy.units.Quantity` ['angle']
"proper" Euler angles in deg.
If floats, they should be in deg.
axes_order : str
A 3 character string, a combination of 'x', 'y' and 'z',
where each character denotes an axis in 3D space.
"""
n_inputs = 2
n_outputs = 2
phi = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="1st Euler angle (Quantity or value in deg)",
)
theta = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="2nd Euler angle (Quantity or value in deg)",
)
psi = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="3rd Euler angle (Quantity or value in deg)",
)
def __init__(self, phi, theta, psi, axes_order, **kwargs):
self.axes = ["x", "y", "z"]
if len(axes_order) != 3:
raise TypeError(
"Expected axes_order to be a character sequence of length 3, "
f"got {axes_order}"
)
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError(
f"Unrecognized axis label {unrecognized}; should be one of {self.axes}"
)
self.axes_order = axes_order
qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]]
if any(qs) and not all(qs):
raise TypeError(
"All parameters should be of the same type - float or Quantity."
)
super().__init__(phi=phi, theta=theta, psi=psi, **kwargs)
self._inputs = ("alpha", "delta")
self._outputs = ("alpha", "delta")
@property
def inverse(self):
return self.__class__(
phi=-self.psi,
theta=-self.theta,
psi=-self.phi,
axes_order=self.axes_order[::-1],
)
def evaluate(self, alpha, delta, phi, theta, psi):
a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order)
return a, b
class _SkyRotation(_EulerRotation, Model):
"""
Base class for RotateNative2Celestial and RotateCelestial2Native.
"""
lon = Parameter(
        default=0, getter=_to_orig_unit, setter=_to_radian, description="Longitude"
)
lat = Parameter(
        default=0, getter=_to_orig_unit, setter=_to_radian, description="Latitude"
)
lon_pole = Parameter(
default=0,
getter=_to_orig_unit,
setter=_to_radian,
description="Longitude of a pole",
)
def __init__(self, lon, lat, lon_pole, **kwargs):
qs = [isinstance(par, u.Quantity) for par in [lon, lat, lon_pole]]
if any(qs) and not all(qs):
raise TypeError(
"All parameters should be of the same type - float or Quantity."
)
super().__init__(lon, lat, lon_pole, **kwargs)
self.axes_order = "zxz"
def _evaluate(self, phi, theta, lon, lat, lon_pole):
alpha, delta = super().evaluate(phi, theta, lon, lat, lon_pole, self.axes_order)
mask = alpha < 0
if isinstance(mask, np.ndarray):
alpha[mask] += 360
        elif mask:
            alpha += 360
return alpha, delta
class RotateNative2Celestial(_SkyRotation):
"""
Transform from Native to Celestial Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity` ['angle']
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity` ['angle']
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity` ['angle']
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they
should be in units of deg. Inputs are angles on the native sphere.
Outputs are angles on the celestial sphere.
"""
n_inputs = 2
n_outputs = 2
@property
def input_units(self):
"""Input units."""
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
"""Output units."""
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
self.inputs = ("phi_N", "theta_N")
self.outputs = ("alpha_C", "delta_C")
def evaluate(self, phi_N, theta_N, lon, lat, lon_pole):
"""
Parameters
----------
phi_N, theta_N : float or `~astropy.units.Quantity` ['angle']
Angles in the Native coordinate system.
            If float, assumed in degrees.
lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle']
Parameter values when the model was initialized.
If float, assumed in degrees.
Returns
-------
alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle']
Angles on the Celestial sphere.
If float, in degrees.
"""
# The values are in radians since they have already been through the setter.
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = lon_pole - np.pi / 2
theta = -(np.pi / 2 - lat)
psi = -(np.pi / 2 + lon)
alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi)
return alpha_C, delta_C
@property
def inverse(self):
# convert to angles on the celestial sphere
return RotateCelestial2Native(self.lon, self.lat, self.lon_pole)
class RotateCelestial2Native(_SkyRotation):
"""
Transform from Celestial to Native Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity` ['angle']
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity` ['angle']
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity` ['angle']
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be
in units of deg. Inputs are angles on the celestial sphere.
Outputs are angles on the native sphere.
"""
n_inputs = 2
n_outputs = 2
@property
def input_units(self):
"""Input units."""
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
"""Output units."""
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
# Inputs are angles on the celestial sphere
self.inputs = ("alpha_C", "delta_C")
# Outputs are angles on the native sphere
self.outputs = ("phi_N", "theta_N")
def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole):
"""
Parameters
----------
alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle']
Angles in the Celestial coordinate frame.
If float, assumed in degrees.
lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle']
Parameter values when the model was initialized.
If float, assumed in degrees.
Returns
-------
phi_N, theta_N : float or `~astropy.units.Quantity` ['angle']
Angles on the Native sphere.
If float, in degrees.
"""
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = np.pi / 2 + lon
theta = np.pi / 2 - lat
psi = -(lon_pole - np.pi / 2)
phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi)
return phi_N, theta_N
@property
def inverse(self):
return RotateNative2Celestial(self.lon, self.lat, self.lon_pole)
class Rotation2D(Model):
"""
Perform a 2D rotation given an angle.
    Positive angles represent a counter-clockwise rotation; negative angles
    represent a clockwise rotation.
Parameters
----------
angle : float or `~astropy.units.Quantity` ['angle']
Angle of rotation (if float it should be in deg).
"""
n_inputs = 2
n_outputs = 2
_separable = False
angle = Parameter(
default=0.0,
getter=_to_orig_unit,
setter=_to_radian,
description="Angle of rotation (Quantity or value in deg)",
)
def __init__(self, angle=angle, **kwargs):
super().__init__(angle=angle, **kwargs)
self._inputs = ("x", "y")
self._outputs = ("x", "y")
@property
def inverse(self):
"""Inverse rotation."""
return self.__class__(angle=-self.angle)
@classmethod
def evaluate(cls, x, y, angle):
"""
Rotate (x, y) about ``angle``.
Parameters
----------
x, y : array-like
Input quantities
angle : float or `~astropy.units.Quantity` ['angle']
            Angle of rotation.
If float, assumed in degrees.
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
# If one argument has units, enforce they both have units and they are compatible.
x_unit = getattr(x, "unit", None)
y_unit = getattr(y, "unit", None)
has_units = x_unit is not None and y_unit is not None
if x_unit != y_unit:
if has_units and y_unit.is_equivalent(x_unit):
y = y.to(x_unit)
y_unit = x_unit
else:
raise u.UnitsError("x and y must have compatible units")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.ravel(), y.ravel()])
if isinstance(angle, u.Quantity):
angle = angle.to_value(u.rad)
result = np.dot(cls._compute_matrix(angle), inarr)
x, y = result[0], result[1]
x.shape = y.shape = orig_shape
if has_units:
return u.Quantity(x, unit=x_unit, subok=True), u.Quantity(
y, unit=y_unit, subok=True
)
return x, y
@staticmethod
def _compute_matrix(angle):
if not np.isscalar(angle):
angle = angle[0]
return np.array(
[[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]],
dtype=np.float64,
)
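if __name__ == "__main__":
    # A minimal usage sketch, assuming the full astropy.modeling machinery
    # (Model.__call__ and the Parameter getters/setters) behaves as in the
    # installed package; the input values below are illustrative only.
    # Round trip through the helper functions defined above: a point on the
    # sphere converted to Cartesian coordinates and back is unchanged.
    xyz = spherical2cartesian(30.0, 45.0)
    print(cartesian2spherical(*xyz))  # ~ (30.0, 45.0)
    # A 2D rotation by 90 deg maps (1, 0) onto approximately (0, 1).
    rot = Rotation2D(angle=90.0)
    print(rot(1.0, 0.0))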
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@[email protected]@.PATH_END.py
|
{
"filename": "anglescan3.py",
"repo_name": "wmpg/Supracenter",
"repo_path": "Supracenter_extracted/Supracenter-master/supra/Supracenter/anglescan3.py",
"type": "Python"
}
|
import numpy as np
from wmpl.Utils.TrajConversions import latLonAlt2ECEF, ecef2LatLonAlt
from supra.Utils.Classes import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def findLayer(h, z_profile):
    """ Return the last atmospheric layer whose base height is at or below h,
    together with the index at which the upward search stopped (the first
    layer above h, or the last index if h is at or above the top of the
    profile). Returns (None, 0) if h lies below the lowest layer.
    """
    found_layer = None
    for ll, layer in enumerate(z_profile):
        if layer[0] <= h:
            found_layer = layer
            found_index = ll
        else:
            break
    return found_layer, ll
def propTime(pos, z_profile, dz, u, v, p):
    """ Advance the ray by one vertical step dz (x is East, y is North),
    using the sound speed and winds looked up at the stepped height via
    findLayer(). Returns [dx, dy, dz, dt], or None if the stepped height
    falls outside the profile or the ray turns over (NaN root).
    """
    current_height = pos.elev + dz
layer, i = findLayer(current_height, z_profile)
if layer is None:
return None
s2 = (layer[1])**(-2)
U = u[i]
V = v[i]
p2 = p/(1 - p*U)
A = dz/np.sqrt(s2 - p2**2)
if np.isnan(A):
return None
# Equation (10)
dx = (p2 + s2*U)*A
# Equation (11)
dy = s2*V*A
dt = -s2/np.sqrt(s2 - p**2/(1 - p*u[i])**2)*dz
return np.array([dx, dy, dz, dt])
def anglescan(S, phi, theta, z_profile, wind=True, debug=True, trace=False, plot=False, dz=-50, last_z=0):
# Originally by Wayne Edwards (Supracenter)
""" Ray-traces from a point given initial launch angles
Arguments:
S: [list] [x, y, z] of initial launch point (Supracenter or Wave-Release point)
phi: [float] initial azimuthal angle of launch [deg] with 0 deg being North and 90 deg being East
theta: [float] initial takeoff angle of launch [deg] with 90 deg being horizontal and 180 deg being vertically down
z_profile: [list] weather profile (n_layers * 4)
[[heights (increasing order) [m], speed of sound [m/s], wind speed [m/s], wind direction [rad] (same angle definition as phi)],
... ]
Keyword Arguments:
wind: [Boolean] if False sets all wind speeds to 0
debug: [Boolean] if True outputs print messages of program status
trace: [Boolean] if True returns (x, y, z, t) coordinates of the ray trace
        plot: [Boolean] if True plots the ray trace
        dz: [float] vertical step size [m] (negative values propagate the ray downward)
        last_z: [float] elevation [m] at which to stop the ray trace
Returns:
D: [list] (x, y, z, t) final position and travel time of the raytrace
T: [list] returned if trace is set to True, (x, y, z, t) of all points along the ray-trace
"""
# Azimuths and Wind directions are measured as angles from north, and increasing clockwise to the East
phi = (phi - 90)%360
# Flip coordinate system horizontally
phi = (360 - phi)%360
phi = np.radians(phi)
theta = np.radians(theta)
# Switch to turn off winds
if not wind:
z_profile[:, 2] = 0
# z_profile[:, 1] = 330
u = z_profile[:, 2]*np.sin(z_profile[:, 3])*np.cos(phi) + z_profile[:, 2]*np.cos(z_profile[:, 3])*np.sin(phi)
v = z_profile[:, 2]*np.sin(z_profile[:, 3])*np.cos(phi+np.pi/2) + z_profile[:, 2]*np.cos(z_profile[:, 3])*np.sin(phi+np.pi/2)
s_val = 1/z_profile[-1, 1]
# ray parameter
p = s_val*np.sin(theta)/(1 + s_val*u[-1]*np.sin(theta))
S_init = S
t_arrival = 0
if trace:
T = []
T.append([S.lat, S.lon, S.elev, t_arrival])
# ignore negative roots
np.seterr(divide='ignore', invalid='ignore')
done = False
while not done:
S_ref = latLonAlt2ECEF(S.lat_r, S.lon_r, S.elev)
diff = propTime(S, z_profile, dz, u, v, p)
if diff is None:
return None
else:
x, y, z, t = diff
t_arrival += t
new_pos = [S_ref[0] + x, S_ref[1] + y, S_ref[2] + z]
new_geo_pos = ecef2LatLonAlt(new_pos[0], new_pos[1], new_pos[2])
S = Position(np.degrees(new_geo_pos[0]), np.degrees(new_geo_pos[1]), new_geo_pos[2])
if trace:
T.append([S.lat, S.lon, S.elev, t_arrival])
if S.elev <= last_z:
done = True
if trace and plot:
tr = np.array(T)
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(tr[:, 1], tr[:, 0], tr[:, 2], c='b')
ax.plot(tr[:, 1], tr[:, 0], tr[:, 2], c='k')
ax.scatter(S_init.lon, S_init.lat, S_init.elev, c='r', marker="*")
ax.scatter(S.lon, S.lat, S.elev, c='g', marker="^")
plt.show()
D = [S.lat, S.lon, S.elev, t_arrival]
##########################
if trace:
return np.array(D), np.array(T)
else:
return np.array(D)
if __name__ == '__main__':
S = Position(45, 45, 10000)
#takeoff
theta = 135
#azimuth
phi = 0
z_profile = np.array([[ 0.0, 330.0, 4.0, 0.0],
[1000.0, 330.0, 4.0, 0.0],
[2020.0, 330.0, 4.0, 0.0],
[3023.0, 350.0, 4.0, 0.0],
[4000.0, 350.0, 4.0, 0.0],
[5400.0, 330.0, 4.0, 0.0],
[6000.0, 330.0, 4.0, 0.0],
[8500.0, 330.0, 4.0, 0.0],
[8900.0, 330.0, 4.0, 90.0],
[10000.0, 330.0, 4.0, 0.0]])
D = anglescan(S, phi, theta, z_profile, trace=True, plot=True)
print(D)
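    # A small, hedged check of the findLayer() helper used by propTime():
    # 2500 m sits between the 2020 m and 3023 m rows, so the returned layer
    # is the 2020 m one and the index is where the upward search stopped.
    layer, idx = findLayer(2500.0, z_profile)
    print(layer, idx)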
|
wmpgREPO_NAMESupracenterPATH_START.@Supracenter_extracted@Supracenter-master@supra@[email protected]@.PATH_END.py
|
{
"filename": "_autotypenumbers.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/scene/zaxis/_autotypenumbers.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AutotypenumbersValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="autotypenumbers", parent_name="layout.scene.zaxis", **kwargs
):
super(AutotypenumbersValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["convert types", "strict"]),
**kwargs
)
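if __name__ == "__main__":
    # A minimal sketch, assuming the legacy _plotly_utils base validators
    # expose validate_coerce() as other plotly validators do; the value
    # below is one of the enumerated options declared above.
    validator = AutotypenumbersValidator()
    print(validator.validate_coerce("strict"))  # -> "strict"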
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@scene@zaxis@[email protected]_END.py
|
{
"filename": "test_dask_nearest_neighbors.py",
"repo_name": "rapidsai/cuml",
"repo_path": "cuml_extracted/cuml-main/python/cuml/cuml/tests/dask/test_dask_nearest_neighbors.py",
"type": "Python"
}
|
# Copyright (c) 2020-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import platform
from cuml.testing.utils import array_equal
from sklearn.neighbors import KNeighborsClassifier
from cuml.testing.utils import unit_param, quality_param, stress_param
from cuml.dask.common import utils as dask_utils
from cuml.common import has_scipy
from cuml.internals.safe_imports import cpu_only_import
import pytest
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
dask_cudf = gpu_only_import("dask_cudf")
pd = cpu_only_import("pandas")
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
IS_ARM = platform.processor() == "aarch64"
if IS_ARM and cp.cuda.runtime.runtimeGetVersion() < 11080:
pytest.skip(
"Test hang in AARCH64 with CUDA < 11.8: "
"https://github.com/rapidsai/cuml/issues/5673",
allow_module_level=True,
)
def predict(neigh_ind, _y, n_neighbors):
if has_scipy():
import scipy.stats as stats
else:
raise RuntimeError("Scipy is needed to run predict()")
neigh_ind = neigh_ind.astype(np.int64)
ypred, count = stats.mode(_y[neigh_ind], axis=1)
return ypred.ravel(), count.ravel() * 1.0 / n_neighbors
def _prep_training_data(
c, X_train, partitions_per_worker, reverse_order=False
):
workers = c.has_what().keys()
if reverse_order:
workers = list(workers)[::-1]
n_partitions = partitions_per_worker * len(workers)
X_cudf = cudf.DataFrame.from_pandas(pd.DataFrame(X_train))
X_train_df = dask_cudf.from_cudf(X_cudf, npartitions=n_partitions)
(X_train_df,) = dask_utils.persist_across_workers(
c, [X_train_df], workers=list(workers)
)
return X_train_df
def _scale_rows(client, nrows):
workers = list(client.scheduler_info()["workers"].keys())
n_workers = len(workers)
return n_workers * nrows
def _test_compare_skl(
nrows,
ncols,
nclusters,
n_parts,
n_neighbors,
streams_per_handle,
reverse_worker_order,
dask_client,
request,
):
client = request.getfixturevalue(dask_client)
from cuml.dask.neighbors import NearestNeighbors as daskNN
from sklearn.datasets import make_blobs
nrows = _scale_rows(client, nrows)
X, y = make_blobs(
n_samples=int(nrows),
n_features=ncols,
centers=nclusters,
random_state=0,
)
X = X.astype(np.float32)
X_cudf = _prep_training_data(client, X, n_parts, reverse_worker_order)
from dask.distributed import wait
wait(X_cudf)
dist = np.array([len(v) for v in client.has_what().values()])
assert np.all(dist == dist[0])
cumlModel = daskNN(
n_neighbors=n_neighbors, streams_per_handle=streams_per_handle
)
cumlModel.fit(X_cudf)
out_d, out_i = cumlModel.kneighbors(X_cudf)
local_i = np.array(out_i.compute().to_numpy(), dtype="int64")
sklModel = KNeighborsClassifier(n_neighbors=n_neighbors).fit(X, y)
skl_y_hat = sklModel.predict(X)
y_hat, _ = predict(local_i, y, n_neighbors)
sk_d, sk_i = sklModel.kneighbors(X)
sk_i = sk_i.astype("int64")
assert array_equal(local_i[:, 0], np.arange(nrows))
diff = sk_i - local_i
n_diff = len(diff[diff > 0])
perc_diff = n_diff / (nrows * n_neighbors)
assert perc_diff <= 3e-3
assert array_equal(y_hat, skl_y_hat)
@pytest.mark.parametrize(
"nrows", [unit_param(300), quality_param(1e6), stress_param(5e8)]
)
@pytest.mark.parametrize("ncols", [10, 30])
@pytest.mark.parametrize(
"nclusters", [unit_param(5), quality_param(10), stress_param(15)]
)
@pytest.mark.parametrize(
"n_neighbors", [unit_param(10), quality_param(4), stress_param(100)]
)
@pytest.mark.parametrize(
"n_parts",
[unit_param(1), unit_param(5), quality_param(7), stress_param(50)],
)
@pytest.mark.parametrize(
"streams_per_handle,reverse_worker_order", [(5, True), (10, False)]
)
def test_compare_skl(
nrows,
ncols,
nclusters,
n_parts,
n_neighbors,
streams_per_handle,
reverse_worker_order,
request,
):
_test_compare_skl(
nrows,
ncols,
nclusters,
n_parts,
n_neighbors,
streams_per_handle,
reverse_worker_order,
"client",
request,
)
@pytest.mark.parametrize(
"nrows", [unit_param(300), quality_param(1e6), stress_param(5e8)]
)
@pytest.mark.parametrize("ncols", [10, 30])
@pytest.mark.parametrize(
"nclusters", [unit_param(5), quality_param(10), stress_param(15)]
)
@pytest.mark.parametrize(
"n_neighbors", [unit_param(10), quality_param(4), stress_param(100)]
)
@pytest.mark.parametrize(
"n_parts",
[unit_param(1), unit_param(5), quality_param(7), stress_param(50)],
)
@pytest.mark.parametrize(
"streams_per_handle,reverse_worker_order", [(5, True), (10, False)]
)
@pytest.mark.ucx
def test_compare_skl_ucx(
nrows,
ncols,
nclusters,
n_parts,
n_neighbors,
streams_per_handle,
reverse_worker_order,
request,
):
_test_compare_skl(
nrows,
ncols,
nclusters,
n_parts,
n_neighbors,
streams_per_handle,
reverse_worker_order,
"ucx_client",
request,
)
@pytest.mark.parametrize(
"nrows", [unit_param(300), quality_param(1e6), stress_param(5e8)]
)
@pytest.mark.parametrize("ncols", [10, 30])
@pytest.mark.parametrize(
"nclusters", [unit_param(5), quality_param(10), stress_param(15)]
)
@pytest.mark.parametrize(
"n_neighbors", [unit_param(10), quality_param(4), stress_param(100)]
)
@pytest.mark.parametrize(
"n_parts",
[unit_param(1), unit_param(5), quality_param(7), stress_param(50)],
)
@pytest.mark.parametrize(
"streams_per_handle,reverse_worker_order", [(5, True), (10, False)]
)
@pytest.mark.ucxx
def test_compare_skl_ucxx(
nrows,
ncols,
nclusters,
n_parts,
n_neighbors,
streams_per_handle,
reverse_worker_order,
request,
):
_test_compare_skl(
nrows,
ncols,
nclusters,
n_parts,
n_neighbors,
streams_per_handle,
reverse_worker_order,
"ucxx_client",
request,
)
def _test_batch_size(nrows, ncols, n_parts, batch_size, dask_client, request):
client = request.getfixturevalue(dask_client)
n_neighbors = 10
n_clusters = 5
from cuml.dask.neighbors import NearestNeighbors as daskNN
from sklearn.datasets import make_blobs
nrows = _scale_rows(client, nrows)
X, y = make_blobs(
n_samples=int(nrows),
n_features=ncols,
centers=n_clusters,
random_state=0,
)
X = X.astype(np.float32)
X_cudf = _prep_training_data(client, X, n_parts)
cumlModel = daskNN(
n_neighbors=n_neighbors, batch_size=batch_size, streams_per_handle=5
)
cumlModel.fit(X_cudf)
out_d, out_i = cumlModel.kneighbors(X_cudf)
local_i = out_i.compute().to_numpy()
y_hat, _ = predict(local_i, y, n_neighbors)
assert array_equal(y_hat, y)
@pytest.mark.parametrize("nrows", [unit_param(1000), stress_param(1e5)])
@pytest.mark.parametrize("ncols", [unit_param(10), stress_param(500)])
@pytest.mark.parametrize("n_parts", [unit_param(10), stress_param(100)])
@pytest.mark.parametrize("batch_size", [unit_param(100), stress_param(1e3)])
def test_batch_size(nrows, ncols, n_parts, batch_size, request):
_test_batch_size(nrows, ncols, n_parts, batch_size, "client", request)
@pytest.mark.parametrize("nrows", [unit_param(1000), stress_param(1e5)])
@pytest.mark.parametrize("ncols", [unit_param(10), stress_param(500)])
@pytest.mark.parametrize("n_parts", [unit_param(10), stress_param(100)])
@pytest.mark.parametrize("batch_size", [unit_param(100), stress_param(1e3)])
@pytest.mark.ucx
def test_batch_size_ucx(nrows, ncols, n_parts, batch_size, request):
_test_batch_size(nrows, ncols, n_parts, batch_size, "ucx_client", request)
@pytest.mark.parametrize("nrows", [unit_param(1000), stress_param(1e5)])
@pytest.mark.parametrize("ncols", [unit_param(10), stress_param(500)])
@pytest.mark.parametrize("n_parts", [unit_param(10), stress_param(100)])
@pytest.mark.parametrize("batch_size", [unit_param(100), stress_param(1e3)])
@pytest.mark.ucxx
def test_batch_size_ucxx(nrows, ncols, n_parts, batch_size, request):
_test_batch_size(nrows, ncols, n_parts, batch_size, "ucxx_client", request)
def _test_return_distance(dask_client, request):
client = request.getfixturevalue(dask_client)
n_samples = 50
n_feats = 50
k = 5
from cuml.dask.neighbors import NearestNeighbors as daskNN
from sklearn.datasets import make_blobs
n_samples = _scale_rows(client, n_samples)
X, y = make_blobs(n_samples=n_samples, n_features=n_feats, random_state=0)
X = X.astype(np.float32)
X_cudf = _prep_training_data(client, X, 1)
cumlModel = daskNN(streams_per_handle=5)
cumlModel.fit(X_cudf)
ret = cumlModel.kneighbors(X_cudf, k, return_distance=False)
assert not isinstance(ret, tuple)
ret = ret.compute()
assert ret.shape == (n_samples, k)
ret = cumlModel.kneighbors(X_cudf, k, return_distance=True)
assert isinstance(ret, tuple)
assert len(ret) == 2
def test_return_distance(request):
_test_return_distance("client", request)
@pytest.mark.ucx
def test_return_distance_ucx(request):
_test_return_distance("ucx_client", request)
@pytest.mark.ucxx
def test_return_distance_ucxx(request):
_test_return_distance("ucxx_client", request)
def _test_default_n_neighbors(dask_client, request):
client = request.getfixturevalue(dask_client)
n_samples = 50
n_feats = 50
k = 15
from cuml.dask.neighbors import NearestNeighbors as daskNN
from cuml.neighbors.nearest_neighbors_mg import (
NearestNeighborsMG as cumlNN,
)
from sklearn.datasets import make_blobs
n_samples = _scale_rows(client, n_samples)
X, y = make_blobs(n_samples=n_samples, n_features=n_feats, random_state=0)
X = X.astype(np.float32)
X_cudf = _prep_training_data(client, X, 1)
cumlModel = daskNN(streams_per_handle=5)
cumlModel.fit(X_cudf)
ret = cumlModel.kneighbors(X_cudf, return_distance=False)
assert ret.shape[1] == cumlNN().n_neighbors
cumlModel = daskNN(n_neighbors=k)
cumlModel.fit(X_cudf)
ret = cumlModel.kneighbors(X_cudf, k, return_distance=False)
assert ret.shape[1] == k
def test_default_n_neighbors(request):
_test_default_n_neighbors("client", request)
@pytest.mark.ucx
def test_default_n_neighbors_ucx(request):
_test_default_n_neighbors("ucx_client", request)
@pytest.mark.ucxx
def test_default_n_neighbors_ucxx(request):
_test_default_n_neighbors("ucxx_client", request)
def _test_one_query_partition(dask_client, request):
client = request.getfixturevalue(dask_client) # noqa
from cuml.dask.neighbors import NearestNeighbors as daskNN
from cuml.dask.datasets import make_blobs
X_train, _ = make_blobs(n_samples=4000, n_features=16, n_parts=8)
X_test, _ = make_blobs(n_samples=200, n_features=16, n_parts=1)
cumlModel = daskNN(n_neighbors=4)
cumlModel.fit(X_train)
cumlModel.kneighbors(X_test)
def test_one_query_partition(request):
_test_one_query_partition("client", request)
@pytest.mark.ucx
def test_one_query_partition_ucx(request):
_test_one_query_partition("ucx_client", request)
@pytest.mark.ucxx
def test_one_query_partition_ucxx(request):
_test_one_query_partition("ucxx_client", request)
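if __name__ == "__main__":
    # A toy, CPU-only sanity check of the local predict() helper, assuming
    # scipy is available; the index/label arrays below are hypothetical.
    toy_ind = np.array([[0, 1, 2], [3, 4, 5]])  # neighbor indices per query
    toy_y = np.array([0, 0, 1, 1, 1, 1])        # labels of the indexed points
    labels, score = predict(toy_ind, toy_y, n_neighbors=3)
    print(labels, score)  # majority label and vote fraction per query row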
|
rapidsaiREPO_NAMEcumlPATH_START.@cuml_extracted@cuml-main@python@cuml@cuml@tests@dask@[email protected]_END.py
|
{
"filename": "pvextractor.py",
"repo_name": "radio-astro-tools/pvextractor",
"repo_path": "pvextractor_extracted/pvextractor-main/pvextractor/pvextractor.py",
"type": "Python"
}
|
from __future__ import print_function
import numpy as np
import warnings
from astropy import units as u
from astropy.io.fits import PrimaryHDU, ImageHDU, Header
from .utils.wcs_utils import get_spatial_scale, sanitize_wcs
from .geometry import extract_slice
from .geometry import path as paths
from .utils.wcs_slicing import slice_wcs
def extract_pv_slice(cube, path, wcs=None, spacing=1.0, order=3,
respect_nan=True, assert_square=True):
"""
Given a position-position-velocity cube with dimensions (nv, ny, nx), and
a path, extract a position-velocity slice.
Alternative implementations:
gipsy::sliceview
karma::kpvslice
casaviewer::slice
Parameters
----------
cube : :class:`~numpy.ndarray` or :class:`~spectral_cube.SpectralCube` or str or HDU
The cube to extract a slice from. If this is a plain
:class:`~numpy.ndarray` instance, the WCS information can optionally
be specified with the ``wcs`` parameter. If a string, it should be
the name of a file containing a spectral cube.
path : `Path` or list of 2-tuples
The path along which to define the position-velocity slice. The path
can contain coordinates defined in pixel or world coordinates.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS information to use for the cube. This should only be
specified if the ``cube`` parameter is a plain
:class:`~numpy.ndarray` instance.
spacing : float
The position resolution in the final position-velocity slice. This
can be given in pixel coordinates or as a
:class:`~astropy.units.Quantity` instance with angle units.
order : int, optional
Spline interpolation order when using paths with zero width. Does not
have any effect for paths with a non-zero width.
respect_nan : bool, optional
If set to `False`, NaN values are changed to zero before computing
the slices. If set to `True`, in the case of line paths a second
computation is performed to ignore the NaN value while interpolating,
and set the output values of NaNs to NaN.
assert_square : bool
If True, the WCS-loader will check whether the pixels are square.
If the pixels are not square, the interpretation of the X-axis in
the extracted PV diagram is ambiguous. In some cases, it may be
necessary to disable this check, though.
Returns
-------
slice : `PrimaryHDU`
The position-velocity slice, as a FITS HDU object
"""
if isinstance(cube, (str, ImageHDU, PrimaryHDU)):
try:
from spectral_cube import SpectralCube
cube = SpectralCube.read(cube)
except ImportError:
raise ImportError("spectral_cube package required for working "
"with fits data. Install spectral_cube or "
"use NumPy arrays")
if _is_spectral_cube(cube):
wcs = cube.wcs
# The fits HEADER will preserve the UNIT, but pvextractor does not care
# what the flux units are
cube = cube.filled_data[...].value
if wcs is not None:
wcs = sanitize_wcs(wcs)
if not isinstance(cube, np.ndarray) or wcs is not None:
try:
scale = get_spatial_scale(wcs, assert_square=assert_square)
except AssertionError as ex:
print("Pixels are non-square. See error below. You may "
"disable this check by setting assert_square=False.")
raise ex
if isinstance(spacing, u.Quantity):
pixel_spacing = (spacing / scale).decompose()
world_spacing = spacing
else:
pixel_spacing = spacing
world_spacing = spacing * scale
else:
if isinstance(spacing, u.Quantity):
raise TypeError("No WCS has been specified, so spacing should be given in pixels")
else:
pixel_spacing = spacing
world_spacing = None
# Allow path to be passed in as list of 2-tuples
if not isinstance(path, paths.Path):
path = paths.Path(path)
pv_slice = extract_slice(cube, path, wcs=wcs, spacing=pixel_spacing,
order=order, respect_nan=respect_nan)
# Generate output header
if wcs is None:
header = Header()
else:
header = slice_wcs(wcs, spatial_scale=world_spacing).to_header()
# TODO: write path to BinTableHDU
return PrimaryHDU(data=pv_slice, header=header)
def _is_spectral_cube(obj):
try:
from spectral_cube.spectral_cube import BaseSpectralCube
return isinstance(obj, BaseSpectralCube)
except ImportError:
if 'SpectralCube' in str(obj.__class__):
warnings.warn("Object appears to be a SpectralCube, but"
" the spectral_cube module could not be loaded")
return False
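if __name__ == "__main__":
    # A minimal, purely pixel-based sketch (no WCS), following the call
    # signature documented above; the synthetic cube and path endpoints are
    # illustrative, and spectral_cube is not required for ndarray input.
    cube = np.random.random((5, 20, 20))  # (nv, ny, nx)
    pv = extract_pv_slice(cube, [(2.0, 2.0), (17.0, 17.0)], spacing=1.0)
    print(pv.data.shape)  # (nv, n_positions)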
|
radio-astro-toolsREPO_NAMEpvextractorPATH_START.@pvextractor_extracted@pvextractor-main@[email protected]@.PATH_END.py
|
{
"filename": "test_gs_fcoll.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/tests/adv/test_gs_fcoll.py",
"type": "Python"
}
|
"""
test_gs_fcoll.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Mon Oct 1 15:23:53 2012
Description: Make sure the global 21-cm signal calculator works.
"""
import ares
import matplotlib.pyplot as pl
def test():
sim = ares.simulations.Global21cm(verbose=False, progress_bar=False)
sim.run()
sim.GlobalSignature()
    pl.savefig('{!s}.png'.format(__file__[:-3]))
pl.close()
assert True
if __name__ == '__main__':
test()
|
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@tests@adv@[email protected]_END.py
|
{
"filename": "conf.py",
"repo_name": "astropenguin/ndradex",
"repo_path": "ndradex_extracted/ndradex-main/docs/conf.py",
"type": "Python"
}
|
# Project information
author = "Akio Taniguchi"
copyright = "2019-2023 Akio Taniguchi"
# General configuration
extensions = [
"myst_parser",
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx.ext.autosummary",
]
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# Options for HTML output
html_static_path = ["_static"]
html_theme = "pydata_sphinx_theme"
html_theme_options = {
"logo": {"text": "ndRADEX"},
"github_url": "https://github.com/astropenguin/ndradex/",
"twitter_url": "https://twitter.com/astropengu_in/",
}
|
astropenguinREPO_NAMEndradexPATH_START.@ndradex_extracted@ndradex-main@[email protected]@.PATH_END.py
|
{
"filename": "massdef.py",
"repo_name": "LSSTDESC/CCL",
"repo_path": "CCL_extracted/CCL-master/pyccl/halos/massdef.py",
"type": "Python"
}
|
__all__ = ("mass2radius_lagrangian", "convert_concentration", "MassDef",
"MassDef200m", "MassDef200c", "MassDef500c", "MassDefVir",
"MassDefFof", "mass_translator",)
from functools import cached_property
import numpy as np
from .. import CCLAutoRepr, CCLNamedClass, lib, check
from . import Concentration, HaloBias, MassFunc
def mass2radius_lagrangian(cosmo, M):
""" Returns Lagrangian radius for a halo of mass :math:`M`.
The Lagrangian radius is defined as that enclosing
the mass of the halo assuming a homogeneous Universe.
.. math::
R = \\left(\\frac{3\\,M}{4\\pi\\,\\rho_{M,0}}\\right)^{1/3}
Args:
cosmo (:class:`~pyccl.cosmology.Cosmology`): A Cosmology object.
M (:obj:`float` or `array`): halo mass in units of :math:`M_\\odot`.
Returns:
(:obj:`float` or `array`): Lagrangian radius in comoving Mpc.
"""
M_use = np.atleast_1d(M)
R = (M_use / (4.18879020479 * cosmo.rho_x(1, 'matter')))**(1./3.)
if np.ndim(M) == 0:
return R[0]
return R
def convert_concentration(cosmo, *, c_old, Delta_old, Delta_new):
""" Computes the concentration parameter for a different mass definition.
This is done assuming an NFW profile. The output concentration ``c_new`` is
found by solving the equation:
.. math::
f(c_{\\rm old}) \\Delta_{\\rm old} = f(c_{\\rm new}) \\Delta_{\\rm new}
where
.. math::
f(x) = \\frac{x^3}{\\log(1+x) - x/(1+x)}.
Args:
cosmo (:class:`~pyccl.cosmology.Cosmology`): A Cosmology object.
c_old (:obj:`float` or `array`): concentration to translate from.
Delta_old (:obj:`float`): Delta parameter associated to the input
concentration. See description of the :class:`MassDef` class.
Delta_new (:obj:`float`): Delta parameter associated to the output
concentration.
Returns:
(:obj:`float` or `array`): concentration parameter for the new
mass definition.
"""
status = 0
c_old_use = np.atleast_1d(c_old)
c_new, status = lib.convert_concentration_vec(cosmo.cosmo,
Delta_old, c_old_use,
Delta_new, c_old_use.size,
status)
check(status, cosmo=cosmo)
if np.isscalar(c_old):
return c_new[0]
return c_new
class MassDef(CCLAutoRepr, CCLNamedClass):
"""Halo mass definition. Halo masses are defined in terms of an overdensity
parameter :math:`\\Delta` and an associated density :math:`X` (either the
matter density or the critical density):
.. math::
M_\\Delta = \\frac{4 \\pi}{3} \\Delta\\,\\rho_X\\, R_\\Delta^3
where :math:`R_\\Delta` is the halo radius. This object also holds methods
to translate between :math:`R_\\Delta` and :math:`M_\\Delta`, and to
translate masses between different definitions if a concentration-mass
relation is provided.
You may also define halo masses based on a Friends-of-Friends algorithm,
in which case simply pass ``Delta='fof'`` below.
Args:
Delta (:obj:`float`): overdensity parameter. Pass ``'vir'`` if using virial
overdensity. Pass ``'fof'`` for Friends-of-Friends halos.
rho_type (:obj:`str`): either 'critical' or 'matter'.
""" # noqa
__eq_attrs__ = ("name",)
def __init__(self, Delta, rho_type):
# Check it makes sense
if isinstance(Delta, str):
if Delta.isdigit():
Delta = int(Delta)
elif Delta not in ["fof", "vir"]:
raise ValueError(f"Unknown Delta type {Delta}.")
if isinstance(Delta, (int, float)) and Delta < 0:
raise ValueError("Delta must be a positive number.")
if rho_type not in ['matter', 'critical']:
raise ValueError("rho_type must be {'matter', 'critical'}.")
self.Delta = Delta
self.rho_type = rho_type
@cached_property
def name(self):
"""Give a name to this mass definition."""
if isinstance(self.Delta, (int, float)):
return f"{self.Delta}{self.rho_type[0]}"
return f"{self.Delta}"
def __repr__(self):
return f"MassDef(Delta={self.Delta}, rho_type={self.rho_type})"
def get_Delta_vir(self, cosmo, a):
""" Computes the virial collapse density contrast with respect
to the critical density assuming a :math:`\\Lambda` CDM model. We
use the fitting function from
`Bryan and Norman 1998 <https://arxiv.org/abs/astro-ph/9710107>`_.
The virial overdensity is returned for the density type of this
object's mass definition (e.g. 'critical' or 'matter').
Args:
cosmo (:class:`~pyccl.cosmology.Cosmology`): A Cosmology object.
a (:obj:`float`): scale factor
Returns:
:obj:`float`: value of the virial overdensity.
"""
Omz = cosmo.omega_x(a, 'matter')
x = Omz-1
# Eq. 6
Dv = 18*np.pi**2+82*x-39*x**2
if self.rho_type == 'matter':
Dv /= Omz
return Dv
def get_Delta(self, cosmo, a):
""" Gets overdensity parameter associated to this mass
definition.
Args:
cosmo (:class:`~pyccl.cosmology.Cosmology`): A Cosmology object.
a (:obj:`float`): scale factor
Returns:
:obj:`float`: value of the overdensity parameter.
"""
if self.Delta == 'fof':
raise ValueError("FoF masses don't have an associated overdensity."
"Nor can they be translated into other masses")
if self.Delta == 'vir':
return self.get_Delta_vir(cosmo, a)
return self.Delta
def _get_Delta_m(self, cosmo, a):
""" For SO-based mass definitions, this returns the corresponding
value of Delta for a rho_matter-based definition.
"""
delta = self.get_Delta(cosmo, a)
if self.rho_type == 'matter':
return delta
om_this = cosmo.omega_x(a, self.rho_type)
om_matt = cosmo.omega_x(a, 'matter')
return delta * om_this / om_matt
def get_mass(self, cosmo, R, a):
""" Translates a halo radius into a mass
.. math::
M_\\Delta = \\frac{4 \\pi}{3} \\Delta\\,\\rho_X\\, R_\\Delta^3
Args:
cosmo (:class:`~pyccl.cosmology.Cosmology`): A Cosmology object.
R (:obj:`float` or `array`): halo radius (:math:`{\\rm Mpc}`,
physical, not comoving).
a (:obj:`float`): scale factor.
Returns:
(:obj:`float` or `array`): halo mass in units of :math:`M_\\odot`.
"""
R_use = np.atleast_1d(R)
Delta = self.get_Delta(cosmo, a)
M = 4.18879020479 * cosmo.rho_x(a, self.rho_type) * Delta * R_use**3
if np.ndim(R) == 0:
return M[0]
return M
def get_radius(self, cosmo, M, a):
""" Translates a halo mass into a radius
.. math::
R_\\Delta = \\left(\\frac{3M_\\Delta}{4 \\pi
\\rho_X\\,\\Delta}\\right)^{1/3}
Args:
cosmo (:class:`~pyccl.cosmology.Cosmology`):
A Cosmology object.
M (:obj:`float` or `array`): halo mass in units of
:math:`M_\\odot`.
a (:obj:`float`): scale factor.
Returns:
(:obj:`float` or `array`): halo radius in units of
:math:`{\\rm Mpc}` (physical, not comoving).
"""
M_use = np.atleast_1d(M)
Delta = self.get_Delta(cosmo, a)
R = (M_use / (4.18879020479 * Delta *
cosmo.rho_x(a, self.rho_type)))**(1./3.)
if np.ndim(M) == 0:
return R[0]
return R
@classmethod
def from_name(cls, name):
""" Return mass definition subclass from name string.
Args:
name (:obj:`str`):
a mass definition name (e.g. ``'200m'`` for
:math:`\\Delta=200` times the matter density).
Returns:
:class:`MassDef` instance corresponding to the input name.
"""
MassDefName = f"MassDef{name.capitalize()}"
if MassDefName in globals():
# MassDef is defined in one of the implementations below.
return globals()[MassDefName]
parser = {"c": "critical", "m": "matter"}
if len(name) < 2 or name[-1] not in parser:
# Bogus input - can't parse it.
raise ValueError("Could not parse mass definition string.")
Delta, rho_type = name[:-1], parser[name[-1]]
return cls(Delta, rho_type)
@classmethod
def create_instance(cls, input_):
if isinstance(input_, cls):
return input_
else:
return cls.from_name(input_)
@classmethod
def from_specs(cls, mass_def=None, *,
mass_function=None, halo_bias=None, concentration=None):
"""Instantiate mass definition and halo model ingredients.
Unspecified halo model ingredients are ignored. ``mass_def`` is always
instantiated.
Args:
mass_def (:obj:`MassDef`, :obj:`str` or :obj:`None`):
Mass definition. If a string, instantiate from its name.
If `None`, obtain the one from the first specified halo model
ingredient.
mass_function (:class:`~pyccl.halos.halo_model_base.MassFunc` or :obj:`str`):
Mass function subclass. Strings are auto-instantiated using
``mass_def``. ``None`` values are ignored.
halo_bias (:class:`~pyccl.halos.halo_model_base.HaloBias` or :obj:`str`):
Halo bias subclass. Strings are auto-instantiated using
``mass_def``. ``None`` values are ignored.
concentration (:class:`~pyccl.halos.halo_model_base.Concentration` or :obj:`str`):
Concentration subclass. Strings are auto-instantiated using
``mass_def``. ``None`` values are ignored.
Returns:
Tuple of up to 4 elements.
- mass_def : :class:`MassDef`
- mass_function : :class:`~pyccl.halos.halo_model_base.MassFunc`, if specified
- halo_bias : :class:`~pyccl.halos.halo_model_base.HaloBias`, if specified
- concentration : :class:`~pyccl.halos.halo_model_base.Concentration`, if specified
""" # noqa
values = mass_function, halo_bias, concentration
idx = [value is not None for value in values]
# Filter only the specified ones.
values = np.array(values)[idx]
names = np.array(["mass_function", "halo_bias", "concentration"])[idx]
Types = np.array([MassFunc, HaloBias, Concentration])[idx]
# Sanity check.
if mass_def is None:
for name, value in zip(names, values):
if isinstance(value, str):
raise ValueError(f"Need mass_def if {name} is str.")
# Instantiate mass_def.
if mass_def is not None:
mass_def = cls.create_instance(mass_def) # instantiate directly
else:
mass_def = values[0].mass_def # use the one in HMIngredients
# Instantiate halo model ingredients.
out = []
for name, value, Type in zip(names, values, Types):
instance = Type.create_instance(value, mass_def=mass_def)
out.append(instance)
# Check mass definition consistency.
if out and set([x.mass_def for x in out]) != set([mass_def]):
raise ValueError("Inconsistent mass definitions.")
return mass_def, *out
MassDef200m = MassDef(200, "matter")
MassDef200c = MassDef(200, "critical")
MassDef500c = MassDef(500, "critical")
MassDefVir = MassDef("vir", "critical")
MassDefFof = MassDef("fof", "matter")
def mass_translator(*, mass_in, mass_out, concentration):
"""Translate between mass definitions, assuming an NFW profile.
Returns a function that can be used to translate between halo
masses according to two different definitions.
Args:
mass_in (:class:`MassDef` or :obj:`str`): mass definition of the
input mass.
mass_out (:class:`MassDef` or :obj:`str`): mass definition of the
output mass.
concentration (:class:`~pyccl.halos.halo_model_base.Concentration` or :obj:`str`):
concentration-mass relation to use for the mass conversion. It must
be calibrated for masses using the ``mass_in`` definition.
Returns:
        Function that translates between two masses. The returned function
``f`` can be called as: ``f(cosmo, M, a)``, where
``cosmo`` is a :class:`~pyccl.cosmology.Cosmology` object, ``M``
is a mass (or array of masses), and ``a`` is a scale factor.
""" # noqa
mass_in = MassDef.create_instance(mass_in)
mass_out = MassDef.create_instance(mass_out)
concentration = Concentration.create_instance(concentration,
mass_def=mass_in)
if concentration.mass_def != mass_in:
raise ValueError("mass_def of concentration doesn't match mass_in")
def translate(cosmo, M, a):
if mass_in == mass_out:
return M
c_in = concentration(cosmo, M, a)
Om_in = cosmo.omega_x(a, mass_in.rho_type)
D_in = mass_in.get_Delta(cosmo, a) * Om_in
R_in = mass_in.get_radius(cosmo, M, a)
Om_out = cosmo.omega_x(a, mass_out.rho_type)
D_out = mass_out.get_Delta(cosmo, a) * Om_out
c_out = convert_concentration(
cosmo, c_old=c_in, Delta_old=D_in, Delta_new=D_out)
R_out = R_in * c_out/c_in
return mass_out.get_mass(cosmo, R_out, a)
return translate
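if __name__ == "__main__":
    # A minimal sketch exercising only objects defined in this module plus a
    # pyccl Cosmology; the cosmological parameter values are illustrative,
    # not canonical.
    import pyccl as ccl
    cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, h=0.67,
                          sigma8=0.81, n_s=0.96)
    # Mass <-> radius round trip for the Delta=200 (matter) definition.
    M = 1e14
    R = MassDef200m.get_radius(cosmo, M, 1.0)
    print(MassDef200m.get_mass(cosmo, R, 1.0))  # ~1e14
    # Translate an NFW concentration from Delta=200 to Delta=500.
    print(convert_concentration(cosmo, c_old=4.0, Delta_old=200,
                                Delta_new=500))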
|
LSSTDESCREPO_NAMECCLPATH_START.@CCL_extracted@CCL-master@pyccl@[email protected]@.PATH_END.py
|
{
"filename": "pymc_wrapper.py",
"repo_name": "sbi-dev/sbi",
"repo_path": "sbi_extracted/sbi-main/sbi/samplers/mcmc/pymc_wrapper.py",
"type": "Python"
}
|
from typing import Any, Callable, Optional
import numpy as np
import pymc
import pytensor.tensor as pt
import torch
from arviz.data import InferenceData
from sbi.utils.torchutils import tensor2numpy
class PyMCPotential(pt.Op): # type: ignore
"""PyTensor Op wrapping a callable potential function"""
itypes = [pt.dvector] # expects a vector of parameter values when called
otypes = [
pt.dscalar,
pt.dvector,
] # outputs a single scalar value (the potential) and gradients for every input
default_output = 0 # return only potential by default
def __init__(
self,
potential_fn: Callable,
device: str,
track_gradients: bool = True,
):
"""PyTensor Op wrapping a callable potential function for use
with PyMC samplers.
Args:
potential_fn: Potential function that returns a potential given parameters
device: The device to which to move the parameters before evaluation.
track_gradients: Whether to track gradients from potential function
"""
self.potential_fn = potential_fn
self.device = device
self.track_gradients = track_gradients
def perform(self, node: Any, inputs: Any, outputs: Any) -> None:
"""Compute potential and possibly gradients from input parameters
Args:
node: A "node" that represents the computation, handled internally
by PyTensor.
inputs: A sequence of inputs to the operation of type `itypes`. In this
case, the sequence will contain one array containing the
simulator parameters.
outputs: A sequence allocated for storing operation outputs. In this
case, the sequence will contain one scalar for the computed potential
and an array containing the gradient of the potential with respect
to the simulator parameters.
"""
# unpack and handle inputs
params = inputs[0]
params = (
torch.tensor(params)
.to(device=self.device, dtype=torch.float32)
.requires_grad_(self.track_gradients)
)
# call the potential function
energy = self.potential_fn(params, track_gradients=self.track_gradients)
        # store the computed potential as the first output
outputs[0][0] = tensor2numpy(energy).astype(np.float64)
# compute and record gradients if desired
if self.track_gradients:
energy.backward()
grads = params.grad
outputs[1][0] = tensor2numpy(grads).astype(np.float64)
else:
outputs[1][0] = np.zeros(params.shape, dtype=np.float64)
def grad(self, inputs: Any, output_grads: Any) -> list:
"""Get gradients computed from `perform` and return Jacobian-Vector product
Args:
inputs: A sequence of inputs to the operation of type `itypes`. In this
case, the sequence will contain one array containing the
simulator parameters.
output_grads: A sequence of the gradients of the output variables. The first
element will be the gradient of the output of the whole computational
graph with respect to the output of this specific operation, i.e.,
the potential.
Returns:
A list containing the gradient of the output of the whole computational
graph with respect to the input of this operation, i.e.,
the simulator parameters.
"""
        # symbolically re-apply the Op to access its gradient output (merged
        # with the forward pass at compile time, so it is not re-computed)
value = self(*inputs)
gradients = value.owner.outputs[1:] # type: ignore
# compute and return JVP
return [(output_grads[0] * grad) for grad in gradients]
class PyMCSampler:
"""Interface for PyMC samplers"""
def __init__(
self,
potential_fn: Callable,
initvals: np.ndarray,
step: str = "nuts",
draws: int = 1000,
tune: int = 1000,
chains: Optional[int] = None,
mp_ctx: str = "spawn",
progressbar: bool = True,
param_name: str = "theta",
device: str = "cpu",
):
"""Interface for PyMC samplers
Args:
potential_fn: Potential function from density estimator.
initvals: Initial parameters.
step: One of `"slice"`, `"hmc"`, or `"nuts"`.
draws: Number of total samples to draw.
tune: Number of tuning steps to take.
chains: Number of MCMC chains to run in parallel.
mp_ctx: Multiprocessing context for parallel sampling.
progressbar: Whether to show/hide progress bars.
param_name: Name for parameter variable, for PyMC and ArviZ structures
device: The device to which to move the parameters for potential_fn.
"""
self.param_name = param_name
self._step = step
self._draws = draws
self._tune = tune
self._initvals = [{self.param_name: iv} for iv in initvals]
self._chains = chains
self._mp_ctx = mp_ctx
self._progressbar = progressbar
        self._device = device
        # Set here so get_samples()/get_inference_data() raise a clear error
        # if called before run().
        self._inference_data: Optional[InferenceData] = None
# create PyMC model object
track_gradients = step in ("nuts", "hmc")
self._model = pymc.Model()
potential = PyMCPotential(
potential_fn, track_gradients=track_gradients, device=device
)
with self._model:
pymc.DensityDist(
self.param_name, logp=potential, size=(initvals.shape[-1],)
)
def run(self) -> np.ndarray:
"""Run MCMC with PyMC
Returns:
MCMC samples
"""
step_class = dict(slice=pymc.Slice, hmc=pymc.HamiltonianMC, nuts=pymc.NUTS)
with self._model:
inference_data = pymc.sample(
step=step_class[self._step](),
tune=self._tune,
draws=self._draws,
initvals=self._initvals, # type: ignore
chains=self._chains,
progressbar=self._progressbar,
mp_ctx=self._mp_ctx,
)
self._inference_data = inference_data
traces = inference_data.posterior # type: ignore
samples = getattr(traces, self.param_name).data
return samples
def get_samples(
self, num_samples: Optional[int] = None, group_by_chain: bool = True
) -> np.ndarray:
"""Returns samples from last call to self.run.
Raises ValueError if no samples have been generated yet.
Args:
num_samples: Number of samples to return (for each chain if grouped by
chain), if too large, all samples are returned (no error).
group_by_chain: Whether to return samples grouped by chain (chain x samples
x dim_params) or flattened (all_samples, dim_params).
Returns:
samples
"""
if self._inference_data is None:
raise ValueError("No samples found from MCMC run.")
# if not grouped by chain, flatten samples into (all_samples, dim_params)
traces = self._inference_data.posterior # type: ignore
samples = getattr(traces, self.param_name).data
if not group_by_chain:
samples = samples.reshape(-1, samples.shape[-1])
# if not specified return all samples
if num_samples is None:
return samples
# otherwise return last num_samples (for each chain when grouped).
elif group_by_chain:
return samples[:, -num_samples:, :]
else:
return samples[-num_samples:, :]
def get_inference_data(self) -> InferenceData:
"""Returns InferenceData from last call to self.run,
which contains diagnostic information in addition to samples
Raises ValueError if no samples have been generated yet.
Returns:
InferenceData containing samples and sampling run information
"""
if self._inference_data is None:
raise ValueError("No samples found from MCMC run.")
return self._inference_data
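if __name__ == "__main__":
    # A minimal sketch with a hypothetical standard-normal potential,
    # assuming pymc/pytensor are installed and behave as wrapped above;
    # draws/tune/initvals are illustrative only.
    def toy_potential(theta: torch.Tensor, track_gradients: bool = True):
        # log-density of a standard normal, up to an additive constant
        return -0.5 * (theta**2).sum()
    init = np.zeros((1, 3))  # one chain, 3-dimensional parameter
    sampler = PyMCSampler(
        potential_fn=toy_potential,
        initvals=init,
        step="nuts",
        draws=200,
        tune=200,
        chains=1,
        progressbar=False,
    )
    samples = sampler.run()
    print(samples.shape)  # (chains, draws, dim) = (1, 200, 3)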
|
sbi-devREPO_NAMEsbiPATH_START.@sbi_extracted@sbi-main@sbi@samplers@mcmc@[email protected]_END.py
|
{
"filename": "conf.py",
"repo_name": "tslearn-team/tslearn",
"repo_path": "tslearn_extracted/tslearn-main/docs/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# tslearn documentation build configuration file, created by
# sphinx-quickstart on Mon May 8 21:34:49 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import warnings
import sphinx_bootstrap_theme
import subprocess
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
rtd_version = os.environ.get('READTHEDOCS_VERSION', 'local')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx_gallery.gen_gallery',
'numpydoc',
'nbsphinx'
]
numpydoc_show_class_members = True
numpydoc_class_members_toctree = False
autosummary_generate = True
autosummary_generate_overwrite = False
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://numpy.org/doc/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'sklearn': ('https://scikit-learn.org/stable', None)
}
from sphinx_gallery.scrapers import matplotlib_scraper
class matplotlib_svg_scraper(object):
def __repr__(self):
return self.__class__.__name__
def __call__(self, *args, **kwargs):
return matplotlib_scraper(*args, format='svg', **kwargs)
sphinx_gallery_conf = {
'examples_dirs': ['./examples'],
'gallery_dirs': ['./auto_examples'],
'reference_url': {'tslearn': None},
'default_thumb_file': '_static/img/logo.png',
'backreferences_dir': 'gen_modules/backreferences',
'doc_module': ('tslearn',),
'subsection_order': ["examples", "examples/metrics", "examples/neighbors",
"examples/clustering", "examples/classification",
"examples/autodiff", "examples/misc"].index,
'image_scrapers': (matplotlib_svg_scraper(),),
# 'binder': {
# # Required keys
# 'org': 'rtavenar',
# 'repo': 'tslearn',
# 'branch': 'master',
# 'binderhub_url': 'https://mybinder.org',
# 'dependencies': '../../requirements.txt',
# # Optional keys
# 'use_jupyter_lab': True
# }
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tslearn'
copyright = u'2017, Romain Tavenard'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import tslearn
version = tslearn.__version__
# The full version, including alpha/beta/rc tags.
release = tslearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'sphinx_rtd_theme'
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site map",
'navbar_links': [
("Quick Start", "quickstart"),
("User Guide", "user_guide/userguide"),
("API", "reference"),
("Examples", "auto_examples/index"),
("Citing tslearn", "citing"),
("Code on GitHub", "https://github.com/tslearn-team/tslearn/", True),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
    # Tab name for the current page's TOC. (Default: "Page")
'navbar_pagenav_name': "Current Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': -1,
'globaltoc_includehidden': "false",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "exclude",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "cosmo" or "sandstone".
#
# The set of valid themes depend on the version of Bootstrap
# that's used (the next config option).
#
# Currently, the supported themes are:
# - Bootstrap 2: https://bootswatch.com/2
# - Bootstrap 3: https://bootswatch.com/3
'bootswatch_theme': "lumen"
}
def setup(app):
    app.add_css_file("custom.css")
    if rtd_version != 'stable':
        app.add_js_file("custom.js")
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tslearndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'tslearn.tex', u'tslearn Documentation',
u'Romain Tavenard', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tslearn', u'tslearn Documentation',
[u'Romain Tavenard'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tslearn', u'tslearn Documentation',
u'Romain Tavenard', 'tslearn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# The following is used by sphinx.ext.linkcode to provide links to github
REVISION_CMD = 'git rev-parse --short HEAD'
def _get_git_revision():
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except (subprocess.CalledProcessError, OSError):
print('Failed to execute git to get revision')
return None
return revision.decode('utf-8')
def linkcode_resolve(domain, info):
def find_source():
# try to find the file and line number, based on code from numpy:
# https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L286
obj = sys.modules[info['module']]
for part in info['fullname'].split('.'):
obj = getattr(obj, part)
import inspect
import os
fn = inspect.getsourcefile(obj)
fn = os.path.relpath(fn, start=os.path.dirname(tslearn.__file__))
source, lineno = inspect.getsourcelines(obj)
return fn, lineno, lineno + len(source) - 1
if domain != 'py' or not info['module']:
return None
try:
filename = 'tslearn/%s#L%d-L%d' % find_source()
except Exception:
filename = info['module'].replace('.', '/') + '.py'
revision = _get_git_revision()
return "https://github.com/tslearn-team/tslearn/blob/%s/%s" % (revision,
filename)
warnings.filterwarnings("ignore", category=UserWarning,
message='Matplotlib is currently using agg, which is a'
' non-GUI backend, so cannot show the figure.')
|
tslearn-teamREPO_NAMEtslearnPATH_START.@tslearn_extracted@tslearn-main@[email protected]@.PATH_END.py
|
{
"filename": "iaf.py",
"repo_name": "ML4GW/amplfi",
"repo_path": "amplfi_extracted/amplfi-main/amplfi/train/architectures/flows/iaf.py",
"type": "Python"
}
|
import torch
import torch.distributions as dist
from pyro.distributions.conditional import ConditionalComposeTransformModule
from pyro.distributions.transforms import ConditionalAffineAutoregressive
from pyro.nn import ConditionalAutoRegressiveNN
from . import FlowArchitecture
class InverseAutoregressiveFlow(FlowArchitecture):
def __init__(
self,
*args,
hidden_features: int = 50,
num_transforms: int = 5,
num_blocks: int = 2,
        activation: torch.nn.Module = torch.nn.Tanh(),
**kwargs,
):
super().__init__(*args, **kwargs)
self.hidden_features = hidden_features
self.num_blocks = num_blocks
self.num_transforms = num_transforms
self.activation = activation
# register these as buffers so the
# distributions are moved to the correct device
self.register_buffer("mean", torch.zeros(self.num_params))
self.register_buffer("std", torch.ones(self.num_params))
# build the sequence of transforms
self.transforms = self.build_transforms()
def transform_block(self):
"""Returns single autoregressive transform"""
arn = ConditionalAutoRegressiveNN(
self.num_params,
self.embedding_net.context_dim,
self.num_blocks * [self.hidden_features],
nonlinearity=self.activation,
)
return ConditionalAffineAutoregressive(arn)
def distribution(self):
"""Returns the base distribution for the flow"""
return dist.Normal(
self.mean,
self.std,
)
def build_transforms(self):
"""Build the transform"""
transforms = []
for _ in range(self.num_transforms):
transform = self.transform_block()
            transforms.append(transform)
return ConditionalComposeTransformModule(transforms)
class MaskedAutoregressiveFlow(InverseAutoregressiveFlow):
"""Affine autoregressive transforms that allow density
evaluation in a single forward pass."""
def transform_block(self):
t = super().transform_block()
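        # Inverting the affine autoregressive transform turns the IAF block into a
        # MAF-style block: log-density evaluation becomes a single forward pass,
        # while sampling becomes sequential.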
return t.inv
|
ML4GWREPO_NAMEamplfiPATH_START.@amplfi_extracted@amplfi-main@amplfi@train@architectures@[email protected]@.PATH_END.py
|
{
"filename": "test_cookbook.py",
"repo_name": "healpy/healpy",
"repo_path": "healpy_extracted/healpy-main/test/test_cookbook.py",
"type": "Python"
}
|
def test_is_seq():
import numpy as np
from healpy.cookbook import is_seq
assert not is_seq(None)
assert not is_seq(1)
assert not is_seq(1.)
assert not is_seq(np.array(1))
assert is_seq((1, 2, 3))
assert is_seq([1, 2, 3])
assert is_seq(np.array([1, 2, 3]))
assert is_seq(np.array([[1], [2], [3]]))
assert is_seq(())
assert is_seq([])
assert is_seq(np.array([]))
def test_is_seq_of_seq():
import numpy as np
from healpy.cookbook import is_seq_of_seq
assert not is_seq_of_seq(None)
assert not is_seq_of_seq(1)
assert not is_seq_of_seq(1.)
assert not is_seq_of_seq(np.array(1))
assert not is_seq_of_seq((1, 2, 3))
assert not is_seq_of_seq([1, 2, 3])
assert not is_seq_of_seq(np.array([1, 2, 3]))
assert is_seq_of_seq(((1, 2, 3), (4, 5), (6,)))
assert is_seq_of_seq([[1], [2, 3], [4, 5, 6]])
assert is_seq_of_seq(np.array([[1], [2], [3]]))
assert is_seq_of_seq(((1,), [2], np.array([3])))
assert is_seq_of_seq(())
assert is_seq_of_seq([])
assert is_seq_of_seq(np.array([]))
# allow None
assert not is_seq_of_seq([[1], [2], None], False)
assert is_seq_of_seq([[1], [2], None], True)
|
healpyREPO_NAMEhealpyPATH_START.@healpy_extracted@healpy-main@test@[email protected]_END.py
|
{
"filename": "am.py",
"repo_name": "orlox/mesa_input_data",
"repo_path": "mesa_input_data_extracted/mesa_input_data-master/2016_double_bh/scripts/am.py",
"type": "Python"
}
|
#!/usr/bin/env python
from pylab import *
import matplotlib.pyplot as plt
from matplotlib import rc
import mesa as ms
import math
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import matplotlib.patheffects as path_effects
import os
import matplotlib.gridspec as gridspec
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica'], 'size' : 20})
rc('text', usetex=True)
import numpy
from mesa import *
params = {'backend': 'pdf',
'figure.figsize': [4.3, 5],
'font.family':'serif',
'font.size':10,
'font.serif': 'Times Roman',
'axes.titlesize': 'medium',
'axes.labelsize': 'medium',
'legend.fontsize': 8,
'legend.frameon' : False,
'text.usetex': True,
'figure.dpi': 600,
'lines.markersize': 4,
'lines.linewidth': 3,
'lines.antialiased': False,
'path.simplify': False,
'legend.handlelength':3,
'figure.subplot.bottom':0.1,
'figure.subplot.top':0.975,
'figure.subplot.left':0.15,
'figure.subplot.right':0.95}
mpl.rcParams.update(params)
WHITE = (1.00,1.00,1.00)
BLACK = (0.00,0.00,0.00)
ORANGE = (0.90,0.60,0.00)
SKY_BLUE = (0.35,0.70,0.90)
BLUE_GREEN = (0.00,0.60,0.50)
YELLOW = (0.95,0.90,0.25)
BLUE = (0.00,0.45,0.70)
VERMILLION = (0.80,0.40,0.00)
RED_PURPLE = (0.80,0.60,0.70)
#hexcols[0] dark bluish
#hexcols[1] light blue
#hexcols[2] greenish
#hexcols[3] dark green
#hexcols[4] brownish
#hexcols[5] light brown
#hexcols[6] pinkish
#hexcols[7] dark something redish
#hexcols[8] magentish
hexcols = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77',\
'#CC6677', '#882255', '#AA4499', '#661100', '#6699CC', '#AA4466','#4477AA']
fig = plt.figure()
gs1 = gridspec.GridSpec(3, 1)
gs1.update(wspace =0, hspace = 0)
axes = []
axes.append(plt.subplot(gs1[0:1, :]))
axes.append(plt.subplot(gs1[1:2, :]))
axes.append(plt.subplot(gs1[2:3, :]))
folders = ["am_low", "am_mid", "am_high"]
mrange = np.arange(0.0001,100,0.1)
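# reference curves: approximate specific angular momentum at the innermost stable
# circular orbit of a Schwarzschild and a maximally spinning Kerr black hole
# (cm^2/s as a function of mass in Msun; inferred from the plot labels below)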
jsch = np.log10(4.6*1e16*mrange/3)
jkerr = np.log10(1.5*1e16*mrange/3)
for k in [0,1,2]:
Z10 = history_data(folders[k], slname = "Z10.data", clean_starlog=False)
Z20 = history_data(folders[k], slname = "Z20.data", clean_starlog=False)
Z50 = history_data(folders[k], slname = "Z50.data", clean_starlog=False)
axes[k].plot(Z50.get("mass"), Z50.get("log_j_rot"), label = "$Z=Z_\odot/50$", color = hexcols[1])
axes[k].plot(Z20.get("mass"), Z20.get("log_j_rot"), label = "$Z=Z_\odot/20$", color = hexcols[5])
axes[k].plot(Z10.get("mass"), Z10.get("log_j_rot"), label = "$Z=Z_\odot/10$", color = hexcols[3])
    axes[k].plot(mrange, jsch, label = "Schwarzschild", color = "k", ls = "--")
axes[k].plot(mrange, jkerr, label = "Kerr", ls = ":", color = "k")
if k==0:
axes[k].legend(loc=4)
axes[k].set_xlim(0,65)
axes[k].set_ylim(15.6,18.4)
axes[0].text(0.08, 0.08, "$M_\mathrm{i}\simeq 50M_\odot,\;P_\mathrm{i}=0.9\;\mathrm{d}$", fontsize = 13, transform=axes[0].transAxes)
axes[1].text(0.08, 0.08, "$M_\mathrm{i}\simeq 63M_\odot,\;P_\mathrm{i}=1.0\;\mathrm{d}$", fontsize = 13, transform=axes[1].transAxes)
axes[2].text(0.08, 0.08, "$M_\mathrm{i}\simeq 80M_\odot,\;P_\mathrm{i}=1.1\;\mathrm{d}$", fontsize = 13, transform=axes[2].transAxes)
axes[2].set_xlabel("$m/M_\odot$")
axes[2].set_ylabel("$\log\;j_\mathrm{rot}\;\mathrm{[cm^2\;s^{-1}]}$")
axes[1].set_ylabel("$\log\;j_\mathrm{rot}\;\mathrm{[cm^2\;s^{-1}]}$")
axes[0].set_ylabel("$\log\;j_\mathrm{rot}\;\mathrm{[cm^2\;s^{-1}]}$")
axes[0].set_xticklabels([])
axes[1].set_xticklabels([])
plt.savefig("am.pdf", dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False)#, bbox_inches='tight', pad_inches=0.0)
|
orloxREPO_NAMEmesa_input_dataPATH_START.@mesa_input_data_extracted@mesa_input_data-master@2016_double_bh@[email protected]@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "dingswin/psrvlbireduce",
"repo_path": "psrvlbireduce_extracted/psrvlbireduce-master/README.md",
"type": "Markdown"
}
|
About psrvlbireduce:
This project provides a pipeline for VLBI data reduction, with a special focus on VLBI astrometry.
The main data reduction code, written in Python-based ParselTongue (Kettenis et
al. 2006), is vlbi_astrometry.py, which calls classes and functions from
vlbireduce.py and support_vlbireduce.py. The latter two use functions provided
in vlbatasks.py, where some new features have also been added. The pipeline was
originally developed and used by Dr. Deller for the PSRPI project (Deller et
al. 2019) and other projects. Since 2018, it has undergone a major upgrade
(including systematic restructuring) carried out by Hao Ding. Currently, the
vlbireduce_v3 branch is already compatible with Python 3 and will be merged into
the main branch once it has been properly tested.
Should you have any inquiries about the pipeline or suggestions for improvement, feel free to contact Hao ([email protected]) or Adam ([email protected]).
|
dingswinREPO_NAMEpsrvlbireducePATH_START.@psrvlbireduce_extracted@[email protected]@.PATH_END.py
|
{
"filename": "automatic_differentiation.ipynb",
"repo_name": "scikit-hep/iminuit",
"repo_path": "iminuit_extracted/iminuit-main/doc/notebooks/automatic_differentiation.ipynb",
"type": "Jupyter Notebook"
}
|
# Automatic differentiation with JAX
Here we look into automatic differentiation, which can speed up fits with very many parameters.
iminuit's minimization algorithm MIGRAD uses a mix of gradient descent and Newton's method to find the minimum. Both require a first derivative, which MIGRAD usually computes numerically via finite differences. This requires many function evaluations, and the resulting gradient may not be accurate. As an alternative, iminuit also allows the user to compute the gradient and pass it to MIGRAD.
Although computing derivatives is often straightforward, it is usually too much hassle to do manually. Automatic differentiation (AD) is an interesting alternative: it allows one to compute exact derivatives efficiently for pure Python/numpy functions. We demonstrate automatic differentiation with the JAX module, which can not only compute derivatives, but also accelerates the computation of Python code (including the gradient code) with a just-in-time compiler.
[Recommended read: Gentle introduction to AD](https://www.kaggle.com/borisettinger/gentle-introduction-to-automatic-differentiation)
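Before moving on to the histogram fit below, here is a minimal sketch of the basic pattern: compute the gradient of a cost function with `jax.grad` and pass it to `Minuit` through the `grad` keyword. The quadratic toy function and the parameter names used here are made up purely for illustration.
```python
import jax
from jax import numpy as jnp
from iminuit import Minuit

jax.config.update("jax_enable_x64", True)  # use double precision

def fcn(par):
    # toy least-squares-like function with its minimum at (1, 2, 3)
    return jnp.sum((par - jnp.array([1.0, 2.0, 3.0])) ** 2)

grad = jax.grad(fcn)  # exact gradient via automatic differentiation

m = Minuit(fcn, (0.0, 0.0, 0.0), grad=grad, name=("a", "b", "c"))
m.errordef = Minuit.LEAST_SQUARES
m.migrad()
print(m.values)  # close to (1, 2, 3)
```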
## Fit of a gaussian model to a histogram
We fit a gaussian to a histogram using a maximum-likelihood approach based on Poisson statistics. This example is used to investigate how automatic differentiation can accelerate a typical fit in a counting experiment.
To compare fits with and without passing an analytic gradient fairly, we use `Minuit.strategy = 0`, which prevents Minuit from automatically computing the Hesse matrix after the fit.
```python
# !pip install jax jaxlib matplotlib numpy iminuit numba-stats
%config InlineBackend.figure_formats = ['svg']
import jax
from jax import numpy as jnp # replacement for normal numpy
from jax.scipy.special import erf # replacement for scipy.special.erf
from iminuit import Minuit
import numba as nb
import numpy as np # original numpy still needed, since jax does not cover full API
jax.config.update(
"jax_enable_x64", True
) # enable float64 precision, default is float32
print(f"JAX version {jax.__version__}")
print(f"numba version {nb.__version__}")
```
JAX version 0.4.31
numba version 0.60.0
We generate some toy data and write the negative log-likelihood (nll) for a fit to binned data, assuming Poisson-distributed counts.
**Note:** We write all statistical functions in pure Python code, to demonstrate Jax's ability to automatically differentiate and JIT compile this code. In practice, one should import JIT-able statistical distributions from jax.scipy.stats. The library versions can be expected to have fewer bugs and to be faster and more accurate than hand-written code.
```python
# generate some toy data
rng = np.random.default_rng(seed=1)
n, xe = np.histogram(rng.normal(size=10000), bins=1000)
def cdf(x, mu, sigma):
# cdf of a normal distribution, needed to compute the expected counts per bin
# better alternative for real code: from jax.scipy.stats.norm import cdf
z = (x - mu) / sigma
return 0.5 * (1 + erf(z / np.sqrt(2)))
def nll(par): # negative log-likelihood with constants stripped
amp = par[0]
mu, sigma = par[1:]
p = cdf(xe, mu, sigma)
mu = amp * jnp.diff(p)
result = jnp.sum(mu - n + n * jnp.log(n / (mu + 1e-100) + 1e-100))
return result
```
Let's check results from all combinations of using JIT and gradient and then compare the execution times.
```python
start_values = (1.5 * np.sum(n), 1.0, 2.0)
limits = ((0, None), (None, None), (0, None))
def make_and_run_minuit(fcn, grad=None):
m = Minuit(fcn, start_values, grad=grad, name=("amp", "mu", "sigma"))
m.errordef = Minuit.LIKELIHOOD
m.limits = limits
m.strategy = 0 # do not explicitly compute hessian after minimisation
m.migrad()
return m
```
```python
m1 = make_and_run_minuit(nll)
m1.fmin
```
<table>
<tr>
<th colspan="2" style="text-align:center" title="Minimizer"> Migrad </th>
</tr>
<tr>
<td style="text-align:left" title="Minimum value of function"> FCN = 496.2 </td>
<td style="text-align:center" title="Total number of function and (optional) gradient evaluations"> Nfcn = 66 </td>
</tr>
<tr>
<td style="text-align:left" title="Estimated distance to minimum and goal"> EDM = 1.84e-08 (Goal: 0.0001) </td>
<td style="text-align:center" title="Total run time of algorithms"> time = 0.4 sec </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> Valid Minimum </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below EDM threshold (goal x 10) </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> No parameters at limit </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below call limit </td>
</tr>
<tr>
<td style="text-align:center;background-color:#FFF79A;color:black"> Hesse ok </td>
<td style="text-align:center;background-color:#FFF79A;color:black"> Covariance APPROXIMATE </td>
</tr>
</table>
```python
m2 = make_and_run_minuit(nll, grad=jax.grad(nll))
m2.fmin
```
<table>
<tr>
<th colspan="2" style="text-align:center" title="Minimizer"> Migrad </th>
</tr>
<tr>
<td style="text-align:left" title="Minimum value of function"> FCN = 496.2 </td>
<td style="text-align:center" title="Total number of function and (optional) gradient evaluations"> Nfcn = 26, Ngrad = 6 </td>
</tr>
<tr>
<td style="text-align:left" title="Estimated distance to minimum and goal"> EDM = 1.84e-08 (Goal: 0.0001) </td>
<td style="text-align:center" title="Total run time of algorithms"> time = 1.2 sec </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> Valid Minimum </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below EDM threshold (goal x 10) </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> No parameters at limit </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below call limit </td>
</tr>
<tr>
<td style="text-align:center;background-color:#FFF79A;color:black"> Hesse ok </td>
<td style="text-align:center;background-color:#FFF79A;color:black"> Covariance APPROXIMATE </td>
</tr>
</table>
```python
m3 = make_and_run_minuit(jax.jit(nll))
m3.fmin
```
<table>
<tr>
<th colspan="2" style="text-align:center" title="Minimizer"> Migrad </th>
</tr>
<tr>
<td style="text-align:left" title="Minimum value of function"> FCN = 496.2 </td>
<td style="text-align:center" title="Total number of function and (optional) gradient evaluations"> Nfcn = 66 </td>
</tr>
<tr>
<td style="text-align:left" title="Estimated distance to minimum and goal"> EDM = 1.84e-08 (Goal: 0.0001) </td>
<td style="text-align:center" title="Total run time of algorithms"> time = 0.1 sec </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> Valid Minimum </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below EDM threshold (goal x 10) </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> No parameters at limit </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below call limit </td>
</tr>
<tr>
<td style="text-align:center;background-color:#FFF79A;color:black"> Hesse ok </td>
<td style="text-align:center;background-color:#FFF79A;color:black"> Covariance APPROXIMATE </td>
</tr>
</table>
```python
m4 = make_and_run_minuit(jax.jit(nll), grad=jax.jit(jax.grad(nll)))
m4.fmin
```
<table>
<tr>
<th colspan="2" style="text-align:center" title="Minimizer"> Migrad </th>
</tr>
<tr>
<td style="text-align:left" title="Minimum value of function"> FCN = 496.2 </td>
<td style="text-align:center" title="Total number of function and (optional) gradient evaluations"> Nfcn = 26, Ngrad = 6 </td>
</tr>
<tr>
<td style="text-align:left" title="Estimated distance to minimum and goal"> EDM = 1.84e-08 (Goal: 0.0001) </td>
<td style="text-align:center" title="Total run time of algorithms"> time = 0.3 sec </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> Valid Minimum </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below EDM threshold (goal x 10) </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> No parameters at limit </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below call limit </td>
</tr>
<tr>
<td style="text-align:center;background-color:#FFF79A;color:black"> Hesse ok </td>
<td style="text-align:center;background-color:#FFF79A;color:black"> Covariance APPROXIMATE </td>
</tr>
</table>
```python
from numba_stats import norm # numba jit-able version of norm
@nb.njit
def nb_nll(par):
amp = par[0]
mu, sigma = par[1:]
p = norm.cdf(xe, mu, sigma)
mu = amp * np.diff(p)
result = np.sum(mu - n + n * np.log(n / (mu + 1e-323) + 1e-323))
return result
m5 = make_and_run_minuit(nb_nll)
m5.fmin
```
<table>
<tr>
<th colspan="2" style="text-align:center" title="Minimizer"> Migrad </th>
</tr>
<tr>
<td style="text-align:left" title="Minimum value of function"> FCN = 496.2 </td>
<td style="text-align:center" title="Total number of function and (optional) gradient evaluations"> Nfcn = 82 </td>
</tr>
<tr>
<td style="text-align:left" title="Estimated distance to minimum and goal"> EDM = 5.31e-05 (Goal: 0.0001) </td>
<td style="text-align:center" title="Total run time of algorithms"> time = 2.0 sec </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> Valid Minimum </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below EDM threshold (goal x 10) </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> No parameters at limit </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below call limit </td>
</tr>
<tr>
<td style="text-align:center;background-color:#FFF79A;color:black"> Hesse ok </td>
<td style="text-align:center;background-color:#FFF79A;color:black"> Covariance APPROXIMATE </td>
</tr>
</table>
```python
from timeit import timeit
times = {
"no JIT, no grad": "m1",
"no JIT, grad": "m2",
"jax JIT, no grad": "m3",
"jax JIT, grad": "m4",
"numba JIT, no grad": "m5",
}
for k, v in times.items():
t = timeit(
f"{v}.values = start_values; {v}.migrad()",
f"from __main__ import {v}, start_values",
number=1,
)
times[k] = t
```
```python
from matplotlib import pyplot as plt
x = np.fromiter(times.values(), dtype=float)
xmin = np.min(x)
y = -np.arange(len(times))
plt.barh(y, x)
for yi, k, v in zip(y, times, x):
plt.text(v, yi, f"{v/xmin:.1f}x")
plt.yticks(y, times.keys())
for loc in ("top", "right"):
plt.gca().spines[loc].set_visible(False)
plt.xlabel("execution time / s");
```

Conclusions:
1. As expected, the best results with JAX are obtained by JIT compiling the function and gradient and using the gradient in the minimization. However, the performance of the Numba JIT compiled function is comparable even without computing the gradient.
2. JIT compiling the cost function with JAX but not using the gradient also gives good performance, but worse than using Numba for the same.
3. Combining the JAX JIT with the JAX gradient calculation is very important. Using only the Python-computed gradient even reduces performance in this example.
In general, the gain from using a gradient is larger for functions with hundreds of parameters, as is common in machine learning. Human-made models often have fewer than 10 parameters, and then the gain is not so dramatic.
## Computing covariance matrices with JAX
Automatic differentiation gives us another way to compute uncertainties of fitted parameters. MINUIT computes the uncertainties with the HESSE algorithm by default, which approximates the matrix of second derivatives using finite differences and then inverts it.
Let's compare the output of HESSE with the exact (within floating point precision) computation using automatic differentiation.
```python
m4.hesse()
cov_hesse = m4.covariance
def jax_covariance(par):
return jnp.linalg.inv(jax.hessian(nll)(par))
par = np.array(m4.values)
cov_jax = jax_covariance(par)
print(
f"sigma[amp] : HESSE = {cov_hesse[0, 0] ** 0.5:6.1f}, JAX = {cov_jax[0, 0] ** 0.5:6.1f}"
)
print(
f"sigma[mu] : HESSE = {cov_hesse[1, 1] ** 0.5:6.4f}, JAX = {cov_jax[1, 1] ** 0.5:6.4f}"
)
print(
f"sigma[sigma]: HESSE = {cov_hesse[2, 2] ** 0.5:6.4f}, JAX = {cov_jax[2, 2] ** 0.5:6.4f}"
)
```
sigma[amp] : HESSE = 100.0, JAX = 100.0
sigma[mu] : HESSE = 0.0100, JAX = 0.0100
sigma[sigma]: HESSE = 0.0071, JAX = 0.0071
Success: HESSE and JAX give the same answer within the relevant precision.
**Note:** If you compute the covariance matrix in this way from a least-squares cost function instead of a negative log-likelihood, you must multiply it by 2.
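As a small sketch of that point (`least_squares` here stands for a hypothetical chi-square-style cost function, not one defined in this notebook):
```python
def covariance_from_least_squares(least_squares, par):
    # a least-squares cost function uses errordef = 1 instead of 0.5,
    # so its covariance estimate is twice the inverse Hessian
    return 2 * jnp.linalg.inv(jax.hessian(least_squares)(par))
```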
Let us compare the performance of HESSE with Jax.
```python
%%timeit -n 1 -r 3
m = Minuit(nll, par)
m.errordef = Minuit.LIKELIHOOD
m.hesse()
```
22.9 ms ± 2.51 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)
```python
%%timeit -n 1 -r 3
jax_covariance(par)
```
35 ms ± 4.11 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)
The computation with Jax is slower, but it is also more accurate (although the added precision is not relevant).
Minuit's HESSE algorithm still makes sense today. It has the advantage that it can process any function, while Jax cannot differentiate a function that calls into C/C++ or Cython code, for example.
Final note: If we JIT compile `jax_covariance`, it greatly outperforms Minuit's HESSE algorithm, but that only makes sense if you need to compute the Hessian at different parameter values, so that the extra time spent on compilation is balanced by the time saved over many invocations. This is not the case here: the Hessian is only needed at the best-fit point.
```python
%%timeit -n 1 -r 3 jit_jax_covariance = jax.jit(jax_covariance); jit_jax_covariance(par)
jit_jax_covariance(par)
```
107 μs ± 10.3 μs per loop (mean ± std. dev. of 3 runs, 1 loop each)
It is much faster... but only because the compilation cost is excluded here.
```python
%%timeit -n 1 -r 1
# if we include the JIT compilation cost, the performance drops dramatically
@jax.jit
def jax_covariance(par):
return jnp.linalg.inv(jax.hessian(nll)(par))
jax_covariance(par)
```
429 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
With compilation cost included, it is much slower.
Conclusion: Using the JIT compiler makes a lot of sense if the covariance matrix has to be computed repeatedly for the same cost function but different parameters, but this is not the case when we use it to compute parameter errors.
## Fit data points with uncertainties in x and y
Let's say we have some data points $(x_i \pm \sigma_{x,i}, y_i \pm \sigma_{y,i})$ and we have a model $y=f(x)$ that we want to adapt to this data. If $\sigma_{x,i}$ was zero, we could use the usual least-squares method, minimizing the sum of squared residuals $r^2_i = (y_i - f(x_i))^2 / \sigma^2_{y,i}$. Here, we don't know where to evaluate $f(x)$, since the exact $x$-location is only known up to $\sigma_{x,i}$.
We can approximately extend the standard least-squares method to handle this case. We use that the uncertainty along the $x$-axis can be converted into an additional uncertainty along the $y$-axis with error propagation,
$$
f(x_i \pm \sigma_{x,i}) \simeq f(x_i) \pm f'(x_i)\,\sigma_{x,i}.
$$
Using this, we obtain modified squared residuals
$$
r^2_i = \frac{(y_i - f(x_i))^2}{\sigma^2_{y,i} + (f'(x_i) \,\sigma_{x,i})^2}.
$$
We demonstrate this with a fit of a polynomial.
```python
# polynomial model
def f(x, par):
return jnp.polyval(par, x)
# true polynomial f(x) = x^2 + 2 x + 3
par_true = np.array((1, 2, 3))
# grad computes derivative with respect to the first argument
f_prime = jax.jit(jax.grad(f))
# checking first derivative f'(x) = 2 x + 2
assert f_prime(0.0, par_true) == 2
assert f_prime(1.0, par_true) == 4
assert f_prime(2.0, par_true) == 6
# ok!
# generate toy data
n = 30
data_x = np.linspace(-4, 7, n)
data_y = f(data_x, par_true)
rng = np.random.default_rng(seed=1)
sigma_x = 0.5
sigma_y = 5
data_x += rng.normal(0, sigma_x, n)
data_y += rng.normal(0, sigma_y, n)
```
```python
plt.errorbar(data_x, data_y, sigma_y, sigma_x, fmt="o");
```

```python
# define the cost function
@jax.jit
def cost(par):
result = 0.0
for xi, yi in zip(data_x, data_y):
y_var = sigma_y**2 + (f_prime(xi, par) * sigma_x) ** 2
result += (yi - f(xi, par)) ** 2 / y_var
return result
cost.errordef = Minuit.LEAST_SQUARES
# test the jit-ed function
cost(np.zeros(3))
```
Array(876.49545695, dtype=float64)
```python
m = Minuit(cost, np.zeros(3))
m.migrad()
```
<table>
<tr>
<th colspan="2" style="text-align:center" title="Minimizer"> Migrad </th>
</tr>
<tr>
<td style="text-align:left" title="Minimum value of function"> FCN = 23.14 </td>
<td style="text-align:center" title="Total number of function and (optional) gradient evaluations"> Nfcn = 91 </td>
</tr>
<tr>
<td style="text-align:left" title="Estimated distance to minimum and goal"> EDM = 3.12e-05 (Goal: 0.0002) </td>
<td style="text-align:center" title="Total run time of algorithms"> </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> Valid Minimum </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below EDM threshold (goal x 10) </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> No parameters at limit </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Below call limit </td>
</tr>
<tr>
<td style="text-align:center;background-color:#92CCA6;color:black"> Hesse ok </td>
<td style="text-align:center;background-color:#92CCA6;color:black"> Covariance accurate </td>
</tr>
</table><table>
<tr>
<td></td>
<th title="Variable name"> Name </th>
<th title="Value of parameter"> Value </th>
<th title="Hesse error"> Hesse Error </th>
<th title="Minos lower error"> Minos Error- </th>
<th title="Minos upper error"> Minos Error+ </th>
<th title="Lower limit of the parameter"> Limit- </th>
<th title="Upper limit of the parameter"> Limit+ </th>
<th title="Is the parameter fixed in the fit"> Fixed </th>
</tr>
<tr>
<th> 0 </th>
<td> x0 </td>
<td> 1.25 </td>
<td> 0.15 </td>
<td> </td>
<td> </td>
<td> </td>
<td> </td>
<td> </td>
</tr>
<tr>
<th> 1 </th>
<td> x1 </td>
<td> 1.5 </td>
<td> 0.5 </td>
<td> </td>
<td> </td>
<td> </td>
<td> </td>
<td> </td>
</tr>
<tr>
<th> 2 </th>
<td> x2 </td>
<td> 1.6 </td>
<td> 1.5 </td>
<td> </td>
<td> </td>
<td> </td>
<td> </td>
<td> </td>
</tr>
</table><table>
<tr>
<td></td>
<th> x0 </th>
<th> x1 </th>
<th> x2 </th>
</tr>
<tr>
<th> x0 </th>
<td> 0.0223 </td>
<td style="background-color:rgb(181,181,250);color:black"> -0.039 <strong>(-0.530)</strong> </td>
<td style="background-color:rgb(165,165,250);color:black"> -0.150 <strong>(-0.657)</strong> </td>
</tr>
<tr>
<th> x1 </th>
<td style="background-color:rgb(181,181,250);color:black"> -0.039 <strong>(-0.530)</strong> </td>
<td> 0.24 </td>
<td style="background-color:rgb(250,216,216);color:black"> 0.17 <strong>(0.230)</strong> </td>
</tr>
<tr>
<th> x2 </th>
<td style="background-color:rgb(165,165,250);color:black"> -0.150 <strong>(-0.657)</strong> </td>
<td style="background-color:rgb(250,216,216);color:black"> 0.17 <strong>(0.230)</strong> </td>
<td> 2.32 </td>
</tr>
</table>
```python
plt.errorbar(data_x, data_y, sigma_y, sigma_x, fmt="o", label="data")
x = np.linspace(data_x[0], data_x[-1], 200)
par = np.array(m.values)
plt.plot(x, f(x, par), label="fit")
plt.legend()
# check fit quality
chi2 = m.fval
ndof = len(data_y) - 3
plt.title(f"$\\chi^2 / n_\\mathrm{{dof}} = {chi2:.2f} / {ndof} = {chi2/ndof:.2f}$");
```

We obtained a good fit.
|
scikit-hepREPO_NAMEiminuitPATH_START.@iminuit_extracted@iminuit-main@doc@notebooks@[email protected]_END.py
|
{
"filename": "io.py",
"repo_name": "HERA-Team/hera_cal",
"repo_path": "hera_cal_extracted/hera_cal-main/hera_cal/io.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
from __future__ import annotations
import numpy as np
from collections import OrderedDict as odict
import operator
import os
import copy
import warnings
import inspect
from functools import reduce
from collections.abc import Iterable
from pyuvdata import UVCal, UVData, Telescope
from pyuvdata import utils as uvutils
from astropy import units
from astropy.coordinates import EarthLocation
from astropy.io import fits
import h5py
import scipy
import pickle
import random
import glob
from pyuvdata.utils import POL_STR2NUM_DICT, POL_NUM2STR_DICT, ENU_from_ECEF, XYZ_from_LatLonAlt
import argparse
from hera_filters.dspec import place_data_on_uniform_grid
from typing import Literal
from pathlib import Path
from functools import cached_property
from astropy.time import Time
from contextlib import contextmanager
from functools import lru_cache
from pyuvdata.uvdata import FastUVH5Meta
from pyuvdata.uvdata.initializers import set_phase_params
import pyuvdata
from pyuvdata.utils.io.fits import _gethduaxis, _indexhdus
try:
import aipy
AIPY = True
except ImportError:
AIPY = False
from . import utils
from . import redcal
from .datacontainer import DataContainer
from .utils import polnum2str, polstr2num, jnum2str, jstr2num, filter_bls, chunk_baselines_by_redundant_groups
from .utils import split_pol, conj_pol, split_bl, LST2JD, JD2LST, HERA_TELESCOPE_LOCATION
# The following two functions are potentially called MANY times with
# the same arguments, so we cache them to speed things up.
polnum2str = lru_cache(polnum2str)
polstr2num = lru_cache(polstr2num)
def _parse_input_files(inputs, name='input_data'):
if isinstance(inputs, (str, Path)):
filepaths = [inputs]
elif isinstance(inputs, Iterable): # List loading
if np.all([isinstance(i, (str, Path)) for i in inputs]): # List of visibility data paths
filepaths = list(inputs)
else:
raise TypeError(f'If {name} is a list, it must be a list of strings or Paths.')
else:
        raise ValueError(f'{name} must be a string or Path, or a list of strings or Paths.')
for f in filepaths:
if not os.path.exists(f):
raise IOError(f'Cannot find file {f} in {os.getcwd()}')
return filepaths
class HERACal(UVCal):
'''HERACal is a subclass of pyuvdata.UVCal meant to serve as an interface between
pyuvdata-readable calfits files and dictionaries (the in-memory format for hera_cal)
that map antennas and polarizations to gains, flags, and qualities. Supports standard
UVCal functionality, along with read() and update() functionality for going back and
forth to dictionaires. Upon read(), stores useful metadata internally.
Does not support partial data loading or writing. Assumes a single spectral window.
'''
def __init__(self, input_cal):
'''Instantiate a HERACal object. Currently only supports calfits files.
Arguments:
input_cal: string calfits file path or list of paths
'''
super().__init__()
# parse input_data as filepath(s)
self.filepaths = _parse_input_files(input_cal, name='input_cal')
def _extract_metadata(self):
'''Extract and store useful metadata and array indexing dictionaries.'''
self.freqs = np.unique(self.freq_array)
self.times = np.unique(self.time_array)
self.pols = [jnum2str(j, x_orientation=self.telescope.x_orientation) for j in self.jones_array]
self._jnum_indices = {jnum: i for i, jnum in enumerate(self.jones_array)}
self.ants = [(ant, pol) for ant in self.ant_array for pol in self.pols]
self._antnum_indices = {ant: i for i, ant in enumerate(self.ant_array)}
def build_calcontainers(self):
'''Turns the calibration information currently loaded into the HERACal object
into ordered dictionaries that map antenna-pol tuples to calibration waterfalls.
Computes and stores internally useful metadata in the process.
Returns:
gains: dict mapping antenna-pol keys to (Nint, Nfreq) complex gains arrays
flags: dict mapping antenna-pol keys to (Nint, Nfreq) boolean flag arrays
quals: dict mapping antenna-pol keys to (Nint, Nfreq) float qual arrays
total_qual: dict mapping polarization to (Nint, Nfreq) float total quality array
'''
self._extract_metadata()
gains, flags = odict(), odict()
if self.total_quality_array is not None:
total_qual = odict()
else:
total_qual = None
if self.quality_array is not None:
quals = odict()
else:
quals = None
# build dict of gains, flags, and quals
for (ant, pol) in self.ants:
i, ip = self._antnum_indices[ant], self._jnum_indices[jstr2num(pol, x_orientation=self.telescope.x_orientation)]
gains[(ant, pol)] = np.array(self.gain_array[i, :, :, ip].T)
flags[(ant, pol)] = np.array(self.flag_array[i, :, :, ip].T)
if quals is not None:
quals[(ant, pol)] = np.array(self.quality_array[i, :, :, ip].T)
# build dict of total_qual if available
if total_qual is not None:
for pol in self.pols:
ip = self._jnum_indices[jstr2num(pol, x_orientation=self.telescope.x_orientation)]
total_qual[pol] = np.array(self.total_quality_array[:, :, ip].T)
return gains, flags, quals, total_qual
def read(self, antenna_nums=None, frequencies=None, freq_chans=None, times=None, pols=None):
'''Reads calibration information from file, computes useful metadata and returns
dictionaries that map antenna-pol tuples to calibration waterfalls. Currently, select options
only perform selection after reading, so they are not true partial I/O. However, when
        initialized with a list of calibration files, non-time selection is done before concatenation,
potentially saving memory.
Arguments:
            antenna_nums : array_like of int, optional. The antenna numbers to keep in the object
                (antenna positions and names for the removed antennas will be retained).
frequencies : array_like of float, optional. The frequencies to keep in the object, each
value passed here should exist in the freq_array.
freq_chans : array_like of int, optional. The frequency channel numbers to keep in the object.
times : array_like of float, optional. The times to keep in the object, each value passed
here should exist in the time_array of one of the files in input_cal.
            pols : array_like of str, optional. These strings should be convertible to polarization
numbers via pyuvdata's jstr2num e.g. ['Jee'].
Returns:
gains: dict mapping antenna-pol keys to (Nint, Nfreq) complex gains arrays
flags: dict mapping antenna-pol keys to (Nint, Nfreq) boolean flag arrays
quals: dict mapping antenna-pol keys to (Nint, Nfreq) float qual arrays
total_qual: dict mapping polarization to (Nint, Nfreq) float total quality array
'''
# if filepaths is None, this was converted to HERAData
# from a different pre-loaded object with no history of filepath
if self.filepaths is not None:
# load data
self.read_calfits(self.filepaths[0])
if pols is not None:
pols = [jstr2num(ap, x_orientation=self.telescope.x_orientation) for ap in pols]
# only read antennas present in the data and raise a warning.
my_ants = np.unique(self.ant_array)
if antenna_nums is not None:
for ant in antenna_nums:
if ant not in my_ants:
warnings.warn(f"Antenna {ant} not present in calibration solution. Skipping!")
antenna_nums = np.intersect1d(my_ants, antenna_nums)
select_dict = {'antenna_nums': antenna_nums, 'frequencies': frequencies,
'freq_chans': freq_chans, 'jones': pols}
if np.any([s is not None for s in select_dict.values()]):
self.select(inplace=True, **select_dict)
            # If there's more than one file, loop over all files, downselecting and concatenating
if len(self.filepaths) > 1:
for fp in self.filepaths[1:]:
uvc = UVCal()
uvc.read_calfits(fp)
if np.any([s is not None for s in select_dict.values()]):
uvc.select(inplace=True, **select_dict)
self += uvc
# downselect times at the very end, since this might exclude some files in the original list
if times is not None:
self.select(times=times)
return self.build_calcontainers()
def update(self, gains=None, flags=None, quals=None, total_qual=None, tSlice=None, fSlice=None):
        '''Update internal calibration arrays (gain_array, flag_array, and quality_array)
        using dictionaries (if not left as None) in preparation for writing to disk.
Arguments:
gains: optional dict mapping antenna-pol to complex gains arrays
flags: optional dict mapping antenna-pol to boolean flag arrays
quals: optional dict mapping antenna-pol to float qual arrays
total_qual: optional dict mapping polarization to float total quality array.
tSlice: optional slice of indices of the times to update. Must have the same size
as the 0th dimension of the input gains/flags/quals/total_quals
fSlice: optional slice of indices of the freqs to update. Must have the same size
as the 1st dimension of the input gains/flags/quals/total_quals
'''
# provide sensible defaults for tSlice and fSlice
if tSlice is None:
tSlice = slice(0, self.Ntimes)
if fSlice is None:
fSlice = slice(0, self.Nfreqs)
if quals is not None and self.quality_array is None:
self.quality_array = np.zeros(self.gain_array.shape, dtype=float)
# loop over and update gains, flags, and quals
data_arrays = [self.gain_array, self.flag_array, self.quality_array]
for to_update, array in zip([gains, flags, quals], data_arrays):
if to_update is not None:
for (ant, pol) in to_update.keys():
i, ip = self._antnum_indices[ant], self._jnum_indices[jstr2num(pol, x_orientation=self.telescope.x_orientation)]
array[i, fSlice, tSlice, ip] = to_update[(ant, pol)].T
# update total_qual
if total_qual is not None:
if self.total_quality_array is None:
self.total_quality_array = np.zeros(self.gain_array.shape[1:], dtype=float)
for pol in total_qual.keys():
ip = self._jnum_indices[jstr2num(pol, x_orientation=self.telescope.x_orientation)]
self.total_quality_array[fSlice, tSlice, ip] = total_qual[pol].T
def write(self, filename, spoof_missing_channels=False, **write_kwargs):
"""
Shallow wrapper for UVCal calfits writer with functionality to spoof missing channels.
Parameters
----------
filename: str
name of file to write to.
        spoof_missing_channels: bool, optional
            If True, spoof missing channels with flagged gains set equal to unity.
write_kwargs: kwarg dict
kwargs for UVCal.write_calfits
"""
if spoof_missing_channels:
writer = copy.deepcopy(self)
            # Since calfits does not support frequency discontinuities, we add support here
            # by spoofing frequencies between discontinuities with flagged gains.
            # The call below provides freqs_filled, the frequency axis with spoofed channels
            # added, and inserted, a boolean array that is True at the spoofed frequencies.
freqs_filled, _, _, inserted = place_data_on_uniform_grid(self.freqs, np.ones_like(self.freqs), np.ones_like(self.freqs))
writer.freq_array = freqs_filled.flatten()
writer.Nfreqs = len(freqs_filled)
writer.channel_width = np.median(writer.channel_width) * np.ones_like(writer.freq_array)
if hasattr(writer, "flex_spw_id_array") and writer.flex_spw_id_array is not None:
writer.flex_spw_id_array = np.full(writer.Nfreqs, writer.spw_array[0], dtype=int)
# insert original flags and gains into appropriate channels.
new_gains = np.ones((writer.Nants_data, writer.Nfreqs, writer.Ntimes, writer.Njones), dtype=complex)
new_gains[:, ~inserted, :, :] = writer.gain_array
new_flags = np.ones(new_gains.shape, dtype=bool)
new_flags[:, ~inserted, :, :] = writer.flag_array
new_quality = np.zeros(new_gains.shape, dtype=float)
new_quality[:, ~inserted, :, :] = writer.quality_array
            if writer.total_quality_array is not None:
                new_total_quality = np.zeros((writer.Nfreqs, writer.Ntimes, writer.Njones), dtype=float)
                new_total_quality[~inserted, :, :] = writer.total_quality_array
                writer.total_quality_array = new_total_quality
            writer.flag_array = new_flags
            writer.gain_array = new_gains
            writer.quality_array = new_quality
writer.write_calfits(filename, **write_kwargs)
else:
self.write_calfits(filename, **write_kwargs)
def read_hera_calfits(filenames, ants=None, pols=None,
read_gains=True, read_flags=False, read_quality=False, read_tot_quality=False,
check=False, dtype=np.complex128, verbose=False):
'''A faster interface to getting data out of HERA calfits files. Only concatenates
along time axis. Puts times in ascending order,
but does not check that files are contiguous.
Arguments:
filenames: list of files to read
ants: list of ants or (ant, [polstr]) tuples to read out of files.
Default (None) is to use the intersection of all antennas
across files.
pols: list of pol strings to read out of files
read_gains: (bool, True): read gains
read_flags (bool, False): read flags
read_quality (bool, False): read quality array
read_tot_quality (bool, False): read total quality array
check (bool, False): run sanity checks to make sure files match.
dtype (np.complex128): numpy datatype for output complex-valued arrays
verbose: print some progress messages.
Returns:
rv: dictionary with keys 'info' (metadata), 'gains' (dictionary of waterfalls
with (ant,pol) keys), 'flags', 'quality', and 'total_quality'. Will omit
keys according to read_gains, read_flags, and read_quality.
'''
info = {}
times = {}
inds = {}
# grab header information from all cal files
filenames = _parse_input_files(filenames, name='input_cal')
for cnt, filename in enumerate(filenames):
with fits.open(filename) as fname:
hdr = fname[0].header
_times = _gethduaxis(fname[0], 3)
_thash = hash(_times.tobytes())
if _thash not in times:
times[_thash] = (_times, [filename])
else:
times[_thash][1].append(filename)
hdunames = _indexhdus(fname)
nants = hdr['NAXIS6']
anthdu = fname[hdunames["ANTENNAS"]]
antdata = anthdu.data
_ants = antdata["ANTARR"][:nants].astype(int)
_ahash = hash(_ants.tobytes())
if _ahash not in inds:
inds[_ahash] = {ant: idx for idx, ant in enumerate(_ants)}
if 'ants' in info:
info['ants'].intersection_update(set(inds[_ahash].keys()))
else:
info['ants'] = set(inds[_ahash].keys())
jones_array = _gethduaxis(fname[0], 2)
_jhash = hash(jones_array.tobytes())
if _jhash not in inds:
info['x_orientation'] = x_orient = hdr['XORIENT']
_pols = [uvutils.parse_jpolstr(uvutils.JONES_NUM2STR_DICT[num], x_orientation=x_orient)
for num in jones_array]
if 'pols' in info:
info['pols'] = info['pols'].union(set(_pols))
else:
info['pols'] = set(_pols)
inds[_jhash] = {pol: idx for idx, pol in enumerate(_pols)}
inds[filename] = (inds[_ahash], inds[_jhash])
if cnt == 0:
if 'ANTXYZ' in antdata.names:
info['antpos'] = antdata["ANTXYZ"]
info['freqs'] = _gethduaxis(fname[0], 4)
info['gain_convention'] = gain_convention = hdr.pop("GNCONVEN")
info['cal_type'] = cal_type = hdr.pop("CALTYPE")
if check:
assert gain_convention == 'divide' # HERA standard
assert cal_type == 'gain' # delay-style calibration currently unsupported
assert np.all(info['freqs'] == _gethduaxis(fname[0], 4))
if ants is None:
# generate a set of ants if we didn't have one passed in
if pols is None:
pols = info['pols']
ants = set((ant,) for ant in info['ants'])
ants = set(ant + (p,) for ant in ants for p in pols)
else:
ants = set((ant,) if np.issubdtype(type(ant), np.integer) else ant for ant in ants)
# if length 1 ants are passed in, add on polarizations
ants_len1 = set(ant for ant in ants if len(ant) == 1)
if len(ants_len1) > 0:
if pols is None:
pols = info['pols']
ants = set(ant for ant in ants if len(ant) == 2)
ants = ants.union([ant + (p,) for ant in ants_len1 for p in pols])
        # record the full set of polarizations indexed in ants
pols = set(ant[1] for ant in ants)
times = list(times.values())
times.sort(key=lambda x: x[0][0])
filenames = (v[1] for v in times)
times = np.concatenate([t[0] for t in times], axis=0)
info['times'] = times
tot_times = times.size
nfreqs = info['freqs'].size
# preallocate buffers
def nan_empty(shape, dtype):
'''Allocate nan-filled buffers, in case file time/pol
misalignments lead to uninitialized data buffer slots.'''
buf = np.empty(shape, dtype=dtype)
buf.fill(np.nan)
return buf
rv = {}
if read_gains:
rv['gains'] = {ant: nan_empty((tot_times, nfreqs), dtype) for ant in ants}
if read_flags:
rv['flags'] = {ant: nan_empty((tot_times, nfreqs), bool) for ant in ants}
if read_quality:
rv['quality'] = {ant: nan_empty((tot_times, nfreqs), np.float32) for ant in ants}
if read_tot_quality:
rv['total_quality'] = {p: nan_empty((tot_times, nfreqs), np.float32) for p in info['pols']}
# bail here if all we wanted was the info
if len(rv) == 0:
return {'info': info}
# loop through files and read data
t = 0
for cnt, _filenames in enumerate(filenames):
for filename in _filenames:
antind, polind = inds[filename]
with fits.open(filename) as fname:
hdr = fname[0].header
ntimes = hdr.pop("NAXIS3")
if read_gains:
data = fname[0].data
for (a, p) in rv['gains'].keys():
if a not in antind or p not in polind:
continue
rv['gains'][a, p][t:t + ntimes].real = fname[0].data[antind[a], 0, :, :, polind[p], 0].T
rv['gains'][a, p][t:t + ntimes].imag = fname[0].data[antind[a], 0, :, :, polind[p], 1].T
if read_flags:
for (a, p) in rv['flags'].keys():
if a not in antind or p not in polind:
continue
rv['flags'][a, p][t:t + ntimes] = fname[0].data[antind[a], 0, :, :, polind[p], 2].T
if read_quality:
for (a, p) in rv['quality'].keys():
if a not in antind or p not in polind:
continue
rv['quality'][a, p][t:t + ntimes] = fname[0].data[antind[a], 0, :, :, polind[p], 3].T
if read_tot_quality:
tq_hdu = fname[hdunames["TOTQLTY"]]
for p in rv['total_quality'].keys():
if p not in polind:
continue
rv['total_quality'][p][t:t + ntimes] = tq_hdu.data[0, :, :, polind[p]].T
t += ntimes
rv['info'] = info
return rv
def get_blt_slices(uvo, tried_to_reorder=False):
'''For a pyuvdata-style UV object, get the mapping from antenna pair to blt slice.
If the UV object does not have regular spacing of baselines in its baseline-times,
this function will try to reorder it using UVData.reorder_blts() to see if that helps.
Arguments:
uvo: a "UV-Object" like UVData or baseline-type UVFlag. Blts may get re-ordered internally.
tried_to_reorder: used internally to prevent infinite recursion
Returns:
        blt_slices: dictionary mapping antenna pair tuples to baseline-time slice objects
'''
if hasattr(uvo, "blts_are_rectangular"):
if getattr(uvo, "blts_are_rectangular", None) is None:
uvo.set_rectangularity(force=True)
blt_slices = {}
if getattr(uvo, "blts_are_rectangular", False):
if uvo.time_axis_faster_than_bls:
for i in range(uvo.Nbls):
start = i * uvo.Ntimes
antp = (uvo.ant_1_array[start], uvo.ant_2_array[start])
blt_slices[antp] = slice(start, start + uvo.Ntimes, 1)
assert uvo.Nbls == len(blt_slices)
else:
for i in range(uvo.Nbls):
antp = (uvo.ant_1_array[i], uvo.ant_2_array[i])
blt_slices[antp] = slice(i, uvo.Nblts, uvo.Nbls)
assert uvo.Nbls == len(blt_slices)
else:
for ant1, ant2 in uvo.get_antpairs():
indices = uvo.antpair2ind(ant1, ant2)
if isinstance(indices, slice):
blt_slices[(ant1, ant2)] = indices
elif indices is None:
raise ValueError(f"Antpair ({ant1}, {ant2}) does not exist in the data.")
elif len(indices) == 1: # only one blt matches
blt_slices[(ant1, ant2)] = slice(indices[0], indices[0] + 1, uvo.Nblts)
elif len(set(np.ediff1d(indices))) != 1: # checks if the consecutive differences are all the same
if not tried_to_reorder:
uvo.reorder_blts(order='time')
return get_blt_slices(uvo, tried_to_reorder=True)
else:
raise NotImplementedError(
                        'UVData objects with non-regular spacing of '
                        'baselines in their baseline-times are not supported. '
                        f'Got indices {indices} for baseline ({ant1}, {ant2}).'
)
else:
# This should only trigger for pyuvdata < 3, where you can get back
# an array of indices that are regular. In pyuvdata 3, you'd get
# back a slice if this was the case.
blt_slices[(ant1, ant2)] = slice(indices[0], indices[-1] + 1, indices[1] - indices[0])
return blt_slices
class HERAData(UVData):
'''HERAData is a subclass of pyuvdata.UVData meant to serve as an interface between
pyuvdata-compatible data formats on disk (especially uvh5) and DataContainers,
the in-memory format for visibilities used in hera_cal. In addition to standard
UVData functionality, HERAData supports read() and update() functions that interface
between internal UVData data storage and DataContainers, which contain visibility
data in a dictionary-like format, along with some useful metadata. read() supports
partial data loading, though only the most useful subset of selection modes from
pyuvdata (and not all modes for all data types).
When using uvh5, HERAData supports additional useful functionality:
* Upon __init__(), the most useful metadata describing the entire file is loaded into
the object (everything in HERAData_metas; see get_metadata_dict() for details).
* Partial writing using partial_write(), which will initialize a new file with the
same metadata and write to disk using DataContainers by assuming that the user is
writing to the same part of the data as the most recent read().
* Generators that enable iterating over baseline, frequency, or time in chunks (see
iterate_over_bls(), iterate_over_freqs(), and iterate_over_times() for details).
Assumes a single spectral window. Assumes that data for a given baseline is regularly
spaced in the underlying data_array.
'''
# static list of useful metadata to calculate and save
HERAData_metas = ['ants', 'data_ants', 'antpos', 'data_antpos', 'freqs', 'times', 'lsts',
'pols', 'antpairs', 'bls', 'times_by_bl', 'lsts_by_bl']
# ants: list of antenna numbers in the array
# data_ants: list of antenna numbers in the data file
# antpos: dictionary mapping all antenna numbers in the telescope to np.arrays of position in meters
# data_antpos: dictionary mapping all antenna numbers in the data to np.arrays of position in meters
# freqs: np.array of frequencies (Hz)
# times: np.array of unique times in the data file (JD)
# lsts: np.array of unique LSTs in the data file (radians)
# pols: list of baseline polarization strings
# antpairs: list of antenna number pairs in the data as 2-tuples
# bls: list of baseline-pols in the data as 3-tuples
# times_by_bl: dictionary mapping antpairs to times (JD). Also includes all reverse pairs.
# lsts_by_bl: dictionary mapping antpairs to LSTs (radians). Also includes all reverse pairs.
def __init__(self, input_data, upsample=False, downsample=False, filetype='uvh5', **read_kwargs):
'''Instantiate a HERAData object. If the filetype is either uvh5 or uvfits, read in and store
useful metadata (see get_metadata_dict()), either as object attributes or,
if input_data is a list, as dictionaries mapping string paths to metadata.
Arguments:
input_data: string data file path or list of string data file paths
upsample: bool. If True, will upsample to match the shortest integration time in the file.
Upsampling will affect the time metadata stored on this object.
downsample: bool. If True, will downsample to match the longest integration time in the file.
Downsampling will affect the time metadata stored on this object.
filetype: supports 'uvh5' (default), 'miriad', 'uvfits'
read_kwargs : kwargs to pass to UVData.read (e.g. run_check, check_extra and
run_check_acceptability). Only used for uvh5 filetype
'''
# initialize as empty UVData object
super().__init__()
# parse input_data as filepath(s)
self.filepaths = _parse_input_files(input_data, name='input_data')
# parse arguments into object
self.upsample = upsample
self.downsample = downsample
if self.upsample and self.downsample:
raise ValueError('upsample and downsample cannot both be True.')
self.filetype = filetype
# load metadata from file
if self.filetype in ['uvh5', 'uvfits']:
# read all UVData metadata from first file
temp_paths = copy.deepcopy(self.filepaths)
self.filepaths = self.filepaths[0]
self.read(read_data=False, **read_kwargs)
self.filepaths = temp_paths
self._attach_metadata(**read_kwargs)
elif self.filetype == 'miriad':
for meta in self.HERAData_metas:
setattr(self, meta, None) # no pre-loading of metadata
else:
raise NotImplementedError('Filetype ' + self.filetype + ' has not been implemented.')
# save longest and shortest integration times in the file for later use in up/downsampling
# if available, these will be used instead of the ones in self.integration_time during partial I/O
self.longest_integration = None
self.shortest_integration = None
if self.integration_time is not None:
self.longest_integration = np.max(self.integration_time)
self.shortest_integration = np.min(self.integration_time)
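# Example usage (illustrative sketch; the file path is a hypothetical placeholder):
#     hd = HERAData('zen.2459122.30030.sum.uvh5')
#     # for uvh5 files, useful metadata is available immediately, before reading any data
#     print(hd.freqs.shape, len(hd.times), hd.pols)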
def _attach_metadata(self, **read_kwargs):
"""
Attach metadata.
"""
if hasattr(self, "filepaths") and self.filepaths is not None and len(self.filepaths) > 1: # save HERAData_metas in dicts
for meta in self.HERAData_metas:
setattr(self, meta, {})
for f in self.filepaths:
hd = HERAData(f, filetype='uvh5', **read_kwargs)
meta_dict = hd.get_metadata_dict()
for meta in self.HERAData_metas:
getattr(self, meta)[f] = meta_dict[meta]
else: # save HERAData_metas as attributes
self._writers = {}
for key, value in self.get_metadata_dict().items():
setattr(self, key, value)
def reset(self):
'''Resets all standard UVData attributes, potentially freeing memory.'''
super(HERAData, self).__init__()
def get_metadata_dict(self):
''' Produces a dictionary of the most useful metadata. Used as object
attributes and as metadata to store in DataContainers.
Returns:
metadata_dict: dictionary of all items in self.HERAData_metas
'''
antpos = self.telescope.get_enu_antpos()
ants = self.telescope.antenna_numbers
antpos = dict(zip(ants, antpos))
data_ants = np.unique(np.concatenate((self.ant_1_array, self.ant_2_array)))
data_antpos = {ant: antpos[ant] for ant in data_ants}
# get times using the most commonly appearing baseline, presumably the one without BDA
most_common_bl_num = scipy.stats.mode(self.baseline_array, keepdims=True)[0][0]
times = self.time_array[self.baseline_array == most_common_bl_num]
lsts = self.lst_array[self.baseline_array == most_common_bl_num]
freqs = np.unique(self.freq_array)
pols = [polnum2str(polnum, x_orientation=self.telescope.x_orientation) for polnum in self.polarization_array]
antpairs = self.get_antpairs()
bls = [antpair + (pol,) for antpair in antpairs for pol in pols]
times_by_bl = {antpair: np.array(self.time_array[self._blt_slices[antpair]])
for antpair in antpairs}
times_by_bl.update({(ant1, ant0): times_here for (ant0, ant1), times_here in times_by_bl.items()})
lsts_by_bl = {antpair: np.array(self.lst_array[self._blt_slices[antpair]])
for antpair in antpairs}
lsts_by_bl.update({(ant1, ant0): lsts_here for (ant0, ant1), lsts_here in lsts_by_bl.items()})
locs = locals()
return {meta: locs[meta] for meta in self.HERAData_metas}
def _determine_blt_slicing(self):
'''Determine the mapping between antenna pairs and slices of the blt axis of the data_array.'''
self._blt_slices = get_blt_slices(self)
def get_polstr_index(self, pol: str) -> int:
num = polstr2num(pol, x_orientation=self.telescope.x_orientation)
try:
return self._polnum_indices[num]
except AttributeError:
self._determine_pol_indexing()
return self._polnum_indices[num]
def _determine_pol_indexing(self):
self._polnum_indices = {
polnum: i for i, polnum in enumerate(self.polarization_array)
}
def _get_slice(self, data_array, key):
'''Return a copy of the Nint by Nfreq waterfall or waterfalls for a given key. Abstracts
away both baseline ordering (by applying complex conjugation) and polarization capitalization.
Arguments:
data_array: numpy array of shape (Nblts, 1, Nfreq, Npol), i.e. the size of the full data.
One generally uses this object's own self.data_array, self.flag_array, or self.nsample_array.
key: if of the form (0,1,'nn'), return a numpy array.
if of the form (0,1), return a dict mapping pol strings to waterfalls.
if of the form 'nn', return a dict mapping ant-pair tuples to waterfalls.
'''
if isinstance(key, str): # asking for a pol
return {antpair: self._get_slice(data_array, antpair + (key,)) for antpair in self.get_antpairs()}
elif len(key) == 2: # asking for antpair
return {pol: self._get_slice(data_array, key + (pol,)) for pol in self.pols}
elif len(key) == 3: # asking for bl-pol
try:
pidx = self.get_polstr_index(key[2])
if data_array.ndim == 4: # old shapes
return np.array(
data_array[self._blt_slices[tuple(key[:2])], 0, :, pidx]
)
else:
return np.array(
data_array[self._blt_slices[tuple(key[:2])], :, pidx]
)
except KeyError:
pidx = self.get_polstr_index(conj_pol(key[2]))
if data_array.ndim == 4:
return np.conj(
data_array[self._blt_slices[tuple(key[1::-1])], 0, :, pidx]
)
else:
return np.conj(
data_array[self._blt_slices[tuple(key[1::-1])], :, pidx]
)
else:
raise KeyError('Unrecognized key type for slicing data.')
def _set_slice(self, data_array, key, value):
'''Update data_array with Nint by Nfreq waterfall(s). Abstracts away both baseline
ordering (by applying complex conjugation) and polarization capitalization.
Arguments:
data_array: numpy array of shape (Nblts, 1, Nfreq, Npol), i.e. the size of the full data.
One generally uses this object's own self.data_array, self.flag_array, or self.nsample_array.
key: baseline (e.g. (0,1,'nn')), ant-pair tuple (e.g. (0,1)), or pol str (e.g. 'nn')
value: if key is a baseline, must be an (Nint, Nfreq) numpy array;
if key is an ant-pair tuple, must be a dict mapping pol strings to waterfalls;
if key is a pol str, must be a dict mapping ant-pair tuples to waterfalls
'''
if isinstance(key, str): # providing pol with all antpairs
for antpair in value.keys():
self._set_slice(data_array, (antpair + (key,)), value[antpair])
elif len(key) == 2: # providing antpair with all pols
for pol in value.keys():
self._set_slice(data_array, (key + (pol,)), value[pol])
elif len(key) == 3: # providing bl-pol
try:
pidx = self.get_polstr_index(key[2])
data_array[self._blt_slices[tuple(key[:2])], :, pidx] = value
except KeyError:
pidx = self.get_polstr_index(conj_pol(key[2]))
data_array[self._blt_slices[tuple(key[1::-1])], :, pidx] = np.conj(value)
else:
raise KeyError('Unrecognized key type for slicing data.')
def build_datacontainers(self):
'''Turns the data currently loaded into the HERAData object into DataContainers.
Returned DataContainers include useful metadata specific to the data actually
in the DataContainers (which may be a subset of the total data). This includes
antenna positions, frequencies, all times, all lsts, and times and lsts by baseline.
Returns:
data: DataContainer mapping baseline keys to complex visibility waterfalls
flags: DataContainer mapping baseline keys to boolean flag waterfalls
nsamples: DataContainer mapping baseline keys to integer Nsamples waterfalls
'''
# build up DataContainers
data, flags, nsamples = odict(), odict(), odict()
meta = self.get_metadata_dict()
for bl in meta['bls']:
data[bl] = self._get_slice(self.data_array, bl)
flags[bl] = self._get_slice(self.flag_array, bl)
nsamples[bl] = self._get_slice(self.nsample_array, bl)
data = DataContainer(data)
flags = DataContainer(flags)
nsamples = DataContainer(nsamples)
# store useful metadata inside the DataContainers
for dc in [data, flags, nsamples]:
for attr in ['ants', 'data_ants', 'antpos', 'data_antpos', 'freqs', 'times', 'lsts', 'times_by_bl', 'lsts_by_bl']:
setattr(dc, attr, copy.deepcopy(meta[attr]))
return data, flags, nsamples
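# Example usage (illustrative sketch; assumes data have already been read into this
# object and that baseline (0, 1, 'nn') exists in the file):
#     hd.read(return_data=False)
#     data, flags, nsamples = hd.build_datacontainers()
#     waterfall = data[(0, 1, 'nn')]  # Ntimes x Nfreqs complex array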
def read(self, bls=None, polarizations=None, times=None, time_range=None, lsts=None, lst_range=None,
frequencies=None, freq_chans=None, axis=None, read_data=True, return_data=True,
run_check=True, check_extra=True, run_check_acceptability=True, **kwargs):
'''Reads data from file. Supports partial data loading. Default: read all data in file.
Arguments:
bls: A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of
baseline 3-tuples (e.g. [(0,1,'nn'), (2,3,'ee')]) specifying baselines
to keep in the object. For length-2 tuples, the ordering of the numbers
within the tuple does not matter. For length-3 tuples, the polarization
string is in the order of the two antennas. If length-3 tuples are provided,
the polarizations argument below must be None. Ignored if read_data is False.
polarizations: The polarizations to include when reading data into
the object. Ignored if read_data is False.
times: The times to include when reading data into the object.
Ignored if read_data is False. Miriad will load then select on this axis.
time_range : length-2 array-like of float, optional. The time range in Julian Date
to include. Cannot be used with `times`.
lsts: The lsts in radians to include when reading data into the object.
Ignored if read_data is False. Miriad will load then select on this axis.
Cannot be used with `times` or `time_range`.
lst_range : length-2 array-like of float, optional. The lst range in radians
to include. Cannot be used with `times`, `time_range`, or `lsts`.
Miriad will load then select on this axis. If the second value is smaller than
the first, the LSTs are treated as having phase-wrapped around LST = 2*pi = 0
and the LSTs kept on the object will run from the larger value, through 0, and
end at the smaller value.
frequencies: The frequencies to include when reading data. Ignored if read_data
is False. Miriad will load then select on this axis.
freq_chans: The frequency channel numbers to include when reading data. Ignored
if read_data is False. Miriad will load then select on this axis.
axis: Axis for fast concatenation of files (if len(self.filepaths) > 1).
Allowed values are: 'blt', 'freq', 'polarization'.
read_data: Read in the visibility and flag data. If set to false, only the
basic metadata will be read in and nothing will be returned. Results in an
incompletely defined object (check will not pass). Default True.
return_data: bool, if True, return the output of build_datacontainers().
run_check: Option to check for the existence and proper shapes of
parameters after reading in the file. Default is True.
check_extra: Option to check optional parameters as well as required
ones. Default is True.
run_check_acceptability: Option to check acceptable range of the values of
parameters after reading in the file. Default is True.
kwargs: extra keyword arguments to pass to UVData.read()
Returns:
data: DataContainer mapping baseline keys to complex visibility waterfalls
flags: DataContainer mapping baseline keys to boolean flag waterfalls
nsamples: DataContainer mapping baseline keys to integer Nsamples waterfalls
'''
# save last read parameters
locs = locals()
partials = ['bls', 'polarizations', 'times', 'time_range', 'lsts', 'lst_range', 'frequencies', 'freq_chans']
self.last_read_kwargs = {p: locs[p] for p in partials}
# if filepaths is None, this was converted to HERAData
# from a different pre-loaded object with no history of filepath
if self.filepaths is not None:
temp_read = self.read # store self.read while it's being overwritten
self.read = super().read # re-define self.read so UVData can call self.read recursively for lists of files
# load data
try:
if self.filetype in ['uvh5', 'uvfits']:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Fixing phases using antenna positions')
super().read(self.filepaths, file_type=self.filetype, axis=axis, bls=bls, polarizations=polarizations,
times=times, time_range=time_range, lsts=lsts, lst_range=lst_range, frequencies=frequencies,
freq_chans=freq_chans, read_data=read_data, run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
**kwargs)
if self.filetype == 'uvfits':
self.unproject_phase()
else:
if not read_data:
raise NotImplementedError('reading only metadata is not implemented for ' + self.filetype)
if self.filetype == 'miriad':
super().read(self.filepaths, file_type='miriad', axis=axis, bls=bls, polarizations=polarizations,
time_range=time_range, run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
projected=False, **kwargs)
if any([times is not None, lsts is not None, lst_range is not None,
frequencies is not None, freq_chans is not None]):
warnings.warn('miriad does not support partial loading for times/lsts (except time_range) and frequencies. '
'Loading the file first and then performing select.')
self.select(times=times, lsts=lsts, lst_range=lst_range, frequencies=frequencies, freq_chans=freq_chans)
# upsample or downsample data, as appropriate, including metadata. Will use self.longest/shortest_integration
# if not None (which came from whole file metadata) since partial i/o might change the current longest or
# shortest integration in a way that would create inconsistency between partial reads/writes.
if self.upsample:
if hasattr(self, 'shortest_integration') and self.shortest_integration is not None:
self.upsample_in_time(max_int_time=self.shortest_integration)
else:
self.upsample_in_time(max_int_time=np.min(self.integration_time))
if self.downsample:
if hasattr(self, 'longest_integration') and self.longest_integration is not None:
self.downsample_in_time(min_int_time=self.longest_integration)
else:
self.downsample_in_time(min_int_time=np.max(self.integration_time))
finally:
self.read = temp_read  # reset back to this function, regardless of whether the above try executes successfully
# It turns out that doing .read() can change the inherent rectangularity of the data
# so, we reset it:
self.set_rectangularity(force=True)
# process data into DataContainers
try:
self._clear_antpair2ind_cache(self) # required because we over-wrote read()
except AttributeError:
# pyuvdata < 3 doesn't have this method, and that's fine.
pass
self._determine_blt_slicing()
self._determine_pol_indexing()
if read_data and return_data:
return self.build_datacontainers()
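# Example usage (illustrative sketch; the baseline keys and channel range are assumptions
# about the file's contents):
#     data, flags, nsamples = hd.read(bls=[(0, 1, 'nn'), (0, 2, 'ee')], freq_chans=np.arange(256))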
def select(self, inplace=True, **kwargs):
"""
Select-out parts of a HERAData object.
Args:
inplace: Overwrite self, otherwise return a copy.
kwargs : pyuvdata.UVData select keyword arguments.
"""
# select
output = super(HERAData, self).select(inplace=inplace, **kwargs)
if inplace:
output = self
# recompute slices if necessary
names = ['antenna_nums', 'antenna_names', 'ant_str', 'bls', 'blt_inds',
'times', 'time_range', 'lsts', 'lst_range']
for n in names:
if n in kwargs and kwargs[n] is not None:
output._determine_blt_slicing()
output._determine_pol_indexing()
break
if 'polarizations' in kwargs and kwargs['polarizations'] is not None:
output._determine_pol_indexing()
if not inplace:
return output
def __add__(self, other, inplace=False, **kwargs):
"""
Combine two HERAData objects.
Combine along baseline-time, polarization or frequency.
See pyuvdata.UVData.__add__ for more details.
Args:
other : Another HERAData object
inplace: Overwrite self as we go, otherwise create a third object
as the sum of the two (default).
kwargs : UVData.__add__ keyword arguments
"""
output = super(HERAData, self).__add__(other, inplace=inplace, **kwargs)
if inplace:
output = self
output._determine_blt_slicing()
output._determine_pol_indexing()
if not inplace:
return output
def __getitem__(self, key):
"""
Shortcut for reading a single visibility waterfall given a
baseline tuple. If key exists it will return it using its
blt_slice, if it does not it will attempt to read it
from disk.
"""
try:
return self._get_slice(self.data_array, key)
except KeyError:
return self.read(bls=key)[0][key]
def update(self, data=None, flags=None, nsamples=None, tSlice=None, fSlice=None):
'''Update internal data arrays (data_array, flag_array, and nsample_array)
using DataContainers (if not left as None) in preparation for writing to disk.
Arguments:
data: Optional DataContainer mapping baselines to complex visibility waterfalls
flags: Optional DataContainer mapping baselines to boolean flag waterfalls
nsamples: Optional DataContainer mapping baselines to integer Nsamples waterfalls
tSlice: Optional slice of indices of the times to update. Must have the same size
as the 0th dimension of the input gains/flags/nsamples.
fSlice: Optional slice of indices of the freqs to update. Must have the same size
as the 1st dimension of the input gains/flags/nsamples.
'''
if data is not None:
self.set_data_array_with_datacontainer(data, self.data_array, tSlice=tSlice, fSlice=fSlice)
if flags is not None:
self.set_data_array_with_datacontainer(flags, self.flag_array, tSlice=tSlice, fSlice=fSlice)
if nsamples is not None:
self.set_data_array_with_datacontainer(nsamples, self.nsample_array, tSlice=tSlice, fSlice=fSlice)
def set_data_array_with_datacontainer(
self, dc: DataContainer, data_array: np.ndarray, tSlice=None, fSlice=None
) -> np.ndarray:
'''Convert a datacontainer to an array with uvdata format.
Note that if ``data_array`` is not provided, and tSlice or fSlice are, then
the elements outside the slices will be returned as zeros.
Arguments:
dc: DataContainer to convert.
data_array: Optional array that will be updated in-place. Must have same
shape as the instance's data_array.
tSlice: Optional slice of indices of the times to update. Must have the same size
as the 0th dimension of the input gains/flags/nsamples.
fSlice: Optional slice of indices of the freqs to update. Must have the same size
as the 1st dimension of the input gains/flags/nsamples.
'''
if data_array.shape != self.data_array.shape:
raise ValueError(f"data_array must have shape {self.data_array.shape} (same as self.data_array)")
# provide sensible defaults for tinds and finds
update_full_waterfall = (tSlice is None) and (fSlice is None)
if tSlice is None:
tSlice = slice(0, self.Ntimes)
if fSlice is None:
fSlice = slice(0, self.Nfreqs)
def _set_subslice(data_array, bl, this_waterfall):
if update_full_waterfall:
# directly write into relevant data_array
self._set_slice(data_array, bl, this_waterfall)
else:
# copy out full waterfall, update just the relevant slices, and write back to data_array
full_waterfall = self._get_slice(data_array, bl)
full_waterfall[tSlice, fSlice] = this_waterfall
self._set_slice(data_array, bl, full_waterfall)
if dc is not None:
for bl in dc.keys():
_set_subslice(data_array, bl, dc[bl])
# explicitly handle cross-polarized autos
if bl[0] == bl[1]:
# ensure that we're not looking at (pseudo-)stokes visibilities
if polstr2num(bl[2], x_orientation=self.telescope.x_orientation) < 0:
if utils.split_pol(bl[2])[0] != utils.split_pol(bl[2])[1]:
pol_reversed_bl = utils.reverse_bl(bl)
if pol_reversed_bl not in dc.keys():
if pol_reversed_bl in dc:
_set_subslice(data_array, pol_reversed_bl, dc[pol_reversed_bl])
return data_array
def partial_write(self, output_path, data=None, flags=None, nsamples=None,
clobber=False, inplace=False, add_to_history='',
**kwargs):
'''Writes part of a uvh5 file using DataContainers whose shape matches the most recent
call to HERAData.read() in this object. The overall file written matches the shape of the
input_data file called on __init__. Any data/flags/nsamples left as None will be written
as currently stored in the HERAData object. Does not work for other filetypes or when
the HERAData object is initialized with a list of files.
Arguments:
output_path: path to file to write uvh5 file to
data: Optional DataContainer mapping baselines to complex visibility waterfalls
flags: Optional DataContainer mapping baselines to boolean flag waterfalls
nsamples: Optional DataContainer mapping baselines to integer Nsamples waterfalls
clobber: if True, overwrites existing file at output_path
inplace: update this object's data_array, flag_array, and nsamples_array.
This saves memory but alters the HERAData object.
add_to_history: string to append to history (only used on first call of
partial_write for a given output_path)
kwargs: additional keyword arguments to update UVData attributes. (Only used on
first call of partial write for a given output_path).
'''
# Type verifications
if self.filetype != 'uvh5':
raise NotImplementedError('Partial writing for filetype ' + self.filetype + ' has not been implemented.')
if len(self.filepaths) > 1:
raise NotImplementedError('Partial writing for list-loaded HERAData objects has not been implemented.')
# get writer or initialize new writer if necessary
if output_path in self._writers:
hd_writer = self._writers[output_path] # This hd_writer has metadata for the entire output file
else:
hd_writer = HERAData(self.filepaths[0])
hd_writer.history += add_to_history
for attribute, value in kwargs.items():
hd_writer.__setattr__(attribute, value)
hd_writer.initialize_uvh5_file(output_path, clobber=clobber) # Makes an empty file (called only once)
self._writers[output_path] = hd_writer
if inplace: # update this objects's arrays using DataContainers
self.update(data=data, flags=flags, nsamples=nsamples)
d, f, n = self.data_array, self.flag_array, self.nsample_array
else:
d = self.set_data_array_with_datacontainer(data, self.data_array.copy())
f = self.set_data_array_with_datacontainer(flags, self.flag_array.copy())
n = self.set_data_array_with_datacontainer(nsamples, self.nsample_array.copy())
# else: # make a copy of this object and then update the relevant arrays using DataContainers
# this = copy.deepcopy(self)
write_kwargs = {
"data_array": d,
"nsample_array": n,
"run_check_acceptability": (output_path in self._writers),
**self.last_read_kwargs,
}
# before pyuvdata 3.0, the "flag_array" parameter was called "flags_array"
if "flag_array" in inspect.signature(UVData.write_uvh5_part).parameters:
write_kwargs["flag_array"] = f
else:
write_kwargs["flags_array"] = f
hd_writer.write_uvh5_part(output_path, **write_kwargs)
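# Example usage (illustrative sketch; the output path is a hypothetical placeholder):
#     for data, flags, nsamples in hd.iterate_over_bls(Nbls=10):
#         for bl in flags:
#             flags[bl][:] = True  # e.g. flag everything in this chunk
#         hd.partial_write('zen.2459122.30030.flagged.uvh5', data=data, flags=flags,
#                          nsamples=nsamples, clobber=True)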
def iterate_over_bls(self, Nbls=1, bls=None, chunk_by_redundant_group=False, reds=None,
bl_error_tol=1.0, include_autos=True, frequencies=None):
'''Produces a generator that iteratively yields successive calls to
HERAData.read() by baseline or group of baselines.
Arguments:
Nbls: number of baselines to load at once.
bls: optional user-provided list of baselines to iterate over.
Default: use self.bls (which only works for uvh5).
chunk_by_redundant_group: bool, optional
If true, retrieve bls sorted by redundant groups.
If Nbls is greater than the number of baselines in a redundant group
then return consecutive redundant groups with total baseline count
less than or equal to Nbls.
If Nbls is smaller than the number of baselines in a redundant group
then still return that group but raise a Warning.
Default is False
reds: list, optional
list of lists; each containing the antpairpols in each redundant group
must be provided if chunk_by_redundant_group is True.
bl_error_tol: float, optional
the largest allowable difference between baselines in a redundant group in meters.
(in the same units as antpos). Normally, this is up to 4x the largest antenna position error.
default is 1.0 meters
include_autos: bool, optional
include autocorrelations in iteration if True.
Default is True.
frequencies: array-like, optional
optional list of float frequencies to load.
Default (None) loads all frequencies in data.
Yields:
data, flags, nsamples: DataContainers (see HERAData.read() for more info).
'''
if bls is None:
if self.filetype != 'uvh5':
raise NotImplementedError('Baseline iteration without explicitly setting bls for filetype '
+ self.filetype + ' has not been implemented.')
bls = self.bls
if isinstance(bls, dict): # multiple files
bls = list(set([bl for bls in bls.values() for bl in bls]))
bls = sorted(bls)
if not chunk_by_redundant_group:
if not include_autos:
# filter out autos if include_autos is False.
bls = [bl for bl in bls if bl[0] != bl[1]]
baseline_chunks = [bls[i:i + Nbls] for i in range(0, len(bls), Nbls)]
else:
if reds is None:
if self.filetype != 'uvh5':
raise NotImplementedError('Redundant group iteration without explicitly setting reds for filetype '
+ self.filetype + ' has not been implemented.')
# generate data_antpos dict to feed into get_reds
# that accounts for possibility that
# HERAData was initialized from multiple
# files in which case self.data_antpos is a dict of dicts.
if len(self.filepaths) > 1:
data_antpos = {}
for k in self.data_antpos:
data_antpos.update(self.data_antpos[k])
pols = set({})
for k in self.pols:
pols.update(set(self.pols[k]))
pols = list(pols)
else:
data_antpos = self.data_antpos
pols = self.pols
reds = redcal.get_reds(data_antpos, pols=pols, bl_error_tol=bl_error_tol,
include_autos=include_autos)
# filter reds by baselines
reds = redcal.filter_reds(reds, bls=bls)
# make sure that every baseline is in reds
baseline_chunks = chunk_baselines_by_redundant_groups(reds=reds, max_chunk_size=Nbls)
for chunk in baseline_chunks:
yield self.read(bls=chunk, frequencies=frequencies)
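# Example usage (illustrative sketch): iterating in chunks of whole redundant groups,
# using redcal.get_reds to build the groups from this object's metadata.
#     reds = redcal.get_reds(hd.data_antpos, pols=hd.pols)
#     for data, flags, nsamples in hd.iterate_over_bls(Nbls=50, chunk_by_redundant_group=True, reds=reds):
#         pass  # process each chunk of redundant baselines here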
def iterate_over_freqs(self, Nchans=1, freqs=None):
'''Produces a generator that iteratively yields successive calls to
HERAData.read() by frequency channel or group of contiguous channels.
Arguments:
Nchans: number of frequencies to load at once.
freqs: optional user-provided list of frequencies to iterate over.
Default: use self.freqs (which only works for uvh5).
Yields:
data, flags, nsamples: DataContainers (see HERAData.read() for more info).
'''
if freqs is None:
if self.filetype != 'uvh5':
raise NotImplementedError('Frequency iteration for filetype ' + self.filetype
+ ' without setting freqs has not been implemented.')
freqs = self.freqs
if isinstance(self.freqs, dict): # multiple files
freqs = np.unique(list(self.freqs.values()))
for i in range(0, len(freqs), Nchans):
yield self.read(frequencies=freqs[i:i + Nchans])
def iterate_over_times(self, Nints=1, times=None):
'''Produces a generator that iteratively yields successive calls to
HERAData.read() by time or group of contiguous times. N.B. May
produce unexpected results for BDA data that has not been upsampled
or downsampled to a common time resolution.
Arguments:
Nints: number of integrations to load at once.
times: optional user-provided list of times to iterate over.
Default: use self.times (which only works for uvh5).
Yields:
data, flags, nsamples: DataContainers (see HERAData.read() for more info).
'''
if times is None:
if self.filetype != 'uvh5':
raise NotImplementedError('Time iteration for filetype ' + self.filetype
+ ' without setting times has not been implemented.')
times = self.times
if isinstance(times, dict): # multiple files
times = np.unique(list(times.values()))
for i in range(0, len(times), Nints):
yield self.read(times=times[i:i + Nints])
def init_HERACal(self, gain_convention='divide', cal_style='redundant'):
'''Produces a HERACal object using the metadata in this HERAData object.
Arguments:
gain_convention: str indicating whether gains are to be calibrated by "multiply"ing or "divide"ing.
cal_style: str indicating how calibration was done, either "sky" or "redundant".
Returns:
HERACal object with gain, flag, quality, and total_quality arrays initialized (to 1, True, 0, and 0)
'''
# create UVCal object from self
uvc = UVCal().initialize_from_uvdata(self, gain_convention=gain_convention, cal_style=cal_style)
# create empty data arrays (using future array shapes, which is default true for initialize_from_uvdata)
uvc.gain_array = np.ones((uvc.Nants_data, uvc.Nfreqs, uvc.Ntimes, uvc.Njones), dtype=np.complex64)
uvc.flag_array = np.ones((uvc.Nants_data, uvc.Nfreqs, uvc.Ntimes, uvc.Njones), dtype=bool)
uvc.quality_array = np.zeros((uvc.Nants_data, uvc.Nfreqs, uvc.Ntimes, uvc.Njones), dtype=np.float32)
uvc.total_quality_array = np.zeros((uvc.Nfreqs, uvc.Ntimes, uvc.Njones), dtype=np.float32)
# convert to HERACal and return
return to_HERACal(uvc)
def empty_arrays(self):
'''Sets self.data_array and self.nsample_array to all zeros and self.flag_array to all True (if they are not None).'''
self.data_array = (np.zeros_like(self.data_array) if self.data_array is not None else None)
self.flag_array = (np.ones_like(self.flag_array) if self.flag_array is not None else None)
self.nsample_array = (np.zeros_like(self.nsample_array) if self.nsample_array is not None else None)
def read_hera_hdf5(filenames, bls=None, pols=None, full_read_thresh=0.002,
read_data=True, read_flags=False, read_nsamples=False,
check=False, dtype=np.complex128, verbose=False):
'''A potentially faster interface for reading HERA HDF5 files. Only concatenates
along time axis. Puts times in ascending order, but does not check that
files are contiguous. Currently not BDA compatible.
Arguments:
filenames: list of files to read
bls: list of (ant_1, ant_2, [polstr]) tuples to read out of files.
Default: all bls common to all files.
pols: list of pol strings to read out of files. Default: all, but is
superseded by any polstrs listed in bls.
full_read_thresh (0.002): fractional threshold for reading whole file
instead of baseline by baseline.
read_data (bool, True): read data
read_flags (bool, False): read flags
read_nsamples (bool, False): read nsamples
check (bool, False): run sanity checks to make sure files match.
dtype (np.complex128): numpy datatype for output complex-valued arrays
verbose: print some progress messages.
Returns:
rv: dict with keys 'info' and optionally 'data', 'flags', and 'nsamples',
based on whether read_data, read_flags, and read_nsamples are true.
rv['info']: metadata dict with keys 'freqs' (1D array), 'times' (1D array),
'pols' (list), 'ants' (1D array), 'antpos' (dict of antenna: 3D position),
'bls' (list of all (ant_1, ant_2) baselines in the file), 'data_ants' (1D array)
'latitude' (float in degrees), longitude (float in degrees), altitude (float in m)
rv['data']: dict of 2D data with (i, j, pol) keys.
rv['flags']: dict of 2D flags with (i, j, pol) keys.
rv['nsamples']: dict of 2D nsamples with (i, j, pol) keys.
'''
info = {}
times = []
bl2ind = {}
inds = {}
# Read file metadata to size up arrays and sort times
filenames = _parse_input_files(filenames, name='input_data')
for filename in filenames:
if verbose:
print(f'Reading header of {filename}')
with h5py.File(filename, 'r') as f:
h = f['/Header']
if check:
# Check that there aren't extra spectral windows
assert int(h['Nspws'][()]) == 1 # not a hera file
if len(times) == 0:
if len(h['freq_array'].shape) == 2: # old pyuvdata shapes with spectral windows
info['freqs'] = h['freq_array'][0] # make 1D instead of 2D
else:
info['freqs'] = h['freq_array'][()]  # already 1D in newer pyuvdata shapes
nfreqs = info['freqs'].size
pol_array = h['polarization_array'][()]
npols = pol_array.size
# the following errors if x_orientation not set in this hdf5
x_orient = str(h['x_orientation'][()], encoding='utf-8')
pol_indices = {uvutils.parse_polstr(POL_NUM2STR_DICT[n], x_orientation=x_orient): cnt
for cnt, n in enumerate(pol_array)}
info['pols'] = list(pol_indices.keys())
info['ants'] = antenna_numbers = h['antenna_numbers'][()]
info['antpos'] = dict(zip(antenna_numbers, h['antenna_positions'][()]))
for coord in ['latitude', 'longitude', 'altitude']:
info[coord] = h[coord][()]
elif check:
# Check that all files have the same number of frequencies
assert int(h['Nfreqs'][()]) == nfreqs
# Determine blt ordering (baselines then times, or times then baselines)
ntimes = int(h['Ntimes'][()])
_times = h['time_array'][:ntimes]
time_first = (np.unique(_times).size == ntimes)
nbls = int(h['Nblts'][()]) // ntimes
if time_first:
# time-baseline ordering
ant1_array = h['ant_1_array'][::ntimes]
ant2_array = h['ant_2_array'][::ntimes]
else:
# baseline-time ordering
_times = h['time_array'][::nbls]
ant1_array = h['ant_1_array'][:nbls]
ant2_array = h['ant_2_array'][:nbls]
_info = {'time_first': time_first, 'ntimes': ntimes, 'nbls': nbls}
times.append((_times, filename, _info))
data_ants = set(ant1_array)
data_ants.update(set(ant2_array))
_hash = hash((ant1_array.tobytes(), ant2_array.tobytes(), time_first, ntimes))
# map baselines to array indices for each unique antenna order
if _hash not in inds:
if time_first:
inds[_hash] = {(i, j): slice(n * ntimes, (n + 1) * ntimes)
for n, (i, j) in enumerate(zip(ant1_array,
ant2_array))}
else:
inds[_hash] = {(i, j): slice(n, None, nbls)
for n, (i, j) in enumerate(zip(ant1_array,
ant2_array))}
if bls is not None:
# Make sure our baselines of interest are in this file
if not all([bl[:2] in inds[_hash] for bl in bls]):
missing_bls = [bl for bl in bls if bl[:2] not in inds[_hash]]
raise ValueError(f'File {filename} missing:' + str(missing_bls))
if 'bls' not in info:
info['bls'] = set(inds[_hash].keys())
info['data_ants'] = data_ants
else:
info['bls'].intersection_update(set(inds[_hash].keys()))
info['data_ants'].intersection_update(data_ants)
bl2ind[filename] = inds[_hash]
if bls is None:
# generate a set of bls if we didn't have one passed in
if pols is None:
pols = list(pol_indices.keys())
bls = info['bls']
bls = set(bl + (p,) for bl in bls for p in pols)
else:
# if length 2 baselines are passed in, add on polarizations
bls_len2 = set(bl for bl in bls if len(bl) == 2)
if len(bls_len2) > 0:
if pols is None:
pols = list(pol_indices.keys())
bls = set(bl for bl in bls if len(bl) == 3)
bls = bls.union([bl + (p,) for bl in bls_len2 for p in pols])
# record polarizations as the set of those indexed in bls
pols = set(bl[2] for bl in bls)
# sort files by time of first integration
times.sort(key=lambda x: x[0][0])
info['times'] = np.concatenate([t[0] for t in times], axis=0)
tot_times = info['times'].size
# preallocate buffers
rv = {}
if read_data:
rv['visdata'] = {bl: np.empty((tot_times, nfreqs), dtype=dtype) for bl in bls}
if read_flags:
rv['flags'] = {bl: np.empty((tot_times, nfreqs), dtype=bool) for bl in bls}
if read_nsamples:
rv['nsamples'] = {bl: np.empty((tot_times, nfreqs), dtype=np.float32) for bl in bls}
# bail here if all we wanted was the info
if len(rv) == 0:
return {'info': info}
t = 0
for _times, filename, _info in times:
inds = bl2ind[filename]
ntimes = _info['ntimes']
nbls = _info['nbls']
if verbose:
print(f'Reading data from {filename}')
with h5py.File(filename, 'r') as f:
if check:
h = f['/Header']
assert ntimes == int(h['Ntimes'][()])
assert nbls == int(h['Nblts'][()]) // ntimes
# Check that files sorted correctly into time order
if _info['time_first']:
assert np.allclose(h['time_array'][:ntimes], _times)
else:
assert np.allclose(h['time_array'][::nbls], _times)
# decide whether to read all the data in, or use partial I/O
full_read = (len(bls) > full_read_thresh * nbls * npols)
if full_read and verbose:
print('Reading full file')
for key, data in rv.items():
d = f['/Data'][key] # data not read yet
if full_read:
d = d[()] # reads data
# Support old array shapes
if len(d.shape) == 4:
# Support polarization-transposed arrays
if d.shape[-1] == nfreqs:
def index_exp(i, j, p):
return np.index_exp[inds[i, j], 0, pol_indices[p]]
else:
def index_exp(i, j, p):
return np.index_exp[inds[i, j], 0, :, pol_indices[p]]
# Support new array shapes
if len(d.shape) == 3:
# Support polarization-transposed arrays
if d.shape[-1] == nfreqs:
def index_exp(i, j, p):
return np.index_exp[inds[i, j], pol_indices[p]]
else:
def index_exp(i, j, p):
return np.index_exp[inds[i, j], :, pol_indices[p]]
# handle HERA's raw (int) and calibrated (complex) file formats
if key == 'visdata' and not np.iscomplexobj(d):
for i, j, p in bls:
_d = d[index_exp(i, j, p)]
data[i, j, p][t:t + ntimes].real = _d['r']
data[i, j, p][t:t + ntimes].imag = _d['i']
else:
for i, j, p in bls:
data[i, j, p][t:t + ntimes] = d[index_exp(i, j, p)]
t += ntimes
# Quick renaming of data key for niceness
if 'visdata' in rv:
rv['data'] = rv.pop('visdata', [])
info['data_ants'] = np.array(sorted(info['data_ants']))
rv['info'] = info
return rv
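# Example usage (illustrative sketch; file paths and the baseline key are hypothetical placeholders):
#     rv = read_hera_hdf5(['zen.2459122.30030.sum.uvh5', 'zen.2459122.30053.sum.uvh5'],
#                         bls=[(0, 1, 'nn')], read_flags=True)
#     print(rv['info']['freqs'].shape, rv['info']['times'].shape)
#     waterfall = rv['data'][(0, 1, 'nn')]  # Ntimes x Nfreqs complex array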
class HERADataFastReader():
'''Wrapper class around read_hera_hdf5 meant to mimic the functionality of HERAData for drop-in replacement.'''
def __init__(self, input_data, read_metadata=True, check=False, skip_lsts=False):
'''Instantiates a HERADataFastReader object. Only supports reading uvh5 files, not writing them.
Does not support BDA and only supports partial I/O along the baseline and polarization axes.
Arguments:
input_data: path or list of paths to uvh5 files.
read_metadata (bool, True): reads metadata from file and stores it internally to try to match HERAData
check (bool, False): run sanity checks to make sure files match.
skip_lsts (bool, False): save time by not computing LSTs from JDs
'''
# parse input_data as filepath(s)
self.filepaths = _parse_input_files(input_data, name='input_data')
# load metadata only
rv = {'info': {}}
if read_metadata:
rv = read_hera_hdf5(self.filepaths, read_data=False, read_flags=False, read_nsamples=False, check=False)
self._adapt_metadata(rv['info'], skip_lsts=skip_lsts)
# update metadata internally
self.info = rv['info']
for meta in HERAData.HERAData_metas:
if meta in rv['info']:
setattr(self, meta, rv['info'][meta])
else:
setattr(self, meta, None)
# create functions that error informatively when trying to use standard HERAData/UVData methods
for funcname in list(dir(HERAData)):
if funcname.startswith('__') and funcname.endswith('__'):
continue # don't overwrite things like __class__ and __init__
if funcname in ['read', '_make_datacontainer', '_HERAData_error']:
continue # don't overwrite functions with errors that we actually use
setattr(self, funcname, self._HERAData_error)
def _adapt_metadata(self, info_dict, skip_lsts=False):
'''Updates metadata from read_hera_hdf5 to better match HERAData. Updates info_dict in place.'''
info_dict['data_ants'] = sorted(info_dict['data_ants'])
info_dict['antpairs'] = sorted(info_dict['bls'])
info_dict['bls'] = sorted(set([ap + (pol, ) for ap in info_dict['antpairs'] for pol in info_dict['pols']]))
XYZ = XYZ_from_LatLonAlt(info_dict['latitude'] * np.pi / 180, info_dict['longitude'] * np.pi / 180, info_dict['altitude'])
enu_antpos = ENU_from_ECEF(
np.array([antpos for ant, antpos in info_dict['antpos'].items()]) + XYZ,
latitude=info_dict['latitude'] * np.pi / 180,
longitude=info_dict['longitude'] * np.pi / 180,
altitude=info_dict['altitude']
)
info_dict['antpos'] = {ant: enu for enu, ant in zip(enu_antpos, info_dict['antpos'])}
info_dict['data_antpos'] = {ant: info_dict['antpos'][ant] for ant in info_dict['data_ants']}
info_dict['times'] = np.unique(info_dict['times'])
info_dict['times_by_bl'] = {ap: info_dict['times'] for ap in info_dict['antpairs']}
info_dict['times_by_bl'].update({(a2, a1): info_dict['times'] for (a1, a2) in info_dict['antpairs']})
if not skip_lsts:
info_dict['lsts'] = JD2LST(info_dict['times'], info_dict['latitude'], info_dict['longitude'], info_dict['altitude'])
info_dict['lsts_by_bl'] = {ap: info_dict['lsts'] for ap in info_dict['antpairs']}
def _HERAData_error(self, *args, **kwargs):
raise NotImplementedError('HERADataFastReader does not support this method. Try HERAData instead.')
def read(self, bls=None, pols=None, full_read_thresh=0.002, read_data=True, read_flags=True,
read_nsamples=True, fix_autos_func=np.abs, check=False, dtype=np.complex128, verbose=False, skip_lsts=False):
'''A faster read that only concatenates along the time axis. Puts times in ascending order, but does not
check that files are contiguous. Currently not BDA compatible.
Arguments:
bls: list of (ant_1, ant_2, [polstr]) tuples to read out of files. Default: all bls common to all files.
pols: list of pol strings to read out of files. Default: all, but is superseded by any polstrs listed in bls.
full_read_thresh (0.002): fractional threshold for reading whole file instead of baseline by baseline.
read_data (bool, True): read data
read_flags (bool, True): read flags
read_nsamples (bool, True): read nsamples
fix_autos_func (function, np.abs): function to apply to autocorrelations to enforce, for example, that
they are real. Default is np.abs, which matches UVData._fix_autos(). Use np.real for loading diff data.
check (bool, False): run sanity checks to make sure files match.
dtype (np.complex128): numpy datatype for output complex-valued arrays
verbose: print some progress messages.
skip_lsts (bool, False): save time by not computing LSTs from JDs
Returns:
data: DataContainer mapping baseline keys to complex visibility waterfalls (if read_data is True, else None)
flags: DataContainer mapping baseline keys to boolean flag waterfalls (if read_flags is True, else None)
nsamples: DataContainer mapping baseline keys to integer Nsamples waterfalls (if read_nsamples is True, else None)
'''
rv = read_hera_hdf5(self.filepaths, bls=bls, pols=pols, full_read_thresh=full_read_thresh,
read_data=read_data, read_flags=read_flags, read_nsamples=read_nsamples,
check=check, dtype=dtype, verbose=verbose)
self._adapt_metadata(rv['info'], skip_lsts=skip_lsts)
# enforce real-valued autocorrelations by applying fix_autos_func. Using fix_autos_func = np.abs matches UVData._fix_autos()
if 'data' in rv:
for bl in rv['data']:
if split_bl(bl)[0] == split_bl(bl)[1]:
rv['data'][bl] = fix_autos_func(rv['data'][bl])
# construct datacontainers from result
return self._make_datacontainer(rv, 'data'), self._make_datacontainer(rv, 'flags'), self._make_datacontainer(rv, 'nsamples')
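# Example usage (illustrative sketch; the file path is a hypothetical placeholder):
#     hdr = HERADataFastReader('zen.2459122.30030.sum.uvh5')
#     data, flags, nsamples = hdr.read(pols=['nn'])
#     # metadata attributes like hdr.freqs, hdr.times, and data.times_by_bl mimic HERAData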
def _make_datacontainer(self, rv, key='data'):
'''Converts outputs from read_hera_hdf5 to a more standard HERAData output.'''
if key not in rv:
return None
# construct datacontainer with whatever metadata is available
dc = DataContainer(rv[key])
for meta in HERAData.HERAData_metas:
if meta in rv['info'] and meta not in ['pols', 'antpairs', 'bls']: # these are functions on datacontainers
setattr(dc, meta, copy.deepcopy(rv['info'][meta]))
return dc
def read_filter_cache_scratch(cache_dir):
"""
Load files from a cache specified by cache_dir.
Cache files are intended to serve as common short-term on-disk scratch for filtering matrices
that can be loaded by multiple compute nodes processing a night, saving computational time by avoiding
recomputing filter matrices (which often involve pseudo-inverses).
A node processing a single chunk will be able to read in any cache matrices that were already
computed from previous chunks.
Cache files are named with randomly generated strings with the extension ".filter_cache". They
are not intended for long-term or cross-platform storage and are currently designed to be deleted
at the end of processing a night.
Parameters
----------
cache_dir, string, path to a folder that is used for the cache
files in this folder with an extension .filter_cache are assumed
to be cache files. These files are pickled caches from previous filtering runs.
"""
# Load all cache files and merge their precomputed filter matrices into a single dict.
cache = {}
cache_files = glob.glob(cache_dir + '/*.filter_cache')
# loop through cache files, load them.
# If a key is new, add it to the internal cache; keys already present are skipped
# (their matrices are simply discarded when the temporary cache goes out of scope).
for cache_file in cache_files:
with open(cache_file, 'rb') as cfile:
cache_t = pickle.load(cfile)
for key in cache_t:
if key not in cache:
cache[key] = cache_t[key]
return cache
def write_filter_cache_scratch(filter_cache, cache_dir=None, skip_keys=None):
"""
Write new entries of a filter cache to a new cache file.
Cache files are intended to serve as common short-term on-disk scratch for filtering matrices
that can be loaded by multiple compute nodes processing a night, saving computational time by avoiding
recomputing filter matrices (which often involve pseudo-inverses).
A node processing a single chunk will be able to read in any cache matrices that were already
computed from previous chunks.
Cache files are named with randomly generated strings with the extension ".filter_cache". They
are not intended for long-term or cross-platform storage and are currently designed to be deleted
at the end of processing a night.
Parameters
----------
filter_cache, dict, dictionary of values that we wish to cache.
cache_dir, string, optional, path to a folder that is used for the cache
files in this folder with an extension .filter_cache are assumed
to be cache files. These files are pickled caches from previous filtering runs.
default, current working directory.
skip_keys, list, list of keys to skip in writing the filter_cache.
"""
if skip_keys is None:
skip_keys = []
# only keep keys that are not in skip_keys (e.g. keys that were already present
# in the cache before this run and therefore do not need to be re-written).
new_filters = {k: filter_cache[k] for k in filter_cache if k not in skip_keys}
if len(new_filters) > 0:
# generate new file name
if cache_dir is None:
cache_dir = os.getcwd()
cache_file_name = '%032x' % random.getrandbits(128) + '.filter_cache'
with open(os.path.join(cache_dir, cache_file_name), 'ab') as cfile:
pickle.dump(new_filters, cfile)
else:
warnings.warn("No new keys provided. No cache file written.")
def _pol2str(pol: np.integer | str, x_orientation: str, jpol: bool = False) -> str:
fnc = jnum2str if jpol else polnum2str
if np.issubdtype(pol.dtype, np.signedinteger):
return fnc(pol, x_orientation=x_orientation) # convert to string if possible
else:
return ','.join([fnc(int(p), x_orientation=x_orientation) for p in pol.split(',')])
def load_flags(flagfile, filetype='h5', return_meta=False):
'''Load flags from a file and returns them as a DataContainer (for per-visibility flags)
or dictionary (for per-antenna or per-polarization flags). More than one spectral window
is not supported. Assumes times are evenly-spaced and in order for each baseline.
Arguments:
flagfile: path to file containing flags and flagging metadata
filetype: either 'h5' or 'npz'. 'h5' assumes the file is readable as a hera_qm
UVFlag object in the 'flag' mode (could be by baseline, by antenna, or by
polarization). 'npz' provides legacy support for the IDR2.1 flagging npzs,
but only for per-visibility flags.
return_meta: if True, return a metadata dictionary with, e.g., 'times', 'freqs', 'history'
Returns:
flags: dictionary or DataContainer mapping keys to Ntimes x Nfreqs numpy arrays.
if 'h5' and 'baseline' mode or 'npz': DataContainer with keys like (0,1,'nn')
if 'h5' and 'antenna' mode: dictionary with keys like (0,'Jnn')
if 'h5' and 'waterfall' mode: dictionary with keys like 'Jnn'
meta: (only returned if return_meta is True)
'''
flags = {}
if filetype not in ['h5', 'npz']:
raise ValueError("filetype must be 'h5' or 'npz'.")
elif filetype == 'h5':
from pyuvdata import UVFlag
uvf = UVFlag(flagfile)
assert uvf.mode == 'flag', f'The input h5-based UVFlag object must be in flag mode, got {uvf.mode}'
assert (np.issubdtype(uvf.polarization_array.dtype, np.signedinteger)
or np.issubdtype(uvf.polarization_array.dtype, np.str_)), \
"The input h5-based UVFlag object's polarization_array must be integers or byte strings."
freqs = np.unique(uvf.freq_array)
times = np.unique(uvf.time_array)
history = uvf.history
if uvf.type == 'baseline': # one time x freq waterfall per baseline
blt_slices = get_blt_slices(uvf)
for ip, pol in enumerate(uvf.polarization_array):
pol = _pol2str(pol, uvf.telescope.x_orientation)
for (ant1, ant2), blt_slice in blt_slices.items():
flags[(ant1, ant2, pol)] = uvf.flag_array[blt_slice, :, ip]
# data container only supports standard polarizations strings
if np.issubdtype(uvf.polarization_array.dtype, np.signedinteger):
flags = DataContainer(flags)
flags.times = times
flags.freqs = freqs
elif uvf.type == 'antenna': # one time x freq waterfall per antenna
for ip, jpol in enumerate(uvf.polarization_array):
jpol = _pol2str(jpol, x_orientation=uvf.telescope.x_orientation, jpol=True)
for i, ant in enumerate(uvf.ant_array):
flags[(ant, jpol)] = np.array(uvf.flag_array[i, :, :, ip].T)
elif uvf.type == 'waterfall': # one time x freq waterfall (per visibility polarization)
for ip, jpol in enumerate(uvf.polarization_array):
jpol = _pol2str(jpol, x_orientation=uvf.telescope.x_orientation, jpol=True)
flags[jpol] = uvf.flag_array[:, :, ip]
elif filetype == 'npz': # legacy support for IDR 2.1 npz format
npz = np.load(flagfile)
pols = [polnum2str(p) for p in npz['polarization_array']]
freqs = np.unique(npz['freq_array'])
times = np.unique(npz['time_array'])
history = npz['history']
nAntpairs = len(npz['antpairs'])
assert npz['flag_array'].shape[0] == nAntpairs * len(times), \
'flag_array must have flags for all baselines for all times.'
for p, pol in enumerate(pols):
flag_array = np.reshape(npz['flag_array'][:, 0, :, p], (len(times), nAntpairs, len(freqs)))
for n, (i, j) in enumerate(npz['antpairs']):
flags[i, j, pol] = flag_array[:, n, :]
flags = DataContainer(flags)
if return_meta:
return flags, {'freqs': freqs, 'times': times, 'history': history}
else:
return flags
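# Example usage (illustrative sketch; the file path is a hypothetical placeholder):
#     flags, meta = load_flags('zen.2459122.30030.flags.h5', return_meta=True)
#     # for a baseline-type UVFlag file, keys look like (0, 1, 'nn')
#     print(meta['freqs'].shape, meta['times'].shape)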
def get_file_times(filepaths, filetype='uvh5'):
"""
Get a file's lst_array in radians and time_array in Julian Date.
Some caveats:
- Miriad standard is bin start, so a shift by int_time / 2 is performed.
uvh5 standard is bin center, so times are left untouched.
- Miriad files do not support baseline-dependent averaging (BDA).
- With BDA for uvh5 files, the results will correspond to the least-averaged
baseline in the file.
- With uvh5 files with a single integration, it is assumed that the integration
time and dtime are the same. This may not be true in LST-binned files.
Args:
filepaths : type=list or str, filepath or list of filepaths
filetype : str, options=['miriad', 'uvh5']
Returns:
dlst : ndarray (or float if filepaths is a string) of lst bin width [radian]
dtime : ndarray (or float if filepaths is a string) of time bin width [Julian Date]
file_lst_arrays : list of ndarrays (or list of floats if filepaths is a string)
of unwrapped lst_array [radians]
file_time_arrays : list of ndarrays (or list of floats if filepaths is a string)
of time_array [Julian Date]
"""
_array = True
# check filepaths type
if isinstance(filepaths, (str, Path)):
_array = False
filepaths = [filepaths]
if filetype not in ['miriad', 'uvh5']:
raise ValueError("filetype {} not recognized".format(filetype))
# form empty lists
dlsts = []
dtimes = []
file_lst_arrays = []
file_time_arrays = []
# get Nfiles
Nfiles = len(filepaths)
# iterate over filepaths and extract time info
for i, f in enumerate(filepaths):
if filetype == 'miriad':
assert AIPY, "you need aipy to use the miriad filetype"
uv = aipy.miriad.UV(f)
# get integration time
int_time = uv['inttime'] / (units.si.day.in_units(units.si.s))
int_time_rad = uv['inttime'] * 2 * np.pi / (units.si.sday.in_units(units.si.s))
# get start and stop, add half an integration
start_lst = uv['lst'] + int_time_rad / 2.0
start_time = uv['time'] + int_time / 2.0
# form time arrays
lst_array = (start_lst + np.arange(uv['ntimes']) * int_time_rad) % (2 * np.pi)
time_array = start_time + np.arange(uv['ntimes']) * int_time
elif filetype == 'uvh5':
# get times directly from uvh5 file's header: faster than loading entire file via HERAData
with h5py.File(f, mode='r') as _f:
# pull out time_array and lst_array
time_array = np.ravel(_f[u'Header'][u'time_array'])
if u'lst_array' in _f[u'Header']:
lst_array = np.ravel(_f[u'Header'][u'lst_array'])
else:
# need to generate lst_array on the fly
lst_array = np.ravel(uvutils.get_lst_for_time(_f[u'Header'][u'time_array'],
latitude=_f[u'Header'][u'latitude'][()],
longitude=_f[u'Header'][u'longitude'][()],
altitude=_f[u'Header'][u'altitude'][()]))
# figure out which baseline has the most times in order to handle BDA appropriately
baseline_array = uvutils.antnums_to_baseline(
np.array(_f[u'Header'][u'ant_1_array']),
np.array(_f[u'Header'][u'ant_2_array']),
Nants_telescope=np.array(_f[u'Header'][u'Nants_telescope'])
)
most_common_bl_num = scipy.stats.mode(baseline_array, keepdims=True)[0][0]
time_array = time_array[baseline_array == most_common_bl_num]
lst_array = lst_array[baseline_array == most_common_bl_num]
# figure out dtime and dlst, handling the case where a diff cannot be done.
if len(time_array) > 1:
int_time = np.median(np.diff(time_array))
int_time_rad = np.median(np.diff(lst_array))
else:
warnings.warn(f'{f} has only one time, so we assume that dtime is the minimum '
'integration time. This may be incorrect for LST-binned files.')
int_time = np.min(_f[u'Header'][u'integration_time']) / units.day.to(units.si.s)
int_time_rad = int_time / units.sday.to(units.day) * 2 * np.pi
dlsts.append(int_time_rad)
dtimes.append(int_time)
file_lst_arrays.append(lst_array)
file_time_arrays.append(time_array)
dlsts = np.asarray(dlsts)
dtimes = np.asarray(dtimes)
if _array is False:
return dlsts[0], dtimes[0], file_lst_arrays[0], file_time_arrays[0]
else:
return dlsts, dtimes, file_lst_arrays, file_time_arrays
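# Example usage (illustrative sketch; file paths are hypothetical placeholders):
#     dlsts, dtimes, lst_arrays, time_arrays = get_file_times(['zen.2459122.30030.sum.uvh5',
#                                                              'zen.2459122.30053.sum.uvh5'])
#     # dlsts and dtimes are per-file bin widths; lst_arrays and time_arrays are per-file arrays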
def partial_time_io(hd, times=None, time_range=None, lsts=None, lst_range=None, **kwargs):
'''Perform partial I/O with a time-select on a HERAData object, even if it is initialized
using multiple files, some of which do not contain any of the specified times.
Note: can only use one of times, time_range, lsts, lst_range
Arguments:
hd: HERAData object initialized with (usually multiple) uvh5 files
times: list of times in JD to load
time_range: length-2 array-like of range of JDs to load
lsts: list of lsts in radians to load
lst_range: length-2 array-like of range of lsts in radians to load.
If the 0th element is greater than the 1st, the range will wrap around 2pi
kwargs: other partial i/o kwargs (see io.HERAData.read)
Returns:
data: DataContainer mapping baseline keys to complex visibility waterfalls
flags: DataContainer mapping baseline keys to boolean flag waterfalls
nsamples: DataContainer mapping baseline keys to integer Nsamples waterfalls
'''
assert hd.filetype == 'uvh5', 'This function only works for uvh5-based HERAData objects.'
if np.sum([times is not None, time_range is not None, lsts is not None, lst_range is not None]) > 1:
raise ValueError('Only one of times, time_range, lsts, and lst_range can be not None.')
combined_hd = None
for f in hd.filepaths:
hd_here = HERAData(f, upsample=hd.upsample, downsample=hd.downsample)
# check if any of the selected times are in this particular file
if times is not None:
times_here = [time for time in times if time in hd_here.times]
if len(times_here) == 0:
continue # skip this file
else:
times_here = None
# check if any of the selected lsts are in this particular file
if lsts is not None:
lsts_here = [lst for lst in lsts if lst in hd_here.lsts]
if len(lsts_here) == 0:
continue # skip this file
else:
lsts_here = None
# attempt to read this file's data
try:
hd_here.read(times=times_here, time_range=time_range,
lsts=lsts_here, lst_range=lst_range,
return_data=False, **kwargs)
except ValueError as err:
# check to see if the read failed because of the time range or lst range
if 'No elements in time range between ' in str(err) or 'No elements in time_array between ' in str(err):
continue # no matching times, skip this file
elif 'No elements in LST range between ' in str(err) or 'No elements in lst_array between ' in str(err):
continue  # no matching lsts, skip this file
else:
raise
if combined_hd is None:
combined_hd = hd_here
else:
combined_hd += hd_here
if combined_hd is None:
raise ValueError('No times or lsts matched any of the files in hd.')
combined_hd = to_HERAData(combined_hd) # re-runs the slicing and indexing
return combined_hd.build_datacontainers()
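# A minimal usage sketch for partial_time_io; the file names and JD range below are
# placeholders, and any one of times/time_range/lsts/lst_range could be used instead:
#     hd = HERAData(['zen.2459122.30030.sum.uvh5', 'zen.2459122.30105.sum.uvh5'])
#     data, flags, nsamples = partial_time_io(hd, time_range=[2459122.30, 2459122.31])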
def save_redcal_meta(meta_filename, fc_meta, omni_meta, freqs, times, lsts, antpos, history, clobber=True):
'''Saves redcal metadata to a hdf5 file. See also read_redcal_meta.
Arguments:
meta_filename: path to hdf5 file to save
fc_meta: firstcal metadata dictionary, such as that produced by redcal.redcal_iteration()
omni_meta: omnical metadata dictionary, such as that produced by redcal.redcal_iteration()
freqs: 1D numpy array of frequencies in the data
times: 1D numpy array of times in the data
lsts: 1D numpy array of LSTs in the data
antpos: dictionary of antenna positions in the form {ant_index: np.array([x,y,z])}
history: string describing the creation of this file
clobber: If False and meta_filename exists, raise OSError.
'''
if os.path.exists(meta_filename) and not clobber:
raise OSError(f'{meta_filename} already exists but clobber=False.')
with h5py.File(meta_filename, "w") as outfile:
# save the metadata of the metadata
header = outfile.create_group('header')
header['freqs'] = freqs
header['times'] = times
header['lsts'] = lsts
antnums = np.array(sorted(list(antpos.keys())))
header['antpos'] = np.array([antpos[antnum] for antnum in antnums])
header['antpos'].attrs['antnums'] = antnums
header['history'] = np.bytes_(history)
# save firstcal metadata, saving dictionary keys as attrs
fc_grp = outfile.create_group('fc_meta')
ant_keys = sorted(list(fc_meta['dlys'].keys()))
fc_grp['dlys'] = np.array([fc_meta['dlys'][ant] for ant in ant_keys])
fc_grp['dlys'].attrs['ants'] = np.bytes_(ant_keys)
fc_grp['polarity_flips'] = np.array([fc_meta['polarity_flips'][ant] for ant in ant_keys])
fc_grp['polarity_flips'].attrs['ants'] = np.bytes_(ant_keys)
# save the omnical metadata, saving dictionary keys as attrs
omni_grp = outfile.create_group('omni_meta')
pols_keys = sorted(list(omni_meta['chisq'].keys()))
omni_grp['chisq'] = np.array([omni_meta['chisq'][pols] for pols in pols_keys])
omni_grp['chisq'].attrs['pols'] = pols_keys
omni_grp['iter'] = np.array([omni_meta['iter'][pols] for pols in pols_keys])
omni_grp['iter'].attrs['pols'] = pols_keys
omni_grp['conv_crit'] = np.array([omni_meta['conv_crit'][pols] for pols in pols_keys])
omni_grp['conv_crit'].attrs['conv_crit'] = np.bytes_(pols_keys)
def read_redcal_meta(meta_filename):
'''Reads redcal metadata from an hdf5 file. See also save_redcal_meta.
Arguments:
meta_filename: path to hdf5 file to load
Returns:
fc_meta: firstcal metadata dictionary, such as that produced by redcal.redcal_iteration()
omni_meta: omnical metadata dictionary, such as that produced by redcal.redcal_iteration()
freqs: 1D numpy array of frequencies in the data
times: 1D numpy array of times in the data
lsts: 1D numpy array of LSTs in the data
antpos: dictionary of antenna positions in the form {ant_index: np.array([x,y,z])}
history: string describing the creation of this file
'''
with h5py.File(meta_filename, "r") as infile:
# decode metadata of metadata
freqs = infile['header']['freqs'][:]
times = infile['header']['times'][:]
lsts = infile['header']['lsts'][:]
antpos = {ant: pos for ant, pos in zip(infile['header']['antpos'].attrs['antnums'],
infile['header']['antpos'][:, :])}
history = infile['header']['history'][()].tobytes().decode('utf8')
# reconstruct firstcal metadata
fc_meta = {}
ants = [(int(num.tobytes().decode('utf8')), pol.tobytes().decode('utf8'))
for num, pol in infile['fc_meta']['dlys'].attrs['ants']]
fc_meta['dlys'] = {ant: dly for ant, dly in zip(ants, infile['fc_meta']['dlys'][:, :])}
fc_meta['polarity_flips'] = {ant: flips for ant, flips in zip(ants, infile['fc_meta']['polarity_flips'][:, :])}
# reconstruct omnical metadata
omni_meta = {}
pols_keys = infile['omni_meta']['chisq'].attrs['pols']
omni_meta['chisq'] = {pols: chisq for pols, chisq in zip(pols_keys, infile['omni_meta']['chisq'][:, :])}
omni_meta['iter'] = {pols: itr for pols, itr in zip(pols_keys, infile['omni_meta']['iter'][:, :])}
omni_meta['conv_crit'] = {pols: cc for pols, cc in zip(pols_keys, infile['omni_meta']['conv_crit'][:, :])}
return fc_meta, omni_meta, freqs, times, lsts, antpos, history
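# A minimal round-trip sketch for save_redcal_meta/read_redcal_meta; the filename is a
# placeholder and fc_meta/omni_meta stand for dictionaries such as those produced by
# redcal.redcal_iteration():
#     save_redcal_meta('zen.redcal_meta.hdf5', fc_meta, omni_meta, freqs, times, lsts,
#                      antpos, history='redcal run', clobber=True)
#     fc_meta, omni_meta, freqs, times, lsts, antpos, history = read_redcal_meta('zen.redcal_meta.hdf5')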
#######################################################################
# LEGACY CODE
#######################################################################
def to_HERAData(input_data, filetype='miriad', **read_kwargs):
'''Converts a string path, UVData, or HERAData object, or a list of any one of those, to a
single HERAData object without loading any new data.
Arguments:
input_data: data file path, or UVData/HERAData instance, or list of either strings of data
file paths or list of UVData/HERAData instances to combine into a single HERAData object
filetype: 'miriad', 'uvfits', or 'uvh5'. Ignored if input_data is UVData/HERAData objects
read_kwargs : kwargs to pass to UVData.read (e.g. run_check, check_extra and
run_check_acceptability). Only used for uvh5 filetype
Returns:
hd: HERAData object. Will not have data loaded if initialized from string(s).
'''
if filetype not in ['miriad', 'uvfits', 'uvh5']:
raise NotImplementedError("Data filetype must be 'miriad', 'uvfits', or 'uvh5'.")
if isinstance(input_data, (str, Path)): # single visibility data path
return HERAData(input_data, filetype=filetype, **read_kwargs)
elif isinstance(input_data, HERAData): # already a HERAData object
return input_data
elif isinstance(input_data, UVData): # single UVData object
hd = input_data
hd.__class__ = HERAData
hd._determine_blt_slicing()
if filetype == 'uvh5':
hd._attach_metadata()
hd.filepaths = None
return hd
elif isinstance(input_data, Iterable): # List loading
if np.all([isinstance(i, (str, Path)) for i in input_data]): # List of visibility data paths
return HERAData(input_data, filetype=filetype, **read_kwargs)
elif np.all([isinstance(i, (UVData, HERAData)) for i in input_data]): # List of uvdata objects
hd = reduce(operator.add, input_data)
hd.__class__ = HERAData
hd._determine_blt_slicing()
return hd
else:
raise TypeError('If input is a list, it must be only strings or only UVData/HERAData objects.')
else:
raise TypeError('Input must be a UVData/HERAData object, a string, or a list of either.')
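# A minimal usage sketch for to_HERAData; the paths are placeholders and no visibility
# data is loaded until read() is called:
#     hd = to_HERAData(['zen.2459122.30030.sum.uvh5', 'zen.2459122.30105.sum.uvh5'], filetype='uvh5')
#     data, flags, nsamples = hd.read()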
def load_vis(input_data, return_meta=False, filetype='miriad', pop_autos=False, pick_data_ants=True, nested_dict=False, **read_kwargs):
'''Load miriad, uvfits, or uvh5 files or UVData/HERAData objects into DataContainers, optionally returning
the most useful metadata. More than one spectral window is not supported. Assumes every baseline
has the same times present and that the times are in order.
Arguments:
input_data: data file path, or UVData/HERAData instance, or list of either strings of data
file paths or list of UVData/HERAData instances to concatenate into a single dictionary
return_meta: boolean, if True: also return antpos, ants, freqs, times, lsts, and pols
filetype: 'miriad', 'uvfits', or 'uvh5'. Ignored if input_data is UVData/HERAData objects
pop_autos: boolean, if True: remove autocorrelations
pick_data_ants: boolean, if True and return_meta=True, return only antennas in data
nested_dict: boolean, if True replace DataContainers with the legacy nested dictionary format
where visibilities and flags are accessed as data[(0,1)]['nn']
read_kwargs : keyword arguments to pass to HERAData.read()
Returns:
if return_meta is True:
(data, flags, antpos, ants, freqs, times, lsts, pols)
else:
(data, flags)
data: DataContainer containing baseline-pol complex visibility data with keys
like (0,1,'nn') and with shape=(Ntimes,Nfreqs)
flags: DataContainer containing data flags
antpos: dictionary containing antenna numbers as keys and position vectors as values
ants: ndarray containing unique antenna indices
freqs: ndarray containing frequency channels (Hz)
times: ndarray containing julian date bins of data
lsts: ndarray containing LST bins of data (radians)
pols: list of polarization strings
'''
hd = to_HERAData(input_data, filetype=filetype)
if hd.data_array is not None:
d, f, n = hd.build_datacontainers()
else:
d, f, n = hd.read(**read_kwargs)
# remove autos if requested
if pop_autos:
for k in list(d.keys()):
if k[0] == k[1]:
del d[k], f[k], n[k]
# convert into nested dict if necessary
if nested_dict:
data, flags = odict(), odict()
antpairs = [key[0:2] for key in d.keys()]
for ap in antpairs:
data[ap] = d[ap]
flags[ap] = f[ap]
else:
data, flags = d, f
# get meta
if return_meta:
antpos = utils.get_ENU_antpos(hd, asdict=True)
return data, flags, antpos, list(antpos.keys()), d.freqs, d.times, d.lsts, d.pols()
else:
return data, flags
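# A minimal usage sketch for load_vis (the path is a placeholder):
#     data, flags = load_vis('zen.2459122.30030.sum.uvh5', filetype='uvh5')
#     (data, flags, antpos, ants, freqs,
#      times, lsts, pols) = load_vis('zen.2459122.30030.sum.uvh5', filetype='uvh5', return_meta=True)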
def write_vis(fname, data, lst_array, freq_array, antpos, time_array=None, flags=None, nsamples=None,
filetype='miriad', write_file=True, outdir="./", overwrite=False, verbose=True, history=" ",
return_uvd=False, start_jd=None, lst_branch_cut=0.0, x_orientation="north", instrument="HERA",
telescope_name="HERA", object_name='EOR', vis_units='uncalib', dec=-30.72152,
telescope_location=HERA_TELESCOPE_LOCATION, integration_time=None,
**kwargs):
"""
Take DataContainer dictionary, export to UVData object and write to file. See pyuvdata.UVData
documentation for more info on these attributes.
Parameters:
-----------
fname : type=str, output filename of visibility data
data : type=DataContainer, holds complex visibility data.
lst_array : type=float ndarray, contains unique LST time bins [radians] of data (center of integration).
freq_array : type=ndarray, contains frequency bins of data [Hz].
antpos : type=dictionary, antenna position dictionary. keys are antenna integers and values
are position vectors in meters in ENU (TOPO) frame.
time_array : type=ndarray, contains unique Julian Date time bins of data (center of integration).
flags : type=DataContainer or array, holds data flags, matching data in shape.
nsamples : type=DataContainer or array, holds number of points averaged into each bin in data
(if applicable).
filetype : type=str, filetype to write-out, options=['miriad', 'uvh5'].
write_file : type=boolean, write UVData to file if True.
outdir : type=str, output directory for output file.
overwrite : type=boolean, if True, overwrite output files.
verbose : type=boolean, if True, report feedback to stdout.
history : type=str, history string for UVData object
return_uvd : type=boolean, if True return UVData instance.
start_jd : type=float, starting integer Julian Date of time_array if time_array is None.
lst_branch_cut : type=float, LST of data start, ensures that LSTs lower than this are wrapped around
and correspond to higher JDs in time_array, but only if time_array is None [radians]
x_orientation : type=str, orientation of X dipole, options=['east', 'north']
instrument : type=str, instrument name.
telescope_name : type=str, telescope name.
object_name : type=str, observing object name.
vis_units : type=str, visibility units.
dec : type=float, declination of observer in degrees North.
telescope_location : type=ndarray, telescope location in xyz in ITRF (earth-centered frame).
integration_time : type=float or ndarray, integration duration in seconds for data_array.
This does not necessarily have to be equal to the diff(time_array): for the case of
LST-binning, this is not the duration of the LST-bin but the integration time of the
pre-binned data. Default is median(diff(time_array)) in seconds. Note: the _total_
integration time in a visibility is integration_time * nsamples.
kwargs : type=dictionary, additional parameters to set in UVData object.
Output:
-------
if return_uvd: return UVData instance
"""
# configure UVData parameters
# get pols
pols = np.unique([k[-1] for k in data.keys()])
polarization_array = np.array([polstr2num(p, x_orientation=x_orientation) for p in pols])
# get telescope ants
antenna_numbers = np.unique(list(antpos.keys()))
antenna_names = [f"HH{a}" for a in antenna_numbers]
# get antenna positions in ITRF frame
lat, lon, alt = uvutils.LatLonAlt_from_XYZ(telescope_location)
antenna_positions = np.array([antpos[k] for k in antenna_numbers])
antenna_positions = uvutils.ECEF_from_ENU(
antenna_positions, latitude=lat, longitude=lon, altitude=alt
) - telescope_location
# get times
if time_array is None:
if start_jd is None:
raise AttributeError("if time_array is not fed, start_jd must be fed")
time_array = LST2JD(
lst_array,
start_jd,
allow_other_jd=True,
lst_branch_cut=lst_branch_cut,
latitude=(lat * 180 / np.pi),
longitude=(lon * 180 / np.pi),
altitude=alt
)
antpairs = sorted(data.antpairs())
tel_loc_obj = EarthLocation.from_geocentric(*telescope_location, unit="m")
# create an object with empty data-size arrays using UVData.new()
if hasattr(UVData(), "telescope"):
tel_params = {
"telescope": Telescope.new(
name=telescope_name,
location=tel_loc_obj,
antenna_numbers=antenna_numbers,
antenna_names=antenna_names,
antenna_positions=antenna_positions,
instrument=instrument,
x_orientation=x_orientation,
)
}
else:
tel_params = {
"telescope_name": telescope_name,
"telescope_location": tel_loc_obj,
"antenna_numbers": antenna_numbers,
"antenna_names": antenna_names,
"antenna_positions": antenna_positions,
"instrument": instrument,
"x_orientation": x_orientation,
}
uvd = UVData.new(
freq_array=freq_array,
polarization_array=polarization_array,
times=time_array,
antpairs=antpairs,
time_axis_faster_than_bls=True,
do_blt_outer=True,
empty=True,
history=history,
**tel_params,
)
# set data
for antpair in antpairs:
for pol in pols:
key1, key2 = antpair
pol_num = polstr2num(pol, x_orientation=x_orientation)
uvd.set_data(data[antpair + (str(pol),)][:, :, np.newaxis], key1, key2, pol_num)
if nsamples is not None:
uvd.set_nsamples(nsamples[antpair + (str(pol),)][:, :, np.newaxis], key1, key2, pol_num)
if flags is not None:
uvd.set_flags(flags[antpair + (str(pol),)][:, :, np.newaxis], key1, key2, pol_num)
if nsamples is None:
# default nsamples to all ones
uvd.nsample_array = np.full_like(uvd.nsample_array, 1.0)
# write to file
if write_file:
# check output
fname = os.path.join(outdir, fname)
if os.path.exists(fname) and overwrite is False:
if verbose:
print("{} exists, not overwriting".format(fname))
else:
if verbose:
print("saving {}".format(fname))
if filetype == 'miriad':
uvd.write_miriad(fname, clobber=True)
elif filetype == 'uvh5':
uvd.write_uvh5(fname, clobber=True)
else:
raise AttributeError("didn't recognize filetype: {}".format(filetype))
if return_uvd:
return uvd
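# A minimal usage sketch for write_vis, assuming data/flags/nsamples are DataContainers and
# lsts/freqs/times/antpos are metadata such as that returned by load_vis(..., return_meta=True);
# the output filename is a placeholder:
#     uvd = write_vis('out.uvh5', data, lsts, freqs, antpos, time_array=times, flags=flags,
#                     nsamples=nsamples, filetype='uvh5', overwrite=True, return_uvd=True)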
def update_uvdata(uvd, data=None, flags=None, nsamples=None, add_to_history='', **kwargs):
'''Updates a UVData/HERAData object with data or parameters. Cannot modify the shape of
data arrays. More than one spectral window is not supported. Assumes every baseline
has the same times present and that the times are in order.
Arguments:
uvd: UVData/HERAData object to be updated
data: dictionary or DataContainer of complex visibility data to update. Keys
like (0,1,'nn') and shape=(Ntimes,Nfreqs). Default (None) does not update.
flags: dictionary or DataContainer of data flags to update.
Default (None) does not update.
nsamples: dictionary or DataContainer of nsamples to update.
Default (None) does not update.
add_to_history: appends a string to the history of the UVData/HERAData object
kwargs: dictionary mapping updated attributes to their new values.
See pyuvdata.UVData documentation for more info.
'''
# perform update
original_class = uvd.__class__
uvd = to_HERAData(uvd)
uvd.update(data=data, flags=flags, nsamples=nsamples)
uvd.__class__ = original_class
# set additional attributes
uvd.history += add_to_history
for attribute, value in kwargs.items():
if '.' in attribute:
top, bot = attribute.split('.')
getattr(uvd, top).__setattr__(bot, value)
else:
uvd.__setattr__(attribute, value)
uvd.check()
def _write_HERAData_to_filetype(hd, outfilename, filetype_out='miriad', clobber=False):
'''Helper function for update_vis().'''
if filetype_out == 'miriad':
hd.write_miriad(outfilename, clobber=clobber)
elif filetype_out == 'uvfits':
hd.write_uvfits(outfilename, force_phase=True)
elif filetype_out == 'uvh5':
hd.write_uvh5(outfilename, clobber=clobber)
else:
raise TypeError("Input filetype must be either 'miriad', 'uvfits', or 'uvh5'.")
def update_vis(infilename, outfilename, filetype_in='miriad', filetype_out='miriad',
data=None, flags=None, nsamples=None, add_to_history='', clobber=False, **kwargs):
'''Loads an existing file with pyuvdata, modifies some subset of its parameters, and
then writes a new file to disk. Cannot modify the shape of data arrays. More than one
spectral window is not supported. Assumes every baseline has the same times present
and that the times are in order.
Arguments:
infilename: filename of the base visibility file to be updated, or UVData/HERAData object
outfilename: filename of the new visibility file
filetype_in: either 'miriad', 'uvfits', or 'uvh5' (ignored if infile is a UVData/HERAData object)
filetype_out: either 'miriad', 'uvfits', or 'uvh5'
data: dictionary or DataContainer of complex visibility data to update. Keys
like (0,1,'nn') and shape=(Ntimes,Nfreqs). Default (None) does not update.
flags: dictionary or DataContainer of data flags to update.
Default (None) does not update.
nsamples: dictionary or DataContainer of nsamples to update.
Default (None) does not update.
add_to_history: appends a string to the history of the output file
clobber: if True, overwrites existing file at outfilename. Always True for uvfits.
kwargs: dictionary mapping updated attributes to their new values.
See pyuvdata.UVData documentation for more info.
'''
# Load infile
if isinstance(infilename, (UVData, HERAData)):
hd = copy.deepcopy(infilename)
else:
hd = HERAData(infilename, filetype=filetype_in)
hd.read()
update_uvdata(hd, data=data, flags=flags, nsamples=nsamples, add_to_history=add_to_history, **kwargs)
# write out results
_write_HERAData_to_filetype(hd, outfilename, filetype_out=filetype_out, clobber=clobber)
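# A minimal usage sketch for update_vis; the paths are placeholders and new_flags stands for
# a dictionary or DataContainer with the same keys and waterfall shapes as the data on disk:
#     update_vis('in.uvh5', 'out.uvh5', filetype_in='uvh5', filetype_out='uvh5',
#                flags=new_flags, add_to_history='manual flags applied', clobber=True)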
def to_HERACal(input_cal):
'''Converts a string path, UVCal, or HERACal object, or a list of any one of those, to a
single HERACal object without loading any new calibration solutions.
Arguments:
input_cal: path to calfits file, UVCal/HERACal object, or a list of either to combine
into a single HERACal object
Returns:
hc: HERACal object. Will not have calibration loaded if initialized from string(s).
'''
if isinstance(input_cal, (str, Path)): # single calfits path
return HERACal(input_cal)
if isinstance(input_cal, HERACal): # single HERACal
return input_cal
elif isinstance(input_cal, UVCal): # single UVCal object
input_cal.__class__ = HERACal
input_cal.filepaths = None
input_cal._extract_metadata() # initialize metadata vars.
return input_cal
elif isinstance(input_cal, Iterable): # List loading
if np.all([isinstance(ic, (str, Path)) for ic in input_cal]): # List of calfits paths
return HERACal(input_cal)
elif np.all([isinstance(ic, (UVCal, HERACal)) for ic in input_cal]): # List of UVCal/HERACal objects
hc = reduce(operator.add, input_cal)
hc.__class__ = HERACal
return hc
else:
raise TypeError('If input is a list, it must be only strings or only UVCal/HERACal objects.')
else:
raise TypeError('Input must be a UVCal/HERACal object, a string, or a list of either.')
def load_cal(input_cal, return_meta=False):
'''Load calfits files or UVCal/HERACal objects into dictionaries, optionally
returning the most useful metadata. More than one spectral window is not supported.
Arguments:
input_cal: path to calfits file, UVCal/HERACal object, or a list of either
return_meta: if True, returns additional information (see below)
Returns:
if return_meta is True:
(gains, flags, quals, total_qual, ants, freqs, times, pols)
else:
(gains, flags)
gains: Dictionary of complex calibration gains as a function of time
and frequency with keys in the (1,'x') format
flags: Dictionary of flags in the same format as the gains
quals: Dictionary of qualities of calibration solutions in the same
format as the gains (e.g. omnical chi^2 per antenna)
total_qual: ndarray of total calibration quality for the whole array
(e.g. omnical overall chi^2)
ants: ndarray containing unique antenna indices
freqs: ndarray containing frequency channels (Hz)
times: ndarray containing julian date bins of data
pols: list of antenna polarization strings
'''
# load HERACal object and extract gains, data, etc.
hc = to_HERACal(input_cal)
if hc.gain_array is not None:
gains, flags, quals, total_qual = hc.build_calcontainers()
else:
gains, flags, quals, total_qual = hc.read()
# return quantities
if return_meta:
return gains, flags, quals, total_qual, np.array([ant[0] for ant in hc.ants]), hc.freqs, hc.times, hc.pols
else:
return gains, flags
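# A minimal usage sketch for load_cal (the calfits path is a placeholder):
#     gains, flags = load_cal('zen.2459122.30030.sum.omni.calfits')
#     (gains, flags, quals, total_qual,
#      ants, freqs, times, pols) = load_cal('zen.2459122.30030.sum.omni.calfits', return_meta=True)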
def write_cal(fname, gains, freqs, times, antpos=None, lsts=None, flags=None, quality=None, total_qual=None, antnums2antnames=None,
write_file=True, return_uvc=True, outdir='./', overwrite=False, gain_convention='divide',
history=' ', x_orientation="north", telescope_name='HERA', cal_style='redundant',
zero_check=True, telescope_location=HERA_TELESCOPE_LOCATION, **kwargs):
'''Format gain solution dictionary into pyuvdata.UVCal and write to file
Arguments:
fname : type=str, output file basename
gains : type=dictionary, holds complex gain solutions. keys are antenna + pol
tuple pairs, e.g. (2, 'x'), and values are 2D complex ndarrays with time
along [0] axis and freq along [1] axis.
freqs : type=ndarray, holds unique frequency channels in Hz
times : type=ndarray, holds unique times of integration centers in Julian Date
antpos : type=dictionary, antenna position dictionary. keys are antenna integers and values
are position vectors in meters in ENU (TOPO) frame. If this is
not supplied, antenna names, numbers and positions are set from
pyuvdata known telescopes.
lsts : Not used. Retained to prevent breaking API changes.
type=ndarray, holds unique lsts corresponding to the times. If None, converts
times to lsts using the default telescope coordinates given telescope_name.
flags : type=dictionary, holds boolean flags (True if flagged) for gains.
Must match shape of gains.
quality : type=dictionary, holds "quality" of calibration solution. Must match
shape of gains. See pyuvdata.UVCal doc for more details.
total_qual : type=dictionary, holds total_quality_array. Key(s) are polarization
string(s) and values are 2D (Ntimes, Nfreqs) ndarrays.
antnums2antnames : dict, keys antenna numbers (int), values antenna names (str)
Default is "ant{}".format(ant_num) for antenna names.
write_file : type=bool, if True, write UVCal to calfits file
return_uvc : type=bool, if True, return UVCal object
outdir : type=str, output file directory
overwrite : type=bool, if True overwrite output files
gain_convention : type=str, gain solutions formatted such that they 'multiply' into data
to get model, or 'divide' into data to get model
options=['multiply', 'divide']
history : type=str, history string for UVCal object.
x_orientation : type=str, orientation of X dipole, options=['east', 'north']
telescope_name : type=str, name of telescope
cal_style : type=str, style of calibration solutions, options=['redundant', 'sky']. If
cal_style == sky, additional params are required. See pyuvdata.UVCal doc.
zero_check : type=bool, if True, for gain values near zero, set to one and flag them.
telescope_location : type=ndarray, telescope location in xyz in ITRF (earth-centered frame).
kwargs : additional attributes to set in pyuvdata.UVCal
Returns:
if return_uvc: returns UVCal object
else: returns None
'''
# get antenna info
ant_array = np.unique([k[0] for k in gains]).astype(int)
antenna_numbers = copy.copy(ant_array)
if antnums2antnames is None:
antenna_names = np.array(["ant{}".format(ant_num) for ant_num in antenna_numbers])
else:
antenna_names = np.array([antnums2antnames[ant_num] for ant_num in antenna_numbers])
Nants_data = len(ant_array)
# get polarization info: ordering must be monotonic in Jones number
jones_array = np.array(list(set([jstr2num(k[1], x_orientation=x_orientation) for k in gains.keys()])))
jones_array = jones_array[np.argsort(np.abs(jones_array))]
pol_array = np.array([jnum2str(j, x_orientation=x_orientation) for j in jones_array])
Njones = len(jones_array)
# get time info
time_array = np.array(times, float)
Ntimes = len(time_array)
# get frequency info
freq_array = np.array(freqs, float)
Nfreqs = len(freq_array)
if antpos is not None:
# get antenna positions in ITRF frame
lat, lon, alt = uvutils.LatLonAlt_from_XYZ(telescope_location)
antenna_positions = np.array([antpos[k] for k in antenna_numbers])
antenna_positions = uvutils.ECEF_from_ENU(
antenna_positions, latitude=lat, longitude=lon, altitude=alt
) - telescope_location
# form gain, flags and qualities
gain_array = np.empty((Nants_data, Nfreqs, Ntimes, Njones), complex)
flag_array = np.empty((Nants_data, Nfreqs, Ntimes, Njones), bool)
quality_array = np.empty((Nants_data, Nfreqs, Ntimes, Njones), float)
total_quality_array = np.empty((Nfreqs, Ntimes, Njones), float)
for i, p in enumerate(pol_array):
if total_qual is not None:
total_quality_array[:, :, i] = total_qual[p].T[None, :, :]
for j, a in enumerate(ant_array):
# ensure (a, p) is in gains
if (a, p) in gains:
gain_array[j, :, :, i] = gains[(a, p)].T[None, :, :]
if flags is not None:
flag_array[j, :, :, i] = flags[(a, p)].T[None, :, :]
else:
flag_array[j, :, :, i] = np.zeros((Nfreqs, Ntimes), bool)
if quality is not None:
quality_array[j, :, :, i] = quality[(a, p)].T[None, :, :]
else:
quality_array[j, :, :, i] = np.ones((Nfreqs, Ntimes), float)
else:
gain_array[j, :, :, i] = np.ones((Nfreqs, Ntimes), complex)
flag_array[j, :, :, i] = np.ones((Nfreqs, Ntimes), bool)
quality_array[j, :, :, i] = np.ones((Nfreqs, Ntimes), float)
if zero_check:
# Check gain_array for values close to zero, if so, set to 1
zero_check_arr = np.isclose(gain_array, 0, rtol=1e-10, atol=1e-10)
# copy arrays b/c they are still references to the input gain dictionaries
gain_array = gain_array.copy()
flag_array = flag_array.copy()
gain_array[zero_check_arr] = 1.0 + 0j
flag_array[zero_check_arr] = True
if zero_check_arr.any():
warnings.warn("Some values in gain_array were zero and are flagged and set to 1.")
data_dict = {
"gain_array": gain_array,
"flag_array": flag_array,
"quality_array": quality_array
}
if total_qual is not None:
data_dict["total_quality_array"] = total_quality_array
tel_loc_obj = EarthLocation.from_geocentric(*telescope_location, unit="m")
# create an object with empty data-size arrays using UVData.new()
if hasattr(UVCal(), "telescope"):
if antpos is None:
tel_use = Telescope.from_known_telescopes(telescope_name)
tel_use.x_orientation = x_orientation
else:
tel_use = Telescope.new(
name=telescope_name,
location=tel_loc_obj,
antenna_numbers=antenna_numbers,
antenna_names=antenna_names,
antenna_positions=antenna_positions,
x_orientation=x_orientation,
)
tel_params = {"telescope": tel_use}
else:
if antpos is None:
from pyuvdata import get_telescope
tel_use = get_telescope(telescope_name)
tel_params = {
"telescope_name": telescope_name,
"telescope_location": tel_loc_obj,
"antenna_numbers": tel_use.antenna_numbers,
"antenna_names": tel_use.antenna_names,
"antenna_positions": tel_use.antenna_positions,
"x_orientation": x_orientation,
}
else:
tel_params = {
"telescope_name": telescope_name,
"telescope_location": tel_loc_obj,
"antenna_numbers": antenna_numbers,
"antenna_names": antenna_names,
"antenna_positions": antenna_positions,
"x_orientation": x_orientation,
}
uvc = UVCal.new(
cal_style=cal_style,
gain_convention=gain_convention,
jones_array=jones_array,
time_array=time_array,
freq_array=freq_array,
cal_type="gain",
ant_array=ant_array,
history=history,
data=data_dict,
**tel_params,
**kwargs,
)
# write to file
if write_file:
# check output
fname = os.path.join(outdir, fname)
if os.path.exists(fname) and overwrite is False:
print("{} exists, not overwriting...".format(fname))
else:
uvc.write_calfits(fname, clobber=True)
# return object
if return_uvc:
return uvc
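# A minimal usage sketch for write_cal, assuming gains/flags are dictionaries keyed by
# (ant, pol) with (Ntimes, Nfreqs) arrays and freqs/times are 1D arrays; the filename is a
# placeholder:
#     uvc = write_cal('new.calfits', gains, freqs, times, flags=flags, outdir='./', overwrite=True)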
def update_uvcal(cal, gains=None, flags=None, quals=None, add_to_history='', **kwargs):
'''LEGACY CODE TO BE DEPRECATED!
Update UVCal object with gains, flags, quals, history, and/or other parameters
Cannot modify the shape of gain arrays. More than one spectral window is not supported.
Arguments:
cal: UVCal/HERACal object to be updated
gains: Dictionary of complex calibration gains with shape=(Ntimes,Nfreqs)
with keys in the (1,'x') format. Default (None) leaves unchanged.
flags: Dictionary like gains but of flags. Default (None) leaves unchanged.
quals: Dictionary like gains but of per-antenna quality. Default (None) leaves unchanged.
add_to_history: appends a string to the history of the output file
kwargs: dictionary mapping updated attributes to their new values.
See pyuvdata.UVCal documentation for more info.
'''
original_class = cal.__class__
cal.__class__ = HERACal
cal._extract_metadata()
cal.update(gains=gains, flags=flags, quals=quals)
# Check gain_array for values close to zero, if so, set to 1
zero_check = np.isclose(cal.gain_array, 0, rtol=1e-10, atol=1e-10)
cal.gain_array[zero_check] = 1.0 + 0j
cal.flag_array[zero_check] = True
if zero_check.any():
warnings.warn("Some values in cal.gain_array were zero and are flagged and set to 1.")
# Set additional attributes
cal.history += add_to_history
for attribute, value in kwargs.items():
if '.' in attribute:
top, bot = attribute.split('.')
getattr(cal, top).__setattr__(bot, value)
else:
cal.__setattr__(attribute, value)
cal.check()
cal.__class__ = original_class
def update_cal(infilename, outfilename, gains=None, flags=None, quals=None, add_to_history='', clobber=False, **kwargs):
'''Loads an existing calfits file with pyuvdata, modifies some subset of its parameters,
and then writes a new calfits file to disk. Cannot modify the shape of gain arrays.
More than one spectral window is not supported.
Arguments:
infilename: filename of the base calfits file to be updated, or UVCal object
outfilename: filename of the new calfits file
gains: Dictionary of complex calibration gains with shape=(Ntimes,Nfreqs)
with keys in the (1,'x') format. Default (None) leaves unchanged.
flags: Dictionary like gains but of flags. Default (None) leaves unchanged.
quals: Dictionary like gains but of per-antenna quality. Default (None) leaves unchanged.
add_to_history: appends a string to the history of the output file
clobber: if True, overwrites existing file at outfilename
kwargs: dictionary mapping updated attributes to their new values.
See pyuvdata.UVCal documentation for more info.
'''
# Load infile
if isinstance(infilename, (UVCal, HERACal)):
cal = copy.deepcopy(infilename)
else:
cal = HERACal(infilename)
cal.read()
update_uvcal(cal, gains=gains, flags=flags, quals=quals, add_to_history=add_to_history, **kwargs)
# Write to calfits file
cal.write_calfits(outfilename, clobber=clobber)
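# A minimal usage sketch for update_cal; the paths are placeholders and new_gains stands for a
# dictionary with the same keys and (Ntimes, Nfreqs) shapes already present in the calfits file:
#     update_cal('in.calfits', 'out.calfits', gains=new_gains,
#                add_to_history='gains smoothed', clobber=True)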
def baselines_from_filelist_position(filename, filelist):
"""Determine indices of baselines to process.
This function determines antpairs to process given the position of a filename
in a list of files.
Parameters
----------
filename : string
name of the file being processed.
filelist : list of strings
name of all files over which computations are being parallelized.
Returns
-------
list
list of antpairs to process based on the position of the filename in the list of files.
"""
# The reason this function is not in utils is that it needs to use HERAData
hd = HERAData(filename)
bls = list(set([bl[:2] for bl in hd.bls]))
file_index = filelist.index(filename)
nfiles = len(filelist)
# Determine chunk size
nbls = len(bls)
chunk_size = nbls // nfiles + 1
lower_index = file_index * chunk_size
upper_index = np.min([(file_index + 1) * chunk_size, nbls])
output = bls[lower_index:upper_index]
return output
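# A minimal usage sketch; filelist stands for the full list of uvh5 paths over which the
# baseline processing is being parallelized:
#     bls_to_process = baselines_from_filelist_position(filelist[0], filelist)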
def throw_away_flagged_ants(infilename, outfilename, yaml_file=None, throw_away_fully_flagged_data_baselines=False, clobber=False):
"""Throw away completely flagged data.
Parameters
----------
infilename: str
path to a UVData file in uvh5 format.
outfilename: str
path to file to output trimmed data file.
yaml_file: str
path to a yaml flagging file with a list of antennas to flag.
Default is None.
throw_away_fully_flagged_data_baselines: bool, optional
if True, also throw away baselines where all data is flagged.
Warning: don't use this for files with a small number of time integrations,
since fully flagged baselines can easily occur there by chance.
Default is False
clobber: bool, optional
overwrite output file if it already exists.
Default is False.
Returns
-------
hd: HERAData object
HERAData object containing data from infilename with baselines thrown out.
"""
hd = HERAData(infilename)
hd.read()
# throw away flagged antennas in yaml file.
if yaml_file is not None:
from hera_qm import utils as qm_utils
qm_utils.apply_yaml_flags(uv=hd, a_priori_flag_yaml=yaml_file,
ant_indices_only=True, flag_ants=True, flag_freqs=False,
flag_times=False, throw_away_flagged_ants=True)
# Write data
if throw_away_fully_flagged_data_baselines:
antpairs_to_keep = []
antpairs_not_to_keep = []
for antpair in hd.get_antpairs():
fully_flagged = True
for pol in hd.pols:
fully_flagged = fully_flagged & np.all(hd.get_flags(antpair + (pol, )))
if not fully_flagged:
antpairs_to_keep.append(antpair)
else:
antpairs_not_to_keep.append(antpair)
hd.select(bls=antpairs_to_keep)
else:
antpairs_not_to_keep = None
# write to history.
history_string = f"Threw away flagged antennas from yaml_file={yaml_file} using throw_away_flagged_ants.\n"
history_string += f"Also threw out {antpairs_not_to_keep} because data was fully flagged.\n"
hd.history += utils.history_string(notes=history_string)
hd.write_uvh5(outfilename, clobber=clobber)
return hd
def throw_away_flagged_ants_parser():
# Parse arguments
ap = argparse.ArgumentParser(description="Throw away baselines whose antennas are flagged in a yaml file or which have all integrations/chans flagged.")
ap.add_argument("infilename", type=str, help="path to visibility data from which to throw out flagged baselines.")
ap.add_argument("outfilename", type=str, help="path to write the new visibility file with flagged baselines thrown out.")
ap.add_argument("--yaml_file", default=None, type=str, help='yaml file with list of antennas to throw away.')
ap.add_argument("--throw_away_fully_flagged_data_baselines", default=False, action="store_true",
help="Also throw away baselines that have all channels and integrations flagged.")
ap.add_argument("--clobber", default=False, action="store_true", help='overwrites existing file at outfile')
return ap
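# A minimal sketch tying the parser to throw_away_flagged_ants; the file and yaml names are
# placeholders:
#     ap = throw_away_flagged_ants_parser()
#     args = ap.parse_args(['in.uvh5', 'out.uvh5', '--yaml_file', 'a_priori_flags.yaml', '--clobber'])
#     throw_away_flagged_ants(args.infilename, args.outfilename, yaml_file=args.yaml_file,
#                             throw_away_fully_flagged_data_baselines=args.throw_away_fully_flagged_data_baselines,
#                             clobber=args.clobber)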
def uvdata_from_fastuvh5(
meta: FastUVH5Meta,
antpairs: list[tuple[int, int]] | None = None,
times: np.ndarray | None = None,
lsts: np.ndarray | None = None,
start_jd: float | None = None,
lst_branch_cut: float = 0.0,
**kwargs
) -> UVData:
"""Convert a FastUVH5Meta object to a UVData object.
This is a convenience function to convert a FastUVH5Meta object to a UVData
object, and update some of the metadata.
Parameters
----------
meta : FastUVH5Meta
The metadata object to convert.
antpairs : list of tuple of int, optional
Antenna pairs to include. Defaults to the antpairs in the metadata.
times : np.ndarray, optional
Times (JD) to include. Defaults to the times in the metadata, or, if only
lsts are given, is computed from lsts and start_jd.
lsts : np.ndarray, optional
LSTs (radians) to include. Computed from times if not given.
start_jd : float, optional
Integer Julian Date used to convert lsts to times when times is not given.
lst_branch_cut : float, optional
LST below which LSTs are wrapped to higher JDs when converting lsts to times.
kwargs
Other UVData attributes to overwrite on the output object.
Returns
-------
UVData
The UVData object.
"""
uvd = meta.to_uvdata()
if not meta.blts_are_rectangular:
raise NotImplementedError("Cannot convert non-rectangular blts to UVData.")
if times is None and lsts is None:
times = meta.times
lsts = meta.lsts
elif times is None:
# compute times from the provided lsts and start_jd
if start_jd is None:
raise AttributeError("if times is not given, start_jd must be given")
times = LST2JD(
lsts, start_jd=start_jd, allow_other_jd=True, lst_branch_cut=lst_branch_cut,
latitude=meta.telescope_location_lat_lon_alt_degrees[0],
longitude=meta.telescope_location_lat_lon_alt_degrees[1],
altitude=meta.telescope_location_lat_lon_alt_degrees[2],
)
elif lsts is None:
lsts = JD2LST(times, *meta.telescope_location_lat_lon_alt_degrees)
else:
assert len(times) == len(lsts)
if antpairs is None:
antpairs = meta.antpairs
if len(times) > 1:
timefirst = kwargs.get(
"time_axis_faster_than_bls", meta.time_axis_faster_than_bls
)
else:
timefirst = False
if not timefirst:
uvd.time_array = np.repeat(times, len(antpairs))
uvd.lst_array = np.repeat(lsts, len(antpairs))
uvd.ant_1_array = np.tile(np.array([antpair[0] for antpair in antpairs]), len(times))
uvd.ant_2_array = np.tile(np.array([antpair[1] for antpair in antpairs]), len(times))
else:
uvd.time_array = np.tile(times, len(antpairs))
uvd.lst_array = np.tile(lsts, len(antpairs))
uvd.ant_1_array = np.repeat(np.array([antpair[0] for antpair in antpairs]), len(times))
uvd.ant_2_array = np.repeat(np.array([antpair[1] for antpair in antpairs]), len(times))
uvd.Nblts = len(uvd.time_array)
uvd.Nbls = len(antpairs)
uvd.Ntimes = len(times)
uvd.integration_time = np.median(meta.integration_time) * np.ones(uvd.Nblts)
uvd.baseline_array = uvutils.antnums_to_baseline(
uvd.ant_1_array, uvd.ant_2_array, Nants_telescope=uvd.Nants_telescope
)
uvd.phase_center_id_array = np.zeros(uvd.Nblts, dtype=int)
uvd._set_app_coords_helper()
uvd.extra_keywords = meta.extra_keywords
uvd.set_uvws_from_antenna_positions()
# Overwrite some of the metadata.
for key, value in kwargs.items():
setattr(uvd, key, value)
# For dependent metadata, reset it to be consistent with the new metadata.
uvd.Nfreqs = uvd.freq_array.size
uvd.Npols = len(uvd.polarization_array)
uvd.spw_array = np.unique(uvd.flex_spw_id_array)
uvd.Nspws = len(uvd.spw_array)
uvd.Nants_data = len(np.unique(np.concatenate((uvd.ant_1_array, uvd.ant_2_array))))
uvd.telescope.Nants_telescope = len(uvd.telescope.antenna_numbers)
uvd.blts_are_rectangular = True
uvd.time_axis_faster_than_bls = timefirst
try:
uvd.pol_convention = meta.pol_convention
except AttributeError:
pass
try:
uvd.vis_units = meta.vis_units
except AttributeError:
pass
# This needs to be done, though it should be fixed in pyuvdata
uvd.history += uvd.pyuvdata_version_str
return uvd
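# A minimal usage sketch for uvdata_from_fastuvh5; the path is a placeholder, and by default
# the times, lsts, and antpairs are taken from the metadata itself:
#     meta = FastUVH5Meta('zen.2459122.30030.sum.uvh5')
#     uvd = uvdata_from_fastuvh5(meta, history='metadata-only UVData built for writing')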
|
HERA-TeamREPO_NAMEhera_calPATH_START.@hera_cal_extracted@hera_cal-main@[email protected]@.PATH_END.py
|
{
"filename": "Examples Green Taxi.ipynb",
"repo_name": "NannyML/nannyml",
"repo_path": "nannyml_extracted/nannyml-main/docs/example_notebooks/Examples Green Taxi.ipynb",
"type": "Jupyter Notebook"
}
|
```python
# Uncomment if you are running this on Google Colab
# !pip install nannyml
# !pip install numpy==1.22
```
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
from lightgbm import LGBMRegressor, plot_importance
import nannyml as nml
```
```python
# Read data from url
url = "https://d37ci6vzurychx.cloudfront.net/trip-data/green_tripdata_2016-12.parquet"
columns = ['lpep_pickup_datetime', 'PULocationID', 'DOLocationID', 'trip_distance', 'VendorID', 'payment_type', 'fare_amount', 'tip_amount']
data = pd.read_parquet(url, columns=columns)
```
```python
print(data.head(3).to_markdown(tablefmt="grid"))
```
+----+------------------------+----------------+----------------+-----------------+------------+----------------+---------------+--------------+
| | lpep_pickup_datetime | PULocationID | DOLocationID | trip_distance | VendorID | payment_type | fare_amount | tip_amount |
+====+========================+================+================+=================+============+================+===============+==============+
| 0 | 2016-12-01 00:13:25 | 225 | 65 | 2.79 | 2 | 2 | 11 | 0 |
+----+------------------------+----------------+----------------+-----------------+------------+----------------+---------------+--------------+
| 1 | 2016-12-01 00:06:47 | 255 | 255 | 0.45 | 2 | 1 | 3.5 | 0.96 |
+----+------------------------+----------------+----------------+-----------------+------------+----------------+---------------+--------------+
| 2 | 2016-12-01 00:29:45 | 41 | 42 | 1.2 | 1 | 3 | 6 | 0 |
+----+------------------------+----------------+----------------+-----------------+------------+----------------+---------------+--------------+
```python
# Choose only payments from Credit Cards
data = data.loc[data['payment_type'] == 1,].drop(columns='payment_type') # Credit card
# Choose only positive tip amounts
data = data[data['tip_amount'] >= 0]
# Sort data by pick up date
data = data.sort_values('lpep_pickup_datetime').reset_index(drop=True)
# Flag categoric columns as categoric
categoric_columns = ['PULocationID', 'DOLocationID', 'VendorID']
data[categoric_columns] = data[categoric_columns].astype('category')
# Create column with pick up time
data['pickup_time'] = data['lpep_pickup_datetime'].dt.hour
```
```python
# Create data partition
data['partition'] = pd.cut(
data['lpep_pickup_datetime'],
bins= [pd.to_datetime('2016-12-01'),
pd.to_datetime('2016-12-08'),
pd.to_datetime('2016-12-16'),
pd.to_datetime('2017-01-01')],
right=False,
labels= ['train', 'test', 'prod']
)
```
```python
# Set target and features
target = 'tip_amount'
features = [col for col in data.columns if col not in [target, 'lpep_pickup_datetime', 'partition']]
# Split the data
X_train = data.loc[data['partition'] == 'train', features]
y_train = data.loc[data['partition'] == 'train', target]
X_test = data.loc[data['partition'] == 'test', features]
y_test = data.loc[data['partition'] == 'test', target]
X_prod = data.loc[data['partition'] == 'prod', features]
y_prod = data.loc[data['partition'] == 'prod', target]
```
```python
display(y_train.describe().to_frame())
```
+-------+---------------+
|       |    tip_amount |
+=======+===============+
| count | 141568.000000 |
+-------+---------------+
| mean  |      2.363484 |
+-------+---------------+
| std   |      2.817078 |
+-------+---------------+
| min   |      0.000000 |
+-------+---------------+
| 25%   |      1.060000 |
+-------+---------------+
| 50%   |      1.960000 |
+-------+---------------+
| 75%   |      3.000000 |
+-------+---------------+
| max   |    250.700000 |
+-------+---------------+
```python
y_train.plot(kind='box')
plt.savefig('../_static/example_green_taxi_tip_amount_boxplot.svg', format='svg')
plt.show()
y_train.clip(lower=0, upper=y_train.quantile(0.8)).to_frame().hist()
plt.savefig('../_static/example_green_taxi_tip_amount_distribution.svg', format='svg')
plt.show()
```


```python
# Fit the model
model = LGBMRegressor(random_state=111)
model.fit(X_train, y_train)
# Make predictions
y_pred_train = model.predict(X_train)
y_pred_test = model.predict(X_test)
```
```python
# Make baseline predictions
y_pred_train_baseline = np.ones_like(y_train) * y_train.mean()
y_pred_test_baseline = np.ones_like(y_test) * y_train.mean()
# Measure train, test and baseline performance
mae_train = mean_absolute_error(y_train, y_pred_train).round(4)
mae_test = mean_absolute_error(y_test, y_pred_test).round(4)
mae_train_baseline = mean_absolute_error(y_train, y_pred_train_baseline).round(4)
mae_test_baseline = mean_absolute_error(y_test, y_pred_test_baseline).round(4)
```
```python
# Create performance report
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12,4))
title1 = 'Train MAE: {} (<> {})'.format(mae_train, mae_train_baseline)
ax1.set(title=title1, xlabel='y_train', ylabel='y_pred')
ax1.plot(y_train, y_train, color='red', linestyle=':')
ax1.scatter(y_train, y_pred_train, alpha=0.1)
title2 = 'Test MAE: {} (<> {})'.format(mae_test, mae_test_baseline)
ax2.set(title=title2, xlabel='y_test', ylabel='y_pred')
ax2.plot(y_test, y_test, color='red', linestyle=':')
ax2.scatter(y_test, y_pred_test, alpha=0.1)
plt.show()
```

```python
# plot the feature importance
fig, ax = plt.subplots()
plot_importance(model, ax=ax)
plt.savefig('../_static/example_green_taxi_feature_importance.svg', format='svg')
plt.show()
```

```python
y_pred_prod = model.predict(X_prod)
```
```python
reference_df = X_test.copy() # using the test set as a reference
reference_df['y_pred'] = y_pred_test # reference predictions
reference_df['tip_amount'] = y_test # ground truth (correct targets)
reference_df = reference_df.join(data['lpep_pickup_datetime']) # date
analysis_df = X_prod.copy() # features
analysis_df['y_pred'] = y_pred_prod # prod predictions
analysis_df = analysis_df.join(data['lpep_pickup_datetime']) # date
```
```python
dle = nml.DLE(
metrics=['mae'],
y_true='tip_amount',
y_pred='y_pred',
feature_column_names=features,
timestamp_column_name='lpep_pickup_datetime',
chunk_period='d' # perform an estimation daily
)
dle.fit(reference_df) # fit on the reference (test) data
estimated_performance = dle.estimate(analysis_df) # estimate on the prod data
```
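The estimated performance can also be filtered before plotting, for example to look at the analysis (production) period on its own; a minimal sketch reusing the result object above (the output filename is arbitrary):
```python
figure = estimated_performance.filter(period='analysis').plot()
figure.write_image('../_static/example_green_taxi_dle_analysis_only.svg')
```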
```python
figure = estimated_performance.plot()
figure.write_image(f'../_static/example_green_taxi_dle.svg')
```
```python
drdc = nml.DataReconstructionDriftCalculator(
column_names=features,
timestamp_column_name='lpep_pickup_datetime',
chunk_period='d',
)
drdc.fit(reference_df)
multivariate_data_drift = drdc.calculate(analysis_df)
```
```python
figure = multivariate_data_drift.plot()
figure.write_image(f'../_static/example_green_taxi_pca_error.svg')
```
```python
udc = nml.UnivariateDriftCalculator(
column_names=features,
timestamp_column_name='lpep_pickup_datetime',
chunk_period='d',
)
udc.fit(reference_df)
univariate_data_drift = udc.calculate(analysis_df)
```
```python
figure = univariate_data_drift.filter(period='all', metrics='jensen_shannon', column_names=['DOLocationID']).plot(kind='distribution')
figure.write_image(f'../_static/example_green_taxi_location_udc.svg')
```
```python
figure = univariate_data_drift.filter(period='all', metrics='jensen_shannon', column_names=['pickup_time']).plot(kind='distribution')
figure.write_image(f'../_static/example_green_taxi_pickup_udc.svg')
```
```python
figure = univariate_data_drift.filter(period='all', metrics='jensen_shannon').plot(kind='distribution')
figure.write_image(f'../_static/example_green_taxi_all_udc.svg')
```
```python
perfc = nml.PerformanceCalculator(
metrics=['mae'],
y_true='tip_amount',
y_pred='y_pred',
problem_type='regression',
timestamp_column_name='lpep_pickup_datetime',
chunk_period='d'
)
perfc.fit(reference_df)
realized_performance = perfc.calculate(analysis_df.assign(tip_amount = y_prod))
figure = estimated_performance.filter(period='analysis').compare(realized_performance).plot()
figure.write_image(f'../_static/example_green_taxi_dle_vs_realized.svg')
```
|
NannyMLREPO_NAMEnannymlPATH_START.@nannyml_extracted@nannyml-main@docs@example_notebooks@Examples Green [email protected]_END.py
|
{
"filename": "sun_mask.py",
"repo_name": "TianlaiProject/tlpipe",
"repo_path": "tlpipe_extracted/tlpipe-master/tlpipe/timestream/sun_mask.py",
"type": "Python"
}
|
"""Mask the data when signal of the Sun is strong.
Inheritance diagram
-------------------
.. inheritance-diagram:: Mask
:parts: 2
"""
import numpy as np
import ephem
import aipy as a
import timestream_task
class Mask(timestream_task.TimestreamTask):
"""Mask the data when signal of the Sun is strong."""
params_init = {
'span': 30, # minutes
}
prefix = 'sm_'
def process(self, ts):
span = self.params['span']
nt = ts.local_vis.shape[0] # number of time points of this process
if nt > 0:
srclist, cutoff, catalogs = a.scripting.parse_srcs('Sun', 'misc')
cat = a.src.get_catalog(srclist, cutoff, catalogs)
s = list(cat.values())[0] # the Sun
# get transit time of calibrator
# array
aa = ts.array
local_juldate = ts['jul_date'].local_data
aa.set_jultime(local_juldate[0]) # the first obs time point of this process
mask_inds = []
# previous transit
prev_transit = aa.previous_transit(s)
prev_transit_start = a.phs.ephem2juldate(prev_transit - 0.5 * span * ephem.minute) # Julian date
prev_transit_end = a.phs.ephem2juldate(prev_transit + 0.5 * span * ephem.minute) # Julian date
prev_transit_start_ind = np.searchsorted(local_juldate, prev_transit_start, side='left')
prev_transit_end_ind = np.searchsorted(local_juldate, prev_transit_end, side='right')
if prev_transit_end_ind > 0:
mask_inds.append((prev_transit_start_ind, prev_transit_end_ind))
# next transit
next_transit = aa.next_transit(s)
next_transit_start = a.phs.ephem2juldate(next_transit - 0.5 * span * ephem.minute) # Julian date
next_transit_end = a.phs.ephem2juldate(next_transit + 0.5 * span * ephem.minute) # Julian date
next_transit_start_ind = np.searchsorted(local_juldate, next_transit_start, side='left')
next_transit_end_ind = np.searchsorted(local_juldate, next_transit_end, side='right')
if next_transit_start_ind < nt:
mask_inds.append((next_transit_start_ind, next_transit_end_ind))
# then all next transit if data is long enough
while (next_transit_end_ind < nt):
aa.set_jultime(next_transit_end)
next_transit = aa.next_transit(s)
next_transit_start = a.phs.ephem2juldate(next_transit - 0.5 * span * ephem.minute) # Julian date
next_transit_end = a.phs.ephem2juldate(next_transit + 0.5 * span * ephem.minute) # Julian date
next_transit_start_ind = np.searchsorted(local_juldate, next_transit_start, side='left')
next_transit_end_ind = np.searchsorted(local_juldate, next_transit_end, side='right')
if next_transit_start_ind < nt:
mask_inds.append((next_transit_start_ind, next_transit_end_ind))
# set mask
for si, ei in mask_inds:
ts.local_vis_mask[si:ei] = True # do not change vis directly
return super(Mask, self).process(ts)
|
TianlaiProjectREPO_NAMEtlpipePATH_START.@tlpipe_extracted@tlpipe-master@tlpipe@timestream@[email protected]_END.py
|
{
"filename": "apero_loc_nirps_he.py",
"repo_name": "njcuk9999/apero-drs",
"repo_path": "apero-drs_extracted/apero-drs-main/apero/recipes/nirps_he/apero_loc_nirps_he.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
apero_loc_nirps_he.py [obs dir] [files]
APERO localisation calibration recipe for NIRPS HE
Created on 2019-05-14 at 09:40
@author: cook
"""
from typing import Any, Dict, List, Optional, Tuple, Union
from apero import lang
from apero.base import base
from apero.core import constants
from apero.core.core import drs_database
from apero.core.core import drs_file
from apero.core.core import drs_log
from apero.core.utils import drs_recipe
from apero.core.utils import drs_startup
from apero.science.calib import gen_calib
from apero.science.calib import localisation
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'apero_loc_nirps_he.py'
__INSTRUMENT__ = 'NIRPS_HE'
__PACKAGE__ = base.__PACKAGE__
__version__ = base.__version__
__author__ = base.__author__
__date__ = base.__date__
__release__ = base.__release__
# Get Logging function
WLOG = drs_log.wlog
# Get Recipe class
DrsRecipe = drs_recipe.DrsRecipe
# Get parameter class
ParamDict = constants.ParamDict
# Get the text types
textentry = lang.textentry
# alias pcheck
pcheck = constants.PCheck(wlog=WLOG)
# =============================================================================
# Define functions
# =============================================================================
# All recipe code goes in _main
# Only change the following from here:
# 1) function calls (i.e. main(arg1, arg2, **kwargs)
# 2) fkwargs (i.e. fkwargs=dict(arg1=arg1, arg2=arg2, **kwargs)
# 3) config_main outputs value (i.e. None, pp, reduced)
# Everything else is controlled from recipe_definition
def main(obs_dir: Optional[str] = None, files: Optional[List[str]] = None,
**kwargs) -> Union[Dict[str, Any], Tuple[DrsRecipe, ParamDict]]:
"""
Main function for apero_loc
:param obs_dir: string, the night name sub-directory
:param files: list of strings or string, the list of files to process
:param kwargs: any additional keywords
:type obs_dir: str
:type files: list[str]
:keyword debug: int, debug level (0 for None)
:returns: dictionary of the local space
:rtype: dict
"""
# assign function calls (must add positional)
fkwargs = dict(obs_dir=obs_dir, files=files, **kwargs)
# ----------------------------------------------------------------------
# deal with command line inputs / function call inputs
recipe, params = drs_startup.setup(__NAME__, __INSTRUMENT__, fkwargs)
# solid debug mode option
if kwargs.get('DEBUG0000', False):
return recipe, params
# ----------------------------------------------------------------------
# run main bulk of code (catching all errors)
llmain, success = drs_startup.run(__main__, recipe, params)
# ----------------------------------------------------------------------
# End Message
# ----------------------------------------------------------------------
return drs_startup.end_main(params, llmain, recipe, success)
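# A minimal call sketch, with placeholder arguments (the observation directory and file names
# must correspond to entries known to APERO):
#     ll = main(obs_dir='2022-11-15', files=['file1_pp.fits', 'file2_pp.fits'])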
def __main__(recipe: DrsRecipe, params: ParamDict) -> Dict[str, Any]:
"""
Main code: should only call recipe and params (defined from main)
:param recipe: DrsRecipe, the recipe class using this function
:param params: ParamDict, the parameter dictionary of constants
:return: dictionary containing the local variables
"""
# ----------------------------------------------------------------------
# Main Code
# ----------------------------------------------------------------------
mainname = __NAME__ + '._main()'
# check qc
if 'files' in params['DATA_DICT']:
infiles = params['DATA_DICT']['files']
else:
# get files
infiles = params['INPUTS']['FILES'][1]
# check the quality control from input files
infiles = drs_file.check_input_qc(params, infiles, 'files')
# loc is run twice; we need to check that all input files can be used
# together and that we are not mixing both types
infiles = drs_file.check_input_dprtypes(params, recipe, infiles)
# get list of filenames (for output)
rawfiles = []
for infile in infiles:
rawfiles.append(infile.basename)
# deal with input data from function
if 'files' in params['DATA_DICT']:
rawfiles = params['DATA_DICT']['rawfiles']
combine = params['DATA_DICT']['combine']
# combine input images if required
elif params['INPUT_COMBINE_IMAGES']:
# get combined file
cond = drs_file.combine(params, recipe, infiles, math='median',
same_type=False)
infiles = [cond[0]]
combine = True
else:
combine = False
# get the number of infiles
num_files = len(infiles)
# load the calibration database
calibdbm = drs_database.CalibrationDatabase(params)
calibdbm.load_db()
# ----------------------------------------------------------------------
# Loop around input files
# ----------------------------------------------------------------------
for it in range(num_files):
# ------------------------------------------------------------------
# add level to recipe log
log1 = recipe.log.add_level(params, 'num', it)
# ------------------------------------------------------------------
# set up plotting (no plotting before this)
recipe.plot.set_location(it)
# print file iteration progress
drs_startup.file_processing_update(params, it, num_files)
# get this iteration's file
infile = infiles[it]
# get header from file instance
header = infile.get_header()
# ------------------------------------------------------------------
# Correction of file
# ------------------------------------------------------------------
props, image = gen_calib.calibrate_ppfile(params, recipe, infile,
database=calibdbm)
# ------------------------------------------------------------------
# Identify fiber type
# ------------------------------------------------------------------
# get pconst
pconst = constants.pload()
# identify fiber type based on data type
fiber = pconst.FIBER_DPRTYPE(dprtype=props['DPRTYPE'])
if fiber is None:
eargs = [props['DPRTYPE'], recipe.name, 'FLAT_DARK or DARK_FLAT',
infile.basename]
WLOG(params, 'error', textentry('00-013-00001', args=eargs))
fiber = None
# set a flag for fiber type in logging
science_fiber, _ = pconst.FIBER_KINDS()
if fiber in science_fiber:
log1.update_flags(SCIFIBER=True)
else:
log1.update_flags(REFFIBER=True)
# ------------------------------------------------------------------
# Construct image order_profile
# ------------------------------------------------------------------
order_profile = localisation.calculate_order_profile(params, image)
# ------------------------------------------------------------------
# Localization of orders on central column
# ------------------------------------------------------------------
# find and fit localisation
_fibers = pconst.FIBER_LOCALISATION(fiber)
ldict = dict()
for _fiber in _fibers:
lout = localisation.calc_localisation(params, recipe, image, _fiber)
ldict[_fiber] = lout
# deal with merging coefficients and formatting for use as they
# were in older codes (may be redundant in future)
m_out = localisation.merge_coeffs(params, ldict, image.shape[1])
cent_coeffs, wid_coeffs, fibername = m_out
# ------------------------------------------------------------------
# Localisation stats (for header and quality control)
# ------------------------------------------------------------------
lprops = localisation.loc_stats(params, fiber, cent_coeffs, wid_coeffs,
order_profile)
# ------------------------------------------------------------------
# Plot the image and fit points
# ------------------------------------------------------------------
# plot image above saturation threshold
# plot first and final fit over image
recipe.plot('LOC_IMAGE_FIT', image=image, coeffs=cent_coeffs,
kind=fibername, width_coeffs=wid_coeffs)
recipe.plot('LOC_IM_CORNER', image=image, params=params,
coeffs=cent_coeffs, width_coeffs=wid_coeffs)
# ------------------------------------------------------------------
# Plot of RMS for positions and widths
# ------------------------------------------------------------------
# recipe.plot('LOC_ORD_VS_RMS', rnum=rorder_num, fiber=fiber,
# rms_center=cent_rms, rms_fwhm=wid_rms)
# ------------------------------------------------------------------
# Quality control
# ------------------------------------------------------------------
qc_params, passed = localisation.loc_quality_control(params, lprops)
# update recipe log
log1.add_qc(qc_params, passed)
# ------------------------------------------------------------------
# write files
# ------------------------------------------------------------------
fargs = [infile, image, rawfiles, combine, fiber, props, order_profile,
lprops, qc_params]
outfiles = localisation.write_localisation_files(params, recipe, *fargs)
orderpfile, loco1file = outfiles
# ------------------------------------------------------------------
# Move to calibDB and update calibDB
# ------------------------------------------------------------------
if passed and params['INPUTS']['DATABASE']:
# copy the order profile to the calibDB
calibdbm.add_calib_file(orderpfile)
# copy the loco file to the calibDB
calibdbm.add_calib_file(loco1file)
# ---------------------------------------------------------------------
        # if the recipe is a reference and QC fails we generate an error
# ---------------------------------------------------------------------
if not passed and params['INPUTS']['REF']:
eargs = [recipe.name]
WLOG(params, 'error', textentry('09-000-00011', args=eargs))
# ------------------------------------------------------------------
# Summary plots
# ------------------------------------------------------------------
recipe.plot('SUM_LOC_IM_FIT', image=image, coeffs=cent_coeffs,
kind=fibername, width_coeffs=wid_coeffs)
recipe.plot('SUM_LOC_IM_CORNER', image=image, params=params,
coeffs=cent_coeffs, width_coeffs=wid_coeffs)
# ------------------------------------------------------------------
# Construct summary document
# ------------------------------------------------------------------
localisation.loc_summary(recipe, it, params, qc_params, props, lprops)
# ------------------------------------------------------------------
# update recipe log file
# ------------------------------------------------------------------
log1.end()
# ----------------------------------------------------------------------
# End of main code
# ----------------------------------------------------------------------
return locals()
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
# run main with no arguments (get from command line - sys.argv)
ll = main()
# =============================================================================
# End of code
# =============================================================================
|
njcuk9999REPO_NAMEapero-drsPATH_START.@apero-drs_extracted@apero-drs-main@apero@recipes@nirps_he@[email protected]_END.py
|
{
"filename": "_templateitemname.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/streamtube/colorbar/tickformatstop/_templateitemname.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="templateitemname",
parent_name="streamtube.colorbar.tickformatstop",
**kwargs,
):
super(TemplateitemnameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@streamtube@colorbar@tickformatstop@[email protected]_END.py
|
{
"filename": "test_measurementset.py",
"repo_name": "lwa-project/lsl",
"repo_path": "lsl_extracted/lsl-main/tests/test_measurementset.py",
"type": "Python"
}
|
"""
Unit test for the lsl.writer.measurementset module.
"""
import os
import time
import ephem
import unittest
import tempfile
import numpy as np
import shutil
from lsl.common import stations as lwa_common
from lsl.correlator import uvutils
from lsl.writer import measurementset
from lsl.astro import unix_to_taimjd
run_ms_tests = False
try:
import casacore
run_ms_tests = True
except ImportError:
pass
__version__ = "0.1"
__author__ = "Jayce Dowell"
@unittest.skipUnless(run_ms_tests, "requires the 'casacore' module")
class measurementset_tests(unittest.TestCase):
"""A unittest.TestCase collection of unit tests for the lsl.writer.measurementset.Ms
class."""
def setUp(self):
"""Turn off all numpy warnings and create the temporary file directory."""
np.seterr(all='ignore')
self.testPath = tempfile.mkdtemp(prefix='test-measurementset-', suffix='.tmp')
def _init_data(self):
"""Private function to generate a random set of data for writing a UVFITS
file. The data is returned as a dictionary with keys:
* freq - frequency array in Hz
* site - lwa.common.stations object
* stands - array of stand numbers
* bl - list of baseline pairs in real stand numbers
* vis - array of visibility data in baseline x freq format
"""
# Frequency range
freq = np.arange(0,512)*20e6/512 + 40e6
# Site and stands
site = lwa_common.lwa1
antennas = site.antennas[0:40:2]
# Set baselines and data
blList = uvutils.get_baselines(antennas, include_auto=True)
visData = np.random.rand(len(blList), len(freq))
visData = visData.astype(np.complex64)
return {'freq': freq, 'site': site, 'antennas': antennas, 'bl': blList, 'vis': visData}
def test_write_tables(self):
"""Test if the MeasurementSet writer writes all of the tables."""
testTime = time.time()
testFile = os.path.join(self.testPath, 'ms-test-W.ms')
# Get some data
data = self._init_data()
# Create a source other than zenith to try
source = ephem.FixedBody()
source._ra = 0.0
source._dec = np.pi/2
source._epoch = ephem.J2000
source.compute(data['site'])
# Start the table
tbl = measurementset.Ms(testFile, ref_time=testTime)
tbl.set_stokes(['xx'])
tbl.set_frequency(data['freq'])
tbl.set_geometry(data['site'], data['antennas'])
tbl.add_data_set(unix_to_taimjd(testTime), 6.0, data['bl'], data['vis'])
tbl.add_data_set(unix_to_taimjd(testTime+6.0), 6.0, data['bl'], data['vis'],
source=source)
tbl.write()
# Make sure everyone is there
self.assertTrue(os.path.exists(testFile))
for tbl in ('ANTENNA', 'DATA_DESCRIPTION', 'FEED', 'FIELD', 'FLAG_CMD', 'HISTORY',
'OBSERVATION', 'POINTING', 'POLARIZATION', 'PROCESSOR', 'SOURCE',
'SPECTRAL_WINDOW', 'STATE'):
self.assertTrue(os.path.exists(os.path.join(testFile, tbl)))
def test_writer_errors(self):
"""Test that common measurement set error conditions are caught."""
testTime = time.time()
testFile = os.path.join(self.testPath, 'ms-test-ERR.ms')
# Get some data
data = self._init_data()
for i in range(4):
# Start the file
ms = measurementset.Ms(testFile, ref_time=testTime, overwrite=True)
if i != 0:
ms.set_stokes(['xx'])
if i != 1:
ms.set_frequency(data['freq'])
if i != 2:
ms.set_geometry(data['site'], data['antennas'])
if i != 3:
ms.add_data_set(testTime, 6.0, data['bl'], data['vis'])
self.assertRaises(RuntimeError, ms.write)
def test_main_table(self):
"""Test the primary data table."""
testTime = time.time()
testFile = os.path.join(self.testPath, 'ms-test-UV.ms')
# Get some data
data = self._init_data()
# Start the table
tbl = measurementset.Ms(testFile, ref_time=testTime)
tbl.set_stokes(['xx'])
tbl.set_frequency(data['freq'])
tbl.set_geometry(data['site'], data['antennas'])
tbl.add_data_set(unix_to_taimjd(testTime), 6.0, data['bl'], data['vis'])
tbl.write()
# Open the table and examine
ms = casacore.tables.table(testFile, ack=False)
uvw = ms.getcol('UVW')
ant1 = ms.getcol('ANTENNA1')
ant2 = ms.getcol('ANTENNA2')
vis = ms.getcol('DATA')
ms2 = casacore.tables.table(os.path.join(testFile, 'ANTENNA'), ack=False)
mapper = ms2.getcol('NAME')
mapper = [int(m[3:], 10) for m in mapper]
# Correct number of visibilities
self.assertEqual(uvw.shape[0], data['vis'].shape[0])
self.assertEqual(vis.shape[0], data['vis'].shape[0])
# Correct number of uvw coordinates
self.assertEqual(uvw.shape[1], 3)
# Correct number of frequencies
self.assertEqual(vis.shape[1], data['freq'].size)
# Correct values
for row in range(uvw.shape[0]):
stand1 = ant1[row]
stand2 = ant2[row]
visData = vis[row,:,0]
# Find out which visibility set in the random data corresponds to the
# current visibility
i = 0
for a1,a2 in data['bl']:
if a1.stand.id == mapper[stand1] and a2.stand.id == mapper[stand2]:
break
else:
i = i + 1
# Run the comparison
for vd, sd in zip(visData, data['vis'][i,:]):
self.assertAlmostEqual(vd, sd, 8)
i = i + 1
ms.close()
ms2.close()
def test_multi_if(self):
"""writing more than one spectral window to a MeasurementSet."""
testTime = time.time()
testFile = os.path.join(self.testPath, 'ms-test-MultiIF.ms')
# Get some data
data = self._init_data()
# Start the table
tbl = measurementset.Ms(testFile, ref_time=testTime)
tbl.set_stokes(['xx'])
tbl.set_frequency(data['freq'])
tbl.set_frequency(data['freq']+10e6)
tbl.set_geometry(data['site'], data['antennas'])
tbl.add_data_set(unix_to_taimjd(testTime), 6.0, data['bl'],
np.concatenate([data['vis'], 10*data['vis']], axis=1))
tbl.write()
# Open the table and examine
ms = casacore.tables.table(testFile, ack=False)
uvw = ms.getcol('UVW')
ant1 = ms.getcol('ANTENNA1')
ant2 = ms.getcol('ANTENNA2')
ddsc = ms.getcol('DATA_DESC_ID')
vis = ms.getcol('DATA')
ms2 = casacore.tables.table(os.path.join(testFile, 'ANTENNA'), ack=False)
mapper = ms2.getcol('NAME')
mapper = [int(m[3:], 10) for m in mapper]
ms3 = casacore.tables.table(os.path.join(testFile, 'DATA_DESCRIPTION'), ack=False)
spw = [i for i in ms3.getcol('SPECTRAL_WINDOW_ID')]
# Correct number of visibilities
self.assertEqual(uvw.shape[0], 2*data['vis'].shape[0])
self.assertEqual(vis.shape[0], 2*data['vis'].shape[0])
# Correct number of uvw coordinates
self.assertEqual(uvw.shape[1], 3)
# Correct number of frequencies
self.assertEqual(vis.shape[1], data['freq'].size)
# Correct values
for row in range(uvw.shape[0]):
stand1 = ant1[row]
stand2 = ant2[row]
descid = ddsc[row]
visData = vis[row,:,0]
# Find out which visibility set in the random data corresponds to the
# current visibility
i = 0
for a1,a2 in data['bl']:
if a1.stand.id == mapper[stand1] and a2.stand.id == mapper[stand2]:
break
else:
i = i + 1
# Find out which spectral window this corresponds to
if spw[descid] == 0:
compData = data['vis']
else:
compData = 10*data['vis']
# Run the comparison
for vd, sd in zip(visData, compData[i,:]):
self.assertAlmostEqual(vd, sd, 8)
ms.close()
ms2.close()
ms3.close()
def tearDown(self):
"""Remove the test path directory and its contents"""
shutil.rmtree(self.testPath, ignore_errors=True)
class measurementset_test_suite(unittest.TestSuite):
"""A unittest.TestSuite class which contains all of the lsl.reader units
tests."""
def __init__(self):
unittest.TestSuite.__init__(self)
loader = unittest.TestLoader()
self.addTests(loader.loadTestsFromTestCase(measurementset_tests))
if __name__ == '__main__':
unittest.main()
|
lwa-projectREPO_NAMElslPATH_START.@lsl_extracted@lsl-main@tests@[email protected]_END.py
|
{
"filename": "_ticktextsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattercarpet/marker/colorbar/_ticktextsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="ticktextsrc",
parent_name="scattercarpet.marker.colorbar",
**kwargs
):
super(TicktextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattercarpet@marker@colorbar@[email protected]_END.py
|
{
"filename": "prepare_ttv_fit.py",
"repo_name": "MNGuenther/allesfitter",
"repo_path": "allesfitter_extracted/allesfitter-master/allesfitter/prepare_ttv_fit.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 15:50:31 2020
@author:
Dr. Maximilian N. Günther
European Space Agency (ESA)
European Space Research and Technology Centre (ESTEC)
Keplerlaan 1, 2201 AZ Noordwijk, The Netherlands
Email: [email protected]
GitHub: mnguenther
Twitter: m_n_guenther
Web: www.mnguenther.com
"""
from __future__ import print_function, division, absolute_import
#::: plotting settings
import seaborn as sns
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)
sns.set_style({"xtick.direction": "in","ytick.direction": "in"})
sns.set_context(rc={'lines.markeredgewidth': 1})
#::: modules
import numpy as np
import matplotlib.pyplot as plt
import os
import warnings
#::: specific modules
try:
from wotan import flatten
except ImportError:
pass
#::: my modules
import allesfitter
from allesfitter.lightcurves import eclipse_width_smart
from allesfitter.exoworlds_rdx.lightcurves.index_transits import index_transits, get_tmid_observed_transits
from allesfitter.plotting import fullplot, brokenplot, chunkplot, monthplot, tessplot
###############################################################################
#::: prepare TTV fit (if chosen)
###############################################################################
def prepare_ttv_fit(datadir, style='fullplot', max_transits=20):
'''
Inputs:
-------
datadir : str
the working directory for allesfitter; must contain all the data files; output directories and files will also be created inside datadir
style : str
        choose between 'fullplot', 'brokenplot', 'chunkplot', 'monthplot', and 'tessplot'; this defines how the plot looks
default is 'fullplot'
max_transits : int
        the maximum number of transits to be plotted into the same pdf. If there are more transits than `max_transits`,
        additional plots will be created.
Outputs:
--------
None
Notes:
------
This function must be run *after* reduce_phot_data()
Throughout, we use fast_fit_width as the approximation for the transit window
'''
#----------------------------------------------------------------------
#::: setup
#----------------------------------------------------------------------
alles = allesfitter.allesclass(datadir)
window = alles.settings['fast_fit_width']
# colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] #the default sequence of plot colors (for the companions)
if not os.path.exists( os.path.join(datadir,'ttv_preparation') ): os.makedirs(os.path.join(datadir,'ttv_preparation'))
with open(os.path.join(datadir,'ttv_preparation','ttv_initial_guess_params.csv'),'w') as f:
f.write('')
#----------------------------------------------------------------------
#::: get the combined, full data from all instruments
#----------------------------------------------------------------------
time_combined = []
flux_combined = []
for inst in alles.settings['inst_phot']:
time_combined += list(alles.fulldata[inst]['time'])
flux_combined += list(alles.fulldata[inst]['flux'])
ind_sort = np.argsort(time_combined)
time_combined = np.array(time_combined)[ind_sort]
flux_combined = np.array(flux_combined)[ind_sort]
#----------------------------------------------------------------------
#::: get eclipse widths per companion
#----------------------------------------------------------------------
eclipse_width = {}
for companion in alles.settings['companions_phot']:
alles.initial_guess_params_median[companion+'_epoch']
eclipse_width[companion] = eclipse_width_smart(alles.initial_guess_params_median[companion+'_period'],
alles.initial_guess_params_median[companion+'_rr'],
alles.initial_guess_params_median[companion+'_rsuma'],
alles.initial_guess_params_median[companion+'_cosi'],
alles.initial_guess_params_median[companion+'_f_s'],
alles.initial_guess_params_median[companion+'_f_c'])[0]
#----------------------------------------------------------------------
#::: loop over all photometric companions
#----------------------------------------------------------------------
for companion in alles.settings['companions_phot']:
with open(os.path.join(datadir,'ttv_preparation','ttv_initial_guess_params.csv'),'a') as f:
f.write('#TTV companion '+companion+',,,,,\n')
#----------------------------------------------------------------------
#::: compute tmid and a ttv_guess, and make per-transit-plots per companion
#----------------------------------------------------------------------
tmid_estimates = []
tmid_linear_predictions = get_tmid_observed_transits(time_combined,
alles.initial_guess_params_median[companion+'_epoch'],
alles.initial_guess_params_median[companion+'_period'],
window)
N = len(tmid_linear_predictions)
end_transit_index = max_transits
for i, tmid1 in enumerate(tmid_linear_predictions):
plot_index = i % max_transits
if plot_index == 0:
end_transit_index = i + max_transits if i + max_transits < N else N
fig, axes = plt.subplots((end_transit_index - i), 1, figsize=(6, 4 * (end_transit_index - i)), sharey=True, tight_layout=True)
            #::: estimate the observed transit midtime by computing the minimum of the data around the linearly predicted transit midtime
ind_tr1 = np.where((time_combined >= (tmid1 - window/2.)) & (time_combined <= (tmid1 + window/2.)))[0]
tr_times = time_combined[ind_tr1]
tr_flux = flux_combined[ind_tr1]
t_exp = np.median(np.diff(tr_times))
N_points_in_eclipse = int(eclipse_width[companion]/t_exp)
try:
trend = flatten(tr_times, tr_flux, window_length=eclipse_width[companion]/2., method='biweight', return_trend=True)[1]
tmid2 = np.median( tr_times[ np.argsort(trend)[0:int(N_points_in_eclipse/2.)] ] ) # the tmid estimated from the observations as the minimum of the data
except:
warnings.warn('Install wotan for improved performance of prepare_ttv_fit().')
trend = None
tmid2 = np.median( tr_times[ np.argsort(tr_times)[0:int(N_points_in_eclipse/2.)] ] ) # the tmid estimated from the observations as the minimum of the data
ttv_guess = tmid2 - tmid1 # a guess for the TTV
tmid_estimates.append(tmid2)
#::: plot this per transit
ax = axes[plot_index] if isinstance(axes, (list, np.ndarray)) else axes
ax.plot(tr_times, tr_flux, marker='.', ls='none', color=alles.settings[companion+'_color'], rasterized=True)
if trend is not None: ax.plot(tr_times, trend, 'r-')
ax.axvline(tmid1,c='grey',ls='--',label='linear prediction')
ax.axvline(tmid2,c='r',ls='--',label='flux minimum')
ax.set(xlabel='Time', ylabel='Flux', xlim=[tmid1-window/2., tmid1+window/2.])
ax.text(0.95,0.95,'Transit '+str(i+1), va='top', ha='right', transform=ax.transAxes)
with open(os.path.join(datadir,'ttv_preparation','ttv_initial_guess_params.csv'),'a') as f:
f.write(companion+'_ttv_transit_'+str(i+1)+','+np.format_float_positional(ttv_guess,4)+',1,uniform '+np.format_float_positional(ttv_guess-0.01,4)+' '+np.format_float_positional(ttv_guess+0.01,4)+',TTV$_\mathrm{'+companion+';'+str(i+1)+'}$,d\n')
if i == end_transit_index - 1:
ax_for_legend = axes[0] if isinstance(axes, (list, np.ndarray)) else axes
ax_for_legend.legend()
fig.savefig(os.path.join(datadir,'ttv_preparation','ttv_preparation_'+companion+'_per_transit_' + str(end_transit_index) + 'th.pdf'), bbox_inches='tight')
plt.close(fig)
tmid_estimates = np.array(tmid_estimates)
#----------------------------------------------------------------------
        #::: ttv guess O-C plot
#----------------------------------------------------------------------
nr = np.array([ int(np.round( (t-tmid_estimates[0]) / alles.initial_guess_params_median[companion+'_period'] )) for t in tmid_estimates ]) #get corresponding transit number
nr -= int(nr[-1]/2.) #shift into the middle of the data set
period_mean, epoch_mean = np.polyfit(nr, tmid_estimates, 1)
fig, axes = plt.subplots(2,1,figsize=(6,8),tight_layout=True,sharex=True)
axes[0].plot(nr, tmid_estimates, marker='o', ls='none', color=alles.settings[companion+'_color'], label=companion)
axes[0].plot(nr, epoch_mean + nr * period_mean, marker='', ls='--', color='grey')
axes[0].set(ylabel='Transit mid-time')
axes[0].legend()
axes[1].plot(nr, tmid_estimates-tmid_linear_predictions, marker='o', ls='none', color=alles.settings[companion+'_color'])
axes[1].axhline(0, ls='--', color='grey')
axes[1].set(xlabel='Nr.', ylabel='TTV (min.)')
fig.savefig(os.path.join(datadir,'ttv_preparation','ttv_preparation_'+companion+'_oc.pdf'), bbox_inches='tight')
plt.close(fig)
#----------------------------------------------------------------------
#::: compute and output the deviation
#----------------------------------------------------------------------
period_dev = np.abs( (period_mean-alles.initial_guess_params_median[companion+'_period'])/alles.initial_guess_params_median[companion+'_period'] )
epoch_dev = np.abs( (epoch_mean-alles.initial_guess_params_median[companion+'_epoch'])/alles.initial_guess_params_median[companion+'_epoch'] )
print('\nCompanion', companion)
print('Initial guess for mean period and epoch:')
print(np.format_float_positional(alles.initial_guess_params_median[companion+'_period']),
np.format_float_positional(alles.initial_guess_params_median[companion+'_epoch']))
print('New estimate for mean period and epoch:')
print(np.format_float_positional(period_mean,4),
np.format_float_positional(epoch_mean,4))
if (period_dev > 0.01) or (epoch_dev > 0.01):
print('\n! Consider updating your initial guess to these new estimated mean values.')
print('\n! If you do, then you must rerun this code.')
else:
print('\n! Looks great! You are ready to fit.')
#----------------------------------------------------------------------
#::: full lightcurve plot
#----------------------------------------------------------------------
#::: plot all data points
if style=='fullplot':
axes = fullplot(time_combined, flux_combined, color='silver')
elif style=='brokenplot':
axes = brokenplot(time_combined, flux_combined, color='silver')
elif style=='chunkplot':
axes = chunkplot(time_combined, flux_combined, color='silver')
elif style=='monthplot':
axes = monthplot(time_combined, flux_combined, color='silver')
elif style=='tessplot':
axes = tessplot(time_combined, flux_combined, color='silver')
else:
raise ValueError("The keyword argument 'style' must be 'fullplot', 'monthplot', or 'tessplot'.")
#::: mark the tranists/eclipses of each photometric companion
for i, c in enumerate(alles.settings['companions_phot']):
ind_tr, ind_out = index_transits(time_combined, alles.initial_guess_params_median[c+'_epoch'], alles.initial_guess_params_median[c+'_period'], window)
if style=='fullplot':
axes = fullplot(time_combined[ind_tr], flux_combined[ind_tr], color=alles.settings[c+'_color'], ax=axes, label=c)
elif style=='brokenplot':
axes = brokenplot(time_combined[ind_tr], flux_combined[ind_tr], color=alles.settings[c+'_color'], bax=axes, label=c)
elif style=='chunkplot':
axes = chunkplot(time_combined[ind_tr], flux_combined[ind_tr], color=alles.settings[c+'_color'], axes=axes, label=c)
elif style=='monthplot':
axes = monthplot(time_combined[ind_tr], flux_combined[ind_tr], color=alles.settings[c+'_color'], axes=axes, label=c)
elif style=='tessplot':
axes = tessplot(time_combined[ind_tr], flux_combined[ind_tr], color=alles.settings[c+'_color'], axes=axes, label=c)
#::: add legend
axes = np.atleast_1d(axes)
for i, c in enumerate(alles.settings['companions_phot']):
axes[0].text(0.02+i*0.02, 0.95, c, color=alles.settings[c+'_color'], ha='left', va='top', transform=axes[0].transAxes, zorder=15)
#::: add vertical lines and numbers
flux_min = np.nanmin(flux_combined)
flux_max = np.nanmax(flux_combined)
for ax in axes:
for i, tmid in enumerate(alles.data[companion+'_tmid_observed_transits']):
if (tmid>ax.get_xlim()[0]) & (tmid<ax.get_xlim()[1]):
ax.text( tmid, 0.992*flux_min, str(i+1), color=alles.settings[companion+'_color'], ha='center', zorder=12 )
ax.axvline( tmid, color='lightgrey', zorder=11 )
ax.set(ylim=[0.99*flux_min, 1.002*flux_max], title='Companion '+companion)
#::: wrap up
fname = os.path.join(datadir,'ttv_preparation','ttv_preparation_'+companion+'.jpg')
fig = plt.gcf()
fig.savefig(fname, bbox_inches='tight' )
plt.close(fig)
|
MNGuentherREPO_NAMEallesfitterPATH_START.@allesfitter_extracted@allesfitter-master@allesfitter@[email protected]_END.py
|
{
"filename": "qc_wavelength.py",
"repo_name": "LCOGT/banzai-nres",
"repo_path": "banzai-nres_extracted/banzai-nres-main/banzai_nres/qc/qc_wavelength.py",
"type": "Python"
}
|
import numpy as np
from banzai.stages import Stage
from banzai.utils import qc
from banzai.logs import get_logger
from astropy import constants
from astropy import units
from xwavecal.utils.wavelength_utils import find_nearest
from banzai.utils.stats import robust_standard_deviation
logger = get_logger()
class AssessWavelengthSolution(Stage):
def __init__(self, runtime_context):
super(AssessWavelengthSolution, self).__init__(runtime_context)
def do_stage(self, image):
lab_lines = find_nearest(image.features['wavelength'], np.sort(image.line_list))
delta_lambda = image.features['wavelength'] - lab_lines
sigma_delta_lambda = robust_standard_deviation(delta_lambda)
low_scatter_lines = delta_lambda < 3. * sigma_delta_lambda
matched_sigma_delta_lambda = robust_standard_deviation(delta_lambda[low_scatter_lines])
num_detected_lines = len(image.features['wavelength'])
num_matched_lines = np.count_nonzero(low_scatter_lines)
feature_centroid_uncertainty = image.features['centroid_err']
reduced_chi2 = get_reduced_chi_squared(delta_lambda[low_scatter_lines],
feature_centroid_uncertainty[low_scatter_lines])
velocity_precision = get_velocity_precision(image.features['wavelength'][low_scatter_lines],
lab_lines[low_scatter_lines], num_matched_lines)
if num_matched_lines == 0: # get rid of nans in the matched statistics if we have zero matched lines.
matched_sigma_delta_lambda, reduced_chi2, velocity_precision = 0, 0, 0 * units.meter/units.second
# opensearch keys don't have to be the same as the fits headers
qc_results = {'SIGLAM': np.round(matched_sigma_delta_lambda, 4),
'RVPRECSN': np.round(velocity_precision.to(units.meter/units.second).value, 4),
'WAVRCHI2': np.round(reduced_chi2, 4),
'NLINEDET': num_detected_lines,
'NLINES': num_matched_lines}
qc_description = {'SIGLAM': 'wavecal residuals [Angstroms]',
'RVPRECSN': 'wavecal precision [m/s]',
'WAVRCHI2': 'reduced chisquared goodness of wavecal fit',
'NLINEDET': 'Number of lines found on detector',
'NLINES': 'Number of matched lines'}
qc.save_qc_results(self.runtime_context, qc_results, image)
# saving the results to the image header
for key in qc_results.keys():
image.meta[key] = (qc_results[key], qc_description[key])
logger.info(f'wavecal precision (m/s) = {qc_results["RVPRECSN"]}', image=image)
if qc_results['RVPRECSN'] > 10 or qc_results['RVPRECSN'] < 3:
logger.warning(f' Final calibration precision is outside the expected range '
f'wavecal precision (m/s) = '
f'{qc_results["RVPRECSN"]}', image=image)
return image
def get_reduced_chi_squared(values, uncertainty):
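    # Reduced chi-squared of the residuals: sum((value / uncertainty)**2) divided by the number of points.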
return np.sum((values / uncertainty)**2) / len(values)
def get_velocity_precision(image_lines, lab_lines, num_matched_lines):
"""
    Calculate metrics in velocity space (easily understood by users): delta_lambda / lambda * c = delta_v.
    Then divide delta_v by the square root of the number of lines, giving the error on the mean of the residuals.
    """
delta_lambda = image_lines - lab_lines
dlam_overlam = delta_lambda / lab_lines
velocity_precision = robust_standard_deviation(dlam_overlam) / np.sqrt(num_matched_lines) * constants.c
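    # Rough scale with hypothetical numbers: a residual scatter of 0.01 Angstrom at 5000 Angstrom
    # over 400 matched lines gives (0.01 / 5000) * c / sqrt(400), i.e. roughly 30 m/s.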
return velocity_precision
|
LCOGTREPO_NAMEbanzai-nresPATH_START.@banzai-nres_extracted@banzai-nres-main@banzai_nres@qc@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py2/numpy/matrixlib/__init__.py",
"type": "Python"
}
|
"""Sub-package containing the matrix class and related functions.
"""
from __future__ import division, absolute_import, print_function
from .defmatrix import *
__all__ = defmatrix.__all__
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py2@numpy@matrixlib@[email protected]_END.py
|
{
"filename": "SF_class.ipynb",
"repo_name": "oyaron/NGSF",
"repo_path": "NGSF_extracted/NGSF-main/SF_class.ipynb",
"type": "Jupyter Notebook"
}
|
```python
#Make the json file into the first argument
import sys
sys.argv[1] = 'parameters.json'
from NGSF.sf_class import *
```
```python
from NGSF.params import Parameters
Parameters = Parameters(data)
```
```python
this_supernova = Superfit()
```
# Before the fit
```python
this_supernova.plot()
```

```python
this_supernova.mask_galaxy_lines()
```

```python
#this_supernova.mask_gal_lines_and_telluric()
```
```python
this_supernova.sg_error()
```

```python
this_supernova.mask_telluric()
```

```python
this_supernova.name
```
'SN2021urb_2021-08-06_00-00-00_Keck1_LRIS_TNS.flm'
# The Superfit
```python
this_supernova.superfit()
```
Running optimization for spectrum file: SN2021urb_2021-08-06_00-00-00_Keck1_LRIS_TNS with resolution = 10 Å
NGSF started
Runtime: 13.61s

```python
#this_supernova.any_result(2)
```
```python
```
```python
```
```python
this_supernova.results
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>SPECTRUM</th>
<th>GALAXY</th>
<th>SN</th>
<th>CONST_SN</th>
<th>CONST_GAL</th>
<th>Z</th>
<th>A_v</th>
<th>Phase</th>
<th>Band</th>
<th>Frac(SN)</th>
<th>Frac(gal)</th>
<th>CHI2/dof</th>
<th>CHI2/dof2</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>SN2021urb_2021-08-06_00-00-00_Keck1_LRIS_TNS_b...</td>
<td>S0</td>
<td>Ic/1994I/KAST phase-band : -2.57B</td>
<td>0.415407</td>
<td>0.098116</td>
<td>0.127</td>
<td>-2.0</td>
<td>-2.57</td>
<td>B</td>
<td>0.968182</td>
<td>0.031818</td>
<td>5.300340</td>
<td>0.008180</td>
</tr>
<tr>
<th>1</th>
<td>SN2021urb_2021-08-06_00-00-00_Keck1_LRIS_TNS_b...</td>
<td>SB2</td>
<td>Ic-BL/1998bw/DFOSC phase-band : 12.0B</td>
<td>0.675496</td>
<td>0.142187</td>
<td>0.127</td>
<td>-1.2</td>
<td>12.00</td>
<td>B</td>
<td>0.902941</td>
<td>0.097059</td>
<td>5.589958</td>
<td>0.009119</td>
</tr>
<tr>
<th>2</th>
<td>SN2021urb_2021-08-06_00-00-00_Keck1_LRIS_TNS_b...</td>
<td>SB5</td>
<td>Ic-BL/1998bw/EFOSC2-3.6 phase-band : 20.0B</td>
<td>1.302232</td>
<td>0.313257</td>
<td>0.127</td>
<td>1.6</td>
<td>20.00</td>
<td>B</td>
<td>0.552998</td>
<td>0.447002</td>
<td>5.991344</td>
<td>0.009586</td>
</tr>
<tr>
<th>3</th>
<td>SN2021urb_2021-08-06_00-00-00_Keck1_LRIS_TNS_b...</td>
<td>S0</td>
<td>Ia-norm/2009Y/WFCCD phase-band : 12.36B</td>
<td>0.305018</td>
<td>0.649830</td>
<td>0.127</td>
<td>-1.2</td>
<td>12.36</td>
<td>B</td>
<td>0.628687</td>
<td>0.371313</td>
<td>5.718534</td>
<td>0.009894</td>
</tr>
<tr>
<th>4</th>
<td>SN2021urb_2021-08-06_00-00-00_Keck1_LRIS_TNS_b...</td>
<td>SB5</td>
<td>Ic-BL/1998bw/DFOSC phase-band : 7.0B</td>
<td>0.549874</td>
<td>0.299452</td>
<td>0.127</td>
<td>-1.4</td>
<td>7.00</td>
<td>B</td>
<td>0.841632</td>
<td>0.158368</td>
<td>6.230191</td>
<td>0.010163</td>
</tr>
<tr>
<th>5</th>
<td>SN2021urb_2021-08-06_00-00-00_Keck1_LRIS_TNS_b...</td>
<td>SB5</td>
<td>Ic-BL/1998bw/DFOSC phase-band : 46.24B</td>
<td>0.812917</td>
<td>0.343553</td>
<td>0.127</td>
<td>0.0</td>
<td>46.24</td>
<td>B</td>
<td>0.696958</td>
<td>0.303042</td>
<td>6.785714</td>
<td>0.011052</td>
</tr>
</tbody>
</table>
</div>
```python
for i in range(4):
this_supernova.any_result(i)
```




```python
```
```python
```
### Versions
```python
import numpy
print('numpy version: '+numpy.__version__)
import scipy
print('scipy version: '+scipy.__version__)
import matplotlib
print('matplotlib version: '+matplotlib.__version__)
import astropy
print('astropy version: '+astropy.__version__)
import pandas
print('Pandas version: ' +pandas.__version__)
import PyAstronomy
print('PyAstronomy version: '+PyAstronomy.__version__)
import json
print('json version: '+json.__version__)
import sys
print("Python version: ")
print (sys.version)
```
numpy version: 1.21.3
scipy version: 1.2.1
matplotlib version: 3.4.3
astropy version: 4.3.1
Pandas version: 1.3.4
PyAstronomy version: 0.15.1
json version: 2.0.9
Python version:
3.7.3 (default, Mar 27 2019, 22:11:17)
[GCC 7.3.0]
### Phase info
```python
```
```python
import pandas as pd
import numpy as np
import glob
met = glob.glob('**/mjd**')[0]
metadata = pd.read_csv(met)
band_peak = np.array(metadata['band_peak'])
V,B,g,R,r,unknown =0,0,0,0,0,0
for i in band_peak:
if str(i) == 'V':
V+=1
elif str(i) == 'B':
B+=1
elif str(i) == 'g':
g+=1
elif str(i) == 'R':
R+=1
elif str(i) == 'r':
r+=1
elif str(i) == band_peak[-2]:
unknown+=1
else:
print(i)
```
```python
```
```python
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
count = np.array([V,B,g,R,r,unknown])
x = np.array(['V','B','g','R','r','unknown'])
plt.bar(x, count , alpha=0.5)
plt.xticks(x, x)
plt.ylabel('Occurrence')
plt.title('Band Usage')
```
Text(0.5, 1.0, 'Band Usage')

```python
count
```
array([ 13, 114, 14, 8, 8, 33])
```python
```
```python
```
```python
```
```python
```
```python
```
|
oyaronREPO_NAMENGSFPATH_START.@NGSF_extracted@NGSF-main@[email protected]_END.py
|
{
"filename": "main.py",
"repo_name": "cdslaborg/paramonte",
"repo_path": "paramonte_extracted/paramonte-main/example/fortran/pm_polation/getExtrap/main.py",
"type": "Python"
}
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import glob
import sys
linewidth = 2
fontsize = 17
for kind in ["neimean", "neinear", "neinext", "neiprev", "piwilin", "monopol", "rungeEffect"]:
crd = pd.read_csv(glob.glob("*."+kind+".crd.txt")[0], delimiter = ",")
pattern = "*."+kind+".extrap.txt"
fileList = glob.glob(pattern)
for file in fileList:
df = pd.read_csv(file, delimiter = ",")
# start with a square Figure
fig = plt.figure(figsize = (8, 6))
ax = plt.subplot(1,1,1)
ax.scatter ( df.values[:, 0]
, df.values[:, 1]
, zorder = 1000
, c = "black"
, s = 8
)
ax.scatter ( crd.values[:, 0]
, crd.values[:,1]
, zorder = 1000
, c = "red"
, s = 20
)
plt.minorticks_on()
ax.set_xlabel("X", fontsize = 17)
ax.set_ylabel("Y", fontsize = 17)
plt.grid(visible = True, which = "both", axis = "both", color = "0.85", linestyle = "-")
ax.tick_params(axis = "x", which = "minor")
ax.tick_params(axis = "y", which = "minor")
ax.legend([file.split(".")[-3], "nodes"], fontsize = fontsize)
plt.tight_layout()
plt.savefig(file.replace(".txt",".png"))
|
cdslaborgREPO_NAMEparamontePATH_START.@paramonte_extracted@paramonte-main@example@fortran@pm_polation@[email protected]@.PATH_END.py
|
{
"filename": "RSD_benchmark.ipynb",
"repo_name": "LSSTDESC/CCL",
"repo_path": "CCL_extracted/CCL-master/examples/RSD_benchmark.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import pyccl as ccl
import py_cosmo_mad as csm
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import ticker, cm
from mpl_toolkits.mplot3d import axes3d
#from matplotlib.ticker import LinearLocator, FormatStrFormatter
```
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
Cell In[1], line 3
1 import numpy as np
2 import pyccl as ccl
----> 3 import py_cosmo_mad as csm
4 import matplotlib
5 import matplotlib.pyplot as plt
ModuleNotFoundError: No module named 'py_cosmo_mad'
```python
# set up cosmologies in CCL
p1 = ccl.Parameters(Omega_c=0.25, Omega_b=0.05, h=0.7, sigma8=0.80, n_s=0.96, w0=-1.0, wa=0.0)
p2 = ccl.Parameters(Omega_c=0.25, Omega_b=0.05, h=0.7, sigma8=0.80, n_s=0.96, w0=-0.9, wa=0.0)
p3 = ccl.Parameters(Omega_c=0.25, Omega_b=0.05, h=0.7, sigma8=0.80, n_s=0.96, w0=-0.9, wa=0.1)
p4 = ccl.Parameters(Omega_c=0.25, Omega_b=0.05, h=0.7, sigma8=0.80, n_s=0.96, w0=-0.9, wa=0.1)
p5 = ccl.Parameters(Omega_c=0.25, Omega_b=0.05, h=0.7, sigma8=0.80, n_s=0.96, w0=-0.9, wa=0.1)
p1.parameters.Omega_g = 0
p2.parameters.Omega_g = 0
p3.parameters.Omega_g = 0
p4.parameters.Omega_g = 0
p5.parameters.Omega_g = 0
p4.parameters.Omega_l = 0.65
p5.parameters.Omega_l = 0.75
# define the cosmology model to use
p = p1
fname = 'model1_xiRSD.txt' # filename for writing benchmark data
cosmo = ccl.Cosmology(p)
```
```python
nk = 10000 # No. of points in k
ns = 5000 # No. of points in s
k_arr = np.logspace(-4.7,5,nk)
s_arr = np.logspace(-1,3,ns)
a = 1.0 # scale factor
h = 0.7 # Hubble
```
```python
# CCL power spectra
pk_lin = ccl.linear_matter_power(cosmo, k_arr, a)
pk_nl = ccl.nonlin_matter_power(cosmo, k_arr, a)
```
```python
# write power spectra to files to be read by CosmoMAD (Note: k is in units of h Mpc^-1 in CosmoMAD but Mpc^-1 in CCL)
file1 = open('pk_lin.dat','w')
for n in range(nk):
file1.write(str(k_arr[n]/h)+' '+str(pk_lin[n])+'\n')
file1.close()
file2 = open('pk_nl.dat','w')
for n in range(nk):
file2.write(str(k_arr[n]/h)+' '+str(pk_nl[n])+'\n')
file2.close()
```
```python
# Input CCL P(k)'s to CosmoMAD
pcsp=csm.PcsPar()
oc = p.parameters.Omega_c
ob = p.parameters.Omega_b
s8 = p.parameters.sigma8
ns = p.parameters.n_s
w0 = p.parameters.w0
wa = p.parameters.wa
pcsp.background_set(oc+ob,h,ob,w0,wa,h,2.725);
pcsp.set_linear_pk("pk_lin.dat",-5,1,0.005,ns,s8)
pcsp.set_nonlinear_pk("pk_nl.dat")
beta = pcsp.f_growth(a) # we assume bias = 1 and get growth factor from CosmoMAD
print('beta =', beta)
pcsp.set_Pk_params(beta,pcsp.growth_factor(a)/pcsp.growth_factor(1),1.0,4);
```
```python
# Calculate correlation function in CosmoMAD (Note: s is in units of Mpc/h in CosmoMAD but Mpc in CCL)
def xicosmomad (s_arr):
xi = []
for s in s_arr:
# xi.append(pcsp.xi_multipole(s*h,0) + 1./4*pcsp.xi_multipole(s*h,2) + 9./64*pcsp.xi_multipole(s*h,4))
xi.append(pcsp.xi_multipole(s*h,0))
return np.array(xi)
```
```python
# Plot relative and absolute differences
frac_diff1 = []
frac_diff2 = []
abs_diff1 = []
abs_diff2 = []
s1 = np.logspace(-1,2,40)
s2 = np.logspace(np.log10(50),np.log10(250),100)
xi_ccl1 = ccl.correlation_3dRsd_avgmu(cosmo,a,s1,beta)
xi_cosmomad1 = xicosmomad(s1)
xi_ccl2 = ccl.correlation_3dRsd_avgmu(cosmo,a,s2,beta)
xi_cosmomad2 = xicosmomad(s2)
for i in range(len(s1)):
frac_diff1.append(np.abs(xi_ccl1[i]/xi_cosmomad1[i] - 1.))
abs_diff1.append(np.abs(s1[i]*s1[i]*(xi_ccl1[i]-xi_cosmomad1[i])))
for i in range(len(s2)):
frac_diff2.append(np.abs(xi_ccl2[i]/xi_cosmomad2[i] - 1.))
abs_diff2.append(np.abs(s2[i]*s2[i]*(xi_ccl2[i]-xi_cosmomad2[i])))
# esthetic definitions for the plots
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
matplotlib.rcParams['font.size'] = 14
plt.plot(s1, frac_diff1, 'b-')
plt.plot(s2, frac_diff2, 'r-')
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$s$ (Mpc)')
plt.ylabel(r'$\Delta \xi(s) / \xi(s)$')
plt.grid(which='major')
plt.title('Relative difference')
#plt.savefig('benchmark_rel.pdf',bbox_inches = 'tight')
plt.show()
#print frac_diff
print(f'Max frac diff for s = 0.1-100: {np.amax(frac_diff1)}')
print(f'Max frac diff for s = 50-250: {np.amax(frac_diff2)}')
```
```python
r2xi = np.array(s2*s2*xi_cosmomad2)
# plot absolute difference in r^2 xi(r)
plt.plot(s1, abs_diff1, 'b-')
plt.plot(s2, abs_diff2, 'r-')
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$s$ (Mpc)')
plt.ylabel(r'$\Delta (s^2 \xi(s)) $')
plt.grid(which='minor')
plt.title('Absolute difference')
plt.grid(which='both')
#plt.savefig('benchmark_abs.pdf',bbox_inches = 'tight')
plt.show()
#print abs_diff
print(f'Max diff for s = 0.1-100: {np.amax(abs_diff1)}')
print(f'Max diff for s = 50-250: {np.amax(abs_diff2)}')
apex = np.amax(r2xi[(s2<160)&(s2>140)])
print(f'BAO peak height = max (s^2 xi) for s = 140-160 Mpc: {apex}')
# find and print the average of Delta(r^2 xi) in the BAO peak region
avg_value = np.average(np.array(abs_diff2)[(140<s2) & (s2<150)])
print(f'Average Delta(s^2 xi) for s = 140-150 Mpc: {avg_value}')
```
```python
# Write benchmark data to file
f = open(fname,'w')
f.write('# [0] s (Mpc; comoving), [1] xi(s,z=0.0)' + '\n')
for i in range(len(s1)):
col = []
s = ''
col.append("{:.18e}".format(s1[i]).ljust(27))
col.append("{:.18e}".format(xi_cosmomad1[i]).ljust(27))
s = s + col[0] + col[1]
f.write(s + '\n')
for i in range(len(s2)):
col = []
s = ''
col.append("{:.18e}".format(s2[i]).ljust(27))
col.append("{:.18e}".format(xi_cosmomad2[i]).ljust(27))
s = s + col[0] + col[1]
f.write(s + '\n')
f.close()
```
```python
# Make some plots of the difference s^2*xi(CCL) - s^2*xi(CosmoMAD) for xi(pi, sigma)
def bench_pi_sigma(pi,sigma):
xi = []
for sig in sigma:
xi.append(pcsp.xi_pi_sigma(pi*h,sig*h,1))
return np.array(xi)
```
```python
N = 200
l = 200
pi = np.linspace(0,l,N)
sigma = np.linspace(0,l,N)
X = np.ones([N,N]);
for i in range(N):
bench_xi = bench_pi_sigma(pi[i],sigma);
ccl_xi = ccl.correlation_pi_sigma(cosmo,a,beta,pi[i],sigma,True);
#X[i] = np.abs(1 - ccl_xi/bench_xi)
X[i] = np.absolute(ccl_xi - bench_xi) * (pi[i]**2 + sigma[i]**2)
```
```python
x = np.linspace(-l,l,2*N)
y = np.linspace(-l,l,2*N)
b1 = np.hstack((np.fliplr(X),X))
b2 = np.flipud(b1)
b = np.vstack((b2,b1))
#v = [0.0001,0.0002,0.0005,0.001,0.002,0.005,0.01]
v = [0.01,0.02,0.05,0.1,0.2,0.5,1.]
plt.figure(figsize = (6,6))
CP=plt.contour(x,y,b,v,colors=('r', 'g', 'y', 'c', 'b', 'm', 'k'))
plt.clabel(CP, inline=1, fontsize=8)
plt.xlabel(r'$\sigma$ $[\rm{Mpc}]$',fontsize=22)
plt.ylabel(r'$\pi$ $[\rm{Mpc}]$',fontsize=22)
#plt.grid()
#plt.savefig('benchmark1.pdf',bbox_inches = 'tight')
plt.show()
```
```python
plt.figure(figsize = (6,6))
plt.contourf(x,y,b,v,colors=('r', 'g', 'y', 'c', 'b', 'm', 'k'))
#plt.clabel(CP, inline=1, fontsize=12)
plt.xlabel(r'$\sigma$ $[\rm{Mpc}]$',fontsize=22)
plt.ylabel(r'$\pi$ $[\rm{Mpc}]$',fontsize=22)
#plt.grid()
#plt.savefig('benchmark2.pdf',bbox_inches = 'tight')
plt.show()
```
```python
fig, ax = plt.subplots(figsize=(7, 7))
ax.set_xlabel(r'$\sigma$ $[\rm{Mpc}]$',fontsize=22)
ax.set_ylabel(r'$\pi$ $[\rm{Mpc}]$',fontsize=22)
#csp=plt.contourf(x, y, b, locator=ticker.LogLocator(), cmap=cm.PuBu_r)
#cbar = fig.colorbar(csp)
plt.contourf(x, y, b, 20, cmap='Blues')
plt.colorbar();
#plt.savefig('benchmark3.pdf',bbox_inches = 'tight')
```
```python
fig1 = plt.figure(figsize=(7, 7))
ax1 = fig1.add_subplot(111, projection='3d')
X, Y = np.meshgrid(x, y)
ax1.plot_wireframe(X, Y, b, rstride=10, cstride=10)
ax1.set_xlabel(r'$\sigma$ $[\rm{Mpc}]$',fontsize=14)
ax1.set_ylabel(r'$\pi$ $[\rm{Mpc}]$',fontsize=14)
#ax1.set_zscale('log')
zlimits=ax1.set_zlim(0., 2.)
#plt.savefig('benchmark4.pdf',bbox_inches = 'tight')
```
```python
```
|
LSSTDESCREPO_NAMECCLPATH_START.@CCL_extracted@CCL-master@examples@[email protected]_END.py
|