Dataset columns: metadata (dict) | text (string, 0 to 40.6M chars) | id (string, 14 to 255 chars)
{
"filename": "rotint_sloppy.py",
"repo_name": "shihyuntang/igrins_rv",
"repo_path": "igrins_rv_extracted/igrins_rv-master/Engine/rotint_sloppy.py",
"type": "Python"
}
from scipy.signal import fftconvolve
import numpy as np
from Engine.importmodule import *


def rotint(wave_spec, flux_spec, vrot):
'''
Applies rotational broadening to spectrum. This code is from the IvS Python
Repository and is referenced as such (Institute for Astronomy at
KU Leuven 2018, https://github.com/IvS-KULeuven/IvSPythonRepository)
Inputs:
wave_spec : Wavelength scale of spectrum
flux_spec : Corresponding flux of spectrum
    vrot      : projected rotational velocity vsini, in km/s
Outputs:
wave_conv : Rotationally broadened wavelength scale
1-flux_conv : Rotationally broadened flux
'''
epsilon = 0.6
wave_ = np.log(wave_spec)
velo_ = np.linspace(wave_[0], wave_[-1], len(wave_))
flux_ = np.interp(velo_,wave_, flux_spec)
dvelo = velo_[1]-velo_[0]
vrot = vrot/(2.99792458e5)
#-- compute the convolution kernel and normalise it
n = int( 2*vrot / dvelo)
velo_k = np.arange(n)*dvelo
velo_k -= velo_k[-1]/2.
y = 1 - (velo_k/vrot)**2 # transformation of velocity
G = ( 2*(1-epsilon)*np.sqrt(y) + np.pi*epsilon/2.*y ) \
/ ( np.pi*vrot * (1-epsilon/3.0) ) # the kernel
G /= np.sum(G)
#-- convolve the flux with the kernel
flux_conv = fftconvolve(1-flux_,G,mode='same')
velo_ = np.arange(len(flux_conv)) * dvelo+velo_[0]
wave_conv = np.exp(velo_)
return wave_conv,1-flux_conv
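
A minimal usage sketch for rotint follows. It assumes numpy is importable as np (as in the module above) and invents a synthetic Gaussian absorption line and a vsini value purely for illustration; it is not part of the original file.

import numpy as np

# synthetic spectrum: one Gaussian absorption line on a flat continuum
wave = np.linspace(22000.0, 22100.0, 5000)                      # wavelength in Angstrom
flux = 1.0 - 0.5*np.exp(-0.5*((wave - 22050.0)/0.5)**2)         # normalized flux

# broaden to vsini = 15 km/s; returns the resampled wavelength grid and flux
wave_b, flux_b = rotint(wave, flux, 15.0)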
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/email/mime/__init__.py",
"type": "Python"
}
{
"filename": "_parser.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/re/_parser.py",
"type": "Python"
}
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the __init__.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
from ._constants import *
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = frozenset("0123456789")
OCTDIGITS = frozenset("01234567")
HEXDIGITS = frozenset("0123456789abcdefABCDEF")
ASCIILETTERS = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
WHITESPACE = frozenset(" \t\n\r\v\f")
_REPEATCODES = frozenset({MIN_REPEAT, MAX_REPEAT, POSSESSIVE_REPEAT})
_UNITCODES = frozenset({ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY})
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
TYPE_FLAGS = SRE_FLAG_ASCII | SRE_FLAG_LOCALE | SRE_FLAG_UNICODE
GLOBAL_FLAGS = SRE_FLAG_DEBUG | SRE_FLAG_TEMPLATE
# Maximal value returned by SubPattern.getwidth().
# Must be larger than MAXREPEAT, MAXCODE and sys.maxsize.
MAXWIDTH = 1 << 64
class State:
# keeps track of state for parsing
def __init__(self):
self.flags = 0
self.groupdict = {}
self.groupwidths = [None] # group 0
self.lookbehindgroups = None
self.grouprefpos = {}
@property
def groups(self):
return len(self.groupwidths)
def opengroup(self, name=None):
gid = self.groups
self.groupwidths.append(None)
if self.groups > MAXGROUPS:
raise error("too many groups")
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %r as group %d; "
"was group %d" % (name, gid, ogid))
self.groupdict[name] = gid
return gid
def closegroup(self, gid, p):
self.groupwidths[gid] = p.getwidth()
def checkgroup(self, gid):
return gid < self.groups and self.groupwidths[gid] is not None
def checklookbehindgroup(self, gid, source):
if self.lookbehindgroups is not None:
if not self.checkgroup(gid):
raise source.error('cannot refer to an open group')
if gid >= self.lookbehindgroups:
raise source.error('cannot refer to group defined in the same '
'lookbehind subpattern')
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, state, data=None):
self.state = state
if data is None:
data = []
self.data = data
self.width = None
def dump(self, level=0):
seqtypes = (tuple, list)
for op, av in self.data:
print(level*" " + str(op), end='')
if op is IN:
# member sublanguage
print()
for op, a in av:
print((level+1)*" " + str(op), a)
elif op is BRANCH:
print()
for i, a in enumerate(av[1]):
if i:
print(level*" " + "OR")
a.dump(level+1)
elif op is GROUPREF_EXISTS:
condgroup, item_yes, item_no = av
print('', condgroup)
item_yes.dump(level+1)
if item_no:
print(level*" " + "ELSE")
item_no.dump(level+1)
elif isinstance(av, SubPattern):
print()
av.dump(level+1)
elif isinstance(av, seqtypes):
nl = False
for a in av:
if isinstance(a, SubPattern):
if not nl:
print()
a.dump(level+1)
nl = True
else:
if not nl:
print(' ', end='')
print(a, end='')
nl = False
if not nl:
print()
else:
print('', av)
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.state, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width is not None:
return self.width
lo = hi = 0
for op, av in self.data:
if op is BRANCH:
i = MAXWIDTH
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is ATOMIC_GROUP:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[-1].getwidth()
lo = lo + i
hi = hi + j
elif op in _REPEATCODES:
i, j = av[2].getwidth()
lo = lo + i * av[0]
if av[1] == MAXREPEAT and j:
hi = MAXWIDTH
else:
hi = hi + j * av[1]
elif op in _UNITCODES:
lo = lo + 1
hi = hi + 1
elif op is GROUPREF:
i, j = self.state.groupwidths[av]
lo = lo + i
hi = hi + j
elif op is GROUPREF_EXISTS:
i, j = av[1].getwidth()
if av[2] is not None:
l, h = av[2].getwidth()
i = min(i, l)
j = max(j, h)
else:
i = 0
lo = lo + i
hi = hi + j
elif op is SUCCESS:
break
self.width = min(lo, MAXWIDTH), min(hi, MAXWIDTH)
return self.width
class Tokenizer:
def __init__(self, string):
self.istext = isinstance(string, str)
self.string = string
if not self.istext:
string = str(string, 'latin1')
self.decoded_string = string
self.index = 0
self.next = None
self.__next()
def __next(self):
index = self.index
try:
char = self.decoded_string[index]
except IndexError:
self.next = None
return
if char == "\\":
index += 1
try:
char += self.decoded_string[index]
except IndexError:
raise error("bad escape (end of pattern)",
self.string, len(self.string) - 1) from None
self.index = index + 1
self.next = char
def match(self, char):
if char == self.next:
self.__next()
return True
return False
def get(self):
this = self.next
self.__next()
return this
def getwhile(self, n, charset):
result = ''
for _ in range(n):
c = self.next
if c not in charset:
break
result += c
self.__next()
return result
def getuntil(self, terminator, name):
result = ''
while True:
c = self.next
self.__next()
if c is None:
if not result:
raise self.error("missing " + name)
raise self.error("missing %s, unterminated name" % terminator,
len(result))
if c == terminator:
if not result:
raise self.error("missing " + name, 1)
break
result += c
return result
@property
def pos(self):
return self.index - len(self.next or '')
def tell(self):
return self.index - len(self.next or '')
def seek(self, index):
self.index = index
self.__next()
def error(self, msg, offset=0):
if not self.istext:
msg = msg.encode('ascii', 'backslashreplace').decode('ascii')
return error(msg, self.string, self.tell() - offset)
def checkgroupname(self, name, offset):
if not (self.istext or name.isascii()):
msg = "bad character in group name %a" % name
raise self.error(msg, len(name) + offset)
if not name.isidentifier():
msg = "bad character in group name %r" % name
raise self.error(msg, len(name) + offset)
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] is IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise source.error("incomplete escape %s" % escape, len(escape))
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "N" and source.istext:
import unicodedata
# named unicode escape e.g. \N{EM DASH}
if not source.match('{'):
raise source.error("missing {")
charname = source.getuntil('}', 'character name')
try:
c = ord(unicodedata.lookup(charname))
except (KeyError, TypeError):
raise source.error("undefined character name %r" % charname,
len(charname) + len(r'\N{}')) from None
return LITERAL, c
elif c in OCTDIGITS:
# octal escape (up to three digits)
escape += source.getwhile(2, OCTDIGITS)
c = int(escape[1:], 8)
if c > 0o377:
raise source.error('octal escape value %s outside of '
'range 0-0o377' % escape, len(escape))
return LITERAL, c
elif c in DIGITS:
raise ValueError
if len(escape) == 2:
if c in ASCIILETTERS:
raise source.error('bad escape %s' % escape, len(escape))
return LITERAL, ord(escape[1])
except ValueError:
pass
raise source.error("bad escape %s" % escape, len(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise source.error("incomplete escape %s" % escape, len(escape))
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "N" and source.istext:
import unicodedata
# named unicode escape e.g. \N{EM DASH}
if not source.match('{'):
raise source.error("missing {")
charname = source.getuntil('}', 'character name')
try:
c = ord(unicodedata.lookup(charname))
except (KeyError, TypeError):
raise source.error("undefined character name %r" % charname,
len(charname) + len(r'\N{}')) from None
return LITERAL, c
elif c == "0":
# octal escape
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8)
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape += source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape += source.get()
c = int(escape[1:], 8)
if c > 0o377:
raise source.error('octal escape value %s outside of '
'range 0-0o377' % escape,
len(escape))
return LITERAL, c
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise source.error("cannot refer to an open group",
len(escape))
state.checklookbehindgroup(group, source)
return GROUPREF, group
raise source.error("invalid group reference %d" % group, len(escape) - 1)
if len(escape) == 2:
if c in ASCIILETTERS:
raise source.error("bad escape %s" % escape, len(escape))
return LITERAL, ord(escape[1])
except ValueError:
pass
raise source.error("bad escape %s" % escape, len(escape))
def _uniq(items):
return list(dict.fromkeys(items))
def _parse_sub(source, state, verbose, nested):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
start = source.tell()
while True:
itemsappend(_parse(source, state, verbose, nested + 1,
not nested and not items))
if not sourcematch("|"):
break
if not nested:
verbose = state.flags & SRE_FLAG_VERBOSE
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
# check if all items share a common prefix
while True:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpattern.append(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
set = []
for item in items:
if len(item) != 1:
break
op, av = item[0]
if op is LITERAL:
set.append((op, av))
elif op is IN and av[0][0] is not NEGATE:
set.extend(av)
else:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
subpattern.append((IN, _uniq(set)))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse(source, state, verbose, nested, first=False):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
_ord = ord
while True:
this = source.next
if this is None:
break # end of pattern
if this in "|)":
break # end of subpattern
sourceget()
if verbose:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while True:
this = sourceget()
if this is None or this == "\n":
break
continue
if this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
elif this not in SPECIAL_CHARS:
subpatternappend((LITERAL, _ord(this)))
elif this == "[":
here = source.tell() - 1
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if source.next == '[':
import warnings
warnings.warn(
'Possible nested set at position %d' % source.tell(),
FutureWarning, stacklevel=nested + 6
)
negate = sourcematch("^")
# check remaining characters
while True:
this = sourceget()
if this is None:
raise source.error("unterminated character set",
source.tell() - here)
if this == "]" and set:
break
elif this[0] == "\\":
code1 = _class_escape(source, this)
else:
if set and this in '-&~|' and source.next == this:
import warnings
warnings.warn(
'Possible set %s at position %d' % (
'difference' if this == '-' else
'intersection' if this == '&' else
'symmetric difference' if this == '~' else
'union',
source.tell() - 1),
FutureWarning, stacklevel=nested + 6
)
code1 = LITERAL, _ord(this)
if sourcematch("-"):
# potential range
that = sourceget()
if that is None:
raise source.error("unterminated character set",
source.tell() - here)
if that == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, _ord("-")))
break
if that[0] == "\\":
code2 = _class_escape(source, that)
else:
if that == '-':
import warnings
warnings.warn(
'Possible set difference at position %d' % (
source.tell() - 2),
FutureWarning, stacklevel=nested + 6
)
code2 = LITERAL, _ord(that)
if code1[0] != LITERAL or code2[0] != LITERAL:
msg = "bad character range %s-%s" % (this, that)
raise source.error(msg, len(this) + 1 + len(that))
lo = code1[1]
hi = code2[1]
if hi < lo:
msg = "bad character range %s-%s" % (this, that)
raise source.error(msg, len(this) + 1 + len(that))
setappend((RANGE, (lo, hi)))
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
set = _uniq(set)
# XXX: <fl> should move set optimization to compiler!
if _len(set) == 1 and set[0][0] is LITERAL:
# optimization
if negate:
subpatternappend((NOT_LITERAL, set[0][1]))
else:
subpatternappend(set[0])
else:
if negate:
set.insert(0, (NEGATE, None))
# charmap optimization can't be added here because
# global flags still are not known
subpatternappend((IN, set))
elif this in REPEAT_CHARS:
# repeat previous item
here = source.tell()
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, _ord(this)))
continue
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo += sourceget()
if sourcematch(","):
while source.next in DIGITS:
hi += sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, _ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise source.error("min repeat greater than max repeat",
source.tell() - here)
else:
raise AssertionError("unsupported quantifier %r" % (char,))
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or item[0][0] is AT:
raise source.error("nothing to repeat",
source.tell() - here + len(this))
if item[0][0] in _REPEATCODES:
raise source.error("multiple repeat",
source.tell() - here + len(this))
if item[0][0] is SUBPATTERN:
group, add_flags, del_flags, p = item[0][1]
if group is None and not add_flags and not del_flags:
item = p
if sourcematch("?"):
# Non-Greedy Match
subpattern[-1] = (MIN_REPEAT, (min, max, item))
elif sourcematch("+"):
# Possessive Match (Always Greedy)
subpattern[-1] = (POSSESSIVE_REPEAT, (min, max, item))
else:
# Greedy Match
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
start = source.tell() - 1
capture = True
atomic = False
name = None
add_flags = 0
del_flags = 0
if sourcematch("?"):
# options
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
if char == "P":
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = source.getuntil(">", "group name")
source.checkgroupname(name, 1)
elif sourcematch("="):
# named backreference
name = source.getuntil(")", "group name")
source.checkgroupname(name, 1)
gid = state.groupdict.get(name)
if gid is None:
msg = "unknown group name %r" % name
raise source.error(msg, len(name) + 1)
if not state.checkgroup(gid):
raise source.error("cannot refer to an open group",
len(name) + 1)
state.checklookbehindgroup(gid, source)
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
raise source.error("unknown extension ?P" + char,
len(char) + 2)
elif char == ":":
# non-capturing group
capture = False
elif char == "#":
# comment
while True:
if source.next is None:
raise source.error("missing ), unterminated comment",
source.tell() - start)
if sourceget() == ")":
break
continue
elif char in "=!<":
# lookahead assertions
dir = 1
if char == "<":
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
if char not in "=!":
raise source.error("unknown extension ?<" + char,
len(char) + 2)
dir = -1 # lookbehind
lookbehindgroups = state.lookbehindgroups
if lookbehindgroups is None:
state.lookbehindgroups = state.groups
p = _parse_sub(source, state, verbose, nested + 1)
if dir < 0:
if lookbehindgroups is None:
state.lookbehindgroups = None
if not sourcematch(")"):
raise source.error("missing ), unterminated subpattern",
source.tell() - start)
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif char == "(":
# conditional backreference group
condname = source.getuntil(")", "group name")
if not (condname.isdecimal() and condname.isascii()):
source.checkgroupname(condname, 1)
condgroup = state.groupdict.get(condname)
if condgroup is None:
msg = "unknown group name %r" % condname
raise source.error(msg, len(condname) + 1)
else:
condgroup = int(condname)
if not condgroup:
raise source.error("bad group number",
len(condname) + 1)
if condgroup >= MAXGROUPS:
msg = "invalid group reference %d" % condgroup
raise source.error(msg, len(condname) + 1)
if condgroup not in state.grouprefpos:
state.grouprefpos[condgroup] = (
source.tell() - len(condname) - 1
)
if not (condname.isdecimal() and condname.isascii()):
import warnings
warnings.warn(
"bad character in group name %s at position %d" %
(repr(condname) if source.istext else ascii(condname),
source.tell() - len(condname) - 1),
DeprecationWarning, stacklevel=nested + 6
)
state.checklookbehindgroup(condgroup, source)
item_yes = _parse(source, state, verbose, nested + 1)
if source.match("|"):
item_no = _parse(source, state, verbose, nested + 1)
if source.next == "|":
raise source.error("conditional backref with more than two branches")
else:
item_no = None
if not source.match(")"):
raise source.error("missing ), unterminated subpattern",
source.tell() - start)
subpatternappend((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
continue
elif char == ">":
# non-capturing, atomic group
capture = False
atomic = True
elif char in FLAGS or char == "-":
# flags
flags = _parse_flags(source, state, char)
if flags is None: # global flags
if not first or subpattern:
raise source.error('global flags not at the start '
'of the expression',
source.tell() - start)
verbose = state.flags & SRE_FLAG_VERBOSE
continue
add_flags, del_flags = flags
capture = False
else:
raise source.error("unknown extension ?" + char,
len(char) + 1)
# parse group contents
if capture:
try:
group = state.opengroup(name)
except error as err:
raise source.error(err.msg, len(name) + 1) from None
else:
group = None
sub_verbose = ((verbose or (add_flags & SRE_FLAG_VERBOSE)) and
not (del_flags & SRE_FLAG_VERBOSE))
p = _parse_sub(source, state, sub_verbose, nested + 1)
if not source.match(")"):
raise source.error("missing ), unterminated subpattern",
source.tell() - start)
if group is not None:
state.closegroup(group, p)
if atomic:
assert group is None
subpatternappend((ATOMIC_GROUP, p))
else:
subpatternappend((SUBPATTERN, (group, add_flags, del_flags, p)))
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpatternappend((AT, AT_END))
else:
raise AssertionError("unsupported special character %r" % (char,))
# unpack non-capturing groups
for i in range(len(subpattern))[::-1]:
op, av = subpattern[i]
if op is SUBPATTERN:
group, add_flags, del_flags, p = av
if group is None and not add_flags and not del_flags:
subpattern[i: i+1] = p
return subpattern
def _parse_flags(source, state, char):
sourceget = source.get
add_flags = 0
del_flags = 0
if char != "-":
while True:
flag = FLAGS[char]
if source.istext:
if char == 'L':
msg = "bad inline flags: cannot use 'L' flag with a str pattern"
raise source.error(msg)
else:
if char == 'u':
msg = "bad inline flags: cannot use 'u' flag with a bytes pattern"
raise source.error(msg)
add_flags |= flag
if (flag & TYPE_FLAGS) and (add_flags & TYPE_FLAGS) != flag:
msg = "bad inline flags: flags 'a', 'u' and 'L' are incompatible"
raise source.error(msg)
char = sourceget()
if char is None:
raise source.error("missing -, : or )")
if char in ")-:":
break
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing -, : or )"
raise source.error(msg, len(char))
if char == ")":
state.flags |= add_flags
return None
if add_flags & GLOBAL_FLAGS:
raise source.error("bad inline flags: cannot turn on global flag", 1)
if char == "-":
char = sourceget()
if char is None:
raise source.error("missing flag")
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing flag"
raise source.error(msg, len(char))
while True:
flag = FLAGS[char]
if flag & TYPE_FLAGS:
msg = "bad inline flags: cannot turn off flags 'a', 'u' and 'L'"
raise source.error(msg)
del_flags |= flag
char = sourceget()
if char is None:
raise source.error("missing :")
if char == ":":
break
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing :"
raise source.error(msg, len(char))
assert char == ":"
if del_flags & GLOBAL_FLAGS:
raise source.error("bad inline flags: cannot turn off global flag", 1)
if add_flags & del_flags:
raise source.error("bad inline flags: flag turned on and off", 1)
return add_flags, del_flags
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if flags & SRE_FLAG_LOCALE:
raise ValueError("cannot use LOCALE flag with a str pattern")
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("cannot use UNICODE flag with a bytes pattern")
if flags & SRE_FLAG_LOCALE and flags & SRE_FLAG_ASCII:
raise ValueError("ASCII and LOCALE flags are incompatible")
return flags
def parse(str, flags=0, state=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if state is None:
state = State()
state.flags = flags
state.str = str
p = _parse_sub(source, state, flags & SRE_FLAG_VERBOSE, 0)
p.state.flags = fix_flags(str, p.state.flags)
if source.next is not None:
assert source.next == ")"
raise source.error("unbalanced parenthesis")
for g in p.state.grouprefpos:
if g >= p.state.groups:
msg = "invalid group reference %d" % g
raise error(msg, str, p.state.grouprefpos[g])
if flags & SRE_FLAG_DEBUG:
p.dump()
return p
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
result = []
literal = []
lappend = literal.append
def addliteral():
if s.istext:
result.append(''.join(literal))
else:
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
result.append(''.join(literal).encode('latin-1'))
del literal[:]
def addgroup(index, pos):
if index > pattern.groups:
raise s.error("invalid group reference %d" % index, pos)
addliteral()
result.append(index)
groupindex = pattern.groupindex
while True:
this = sget()
if this is None:
break # end of replacement string
if this[0] == "\\":
# group
c = this[1]
if c == "g":
if not s.match("<"):
raise s.error("missing <")
name = s.getuntil(">", "group name")
if not (name.isdecimal() and name.isascii()):
s.checkgroupname(name, 1)
try:
index = groupindex[name]
except KeyError:
raise IndexError("unknown group name %r" % name) from None
else:
index = int(name)
if index >= MAXGROUPS:
raise s.error("invalid group reference %d" % index,
len(name) + 1)
if not (name.isdecimal() and name.isascii()):
import warnings
warnings.warn(
"bad character in group name %s at position %d" %
(repr(name) if s.istext else ascii(name),
s.tell() - len(name) - 1),
DeprecationWarning, stacklevel=5
)
addgroup(index, len(name) + 1)
elif c == "0":
if s.next in OCTDIGITS:
this += sget()
if s.next in OCTDIGITS:
this += sget()
lappend(chr(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this += sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this += sget()
isoctal = True
c = int(this[1:], 8)
if c > 0o377:
raise s.error('octal escape value %s outside of '
'range 0-0o377' % this, len(this))
lappend(chr(c))
if not isoctal:
addgroup(int(this[1:]), len(this) - 1)
else:
try:
this = chr(ESCAPES[this][1])
except KeyError:
if c in ASCIILETTERS:
raise s.error('bad escape %s' % this, len(this)) from None
lappend(this)
else:
lappend(this)
addliteral()
return result
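
As a quick sanity check of the parser above, the sketch below exercises parse() and SubPattern.dump() on an arbitrary pattern. It assumes the module is importable under the standard library's name re._parser (the CPython 3.11+ location of this file); the pattern string is invented for illustration.

import re._parser as sre_parser

# parse a pattern into the intermediate (opcode, argument) form built above
tree = sre_parser.parse(r"(?P<word>\w+)[-_]\d{2,4}")

print(tree)             # SubPattern.__repr__: the raw (opcode, argument) list
print(tree.getwidth())  # (min, max) width bounds from SubPattern.getwidth()
tree.dump()             # indented dump of the parse tree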
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/stream/__init__.py",
"type": "Python"
}
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._token import TokenValidator
from ._maxpoints import MaxpointsValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [], ["._token.TokenValidator", "._maxpoints.MaxpointsValidator"]
)
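
The deferred import above relies on module-level __getattr__ (PEP 562). The snippet below is a generic, simplified sketch of that mechanism, not plotly's actual relative_import implementation; the _lazy mapping mirrors the two validators named in this file and is meant only to illustrate the idea.

import importlib
from typing import Any

# map public attribute names to the relative modules that define them
_lazy = {
    "TokenValidator": "._token",
    "MaxpointsValidator": "._maxpoints",
}

def __getattr__(name: str) -> Any:
    # resolve the submodule only on first attribute access
    if name in _lazy:
        module = importlib.import_module(_lazy[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")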
{
"filename": "asciidata.py",
"repo_name": "spacetelescope/stsdas_stripped",
"repo_path": "stsdas_stripped_extracted/stsdas_stripped-master/stsdas/pkg/analysis/slitless/axe/axe_asciidata/asciidata.py",
"type": "Python"
}
"""
Main class of the asciidata module
@author: Martin Kuemmel, Jonas Haase
@organization: Space Telescope - European Coordinating Facility (ST-ECF)
@license: Gnu Public Licence
@contact: [email protected]
@since: 2005/09/13
$LastChangedBy: mkuemmel $
$LastChangedDate: 2008-01-08 18:17:08 +0100 (Tue, 08 Jan 2008) $
$LastChangedRevision: $
$HeadURL: $
"""
__version__ = "Version 1.1 $LastChangedRevision: 330 $"
import string, sys, os, types, copy
from .asciiheader import *
from .asciicolumn import *
from .asciisorter import *
from .asciierror import *
from .asciiutils import *
class NullData(object):
"""
Null class as a parent class for the AsciiData class
This parent classs of the AsciiData class offers to create
a new AsciiData instance without a file to read from.
All elements are set to None, but of course can later
be filled by the user.
"""
def __init__(self, ncols, nrows, null=None):
"""
Constructor for the NullData Class
Creates an empty AsciiData instance with columns and
rows as specified. All entries are 'None'.
@param ncols: the number of columns to be created
@type ncols: integer
@param nrows: the number of rows to be created
@type nrows: integer
        @param null: string to be interpreted as NULL
@type null: string
"""
# set the default null string
if null:
self._null = [null.strip()]
else:
self._null = ['Null']
        # create the column list
self.columns = []
for index in range(ncols):
# get the column name
colname = self._def_colname(index)
# create and append an empty column
self.columns.append(AsciiColumn(nrows=nrows, colname=colname,
null=self._null))
def _def_colname(self, index):
"""
Gives the default column name.
The method composes and returns the
default column name for a column at a
given index.
@param index: the index of the column
@type index: integer
"""
return 'column'+str(index+1)
class AsciiData(NullData):
"""
Basic class in the AstroAsciiData project
    This class and its methods form the complete API for handling
    ASCII tables within the package.
"""
def __init__(self, filename=None, ncols=0, nrows=0, null=None,
delimiter=None, comment_char=None, columnInfo=0, headerComment=1):
"""
Constructor for the AsciiData Class
The data is taken from a file specified in the input.
As addition, a NULL string, a delimiter string and a comment_char
string can be specified. The ascii data is read in from the
file and stored in a list of Columns
@param filename: the filename to create the AsciiData from
@type filename: string
@param ncols: the number of columns to be created
@type ncols: integer
@param nrows: the number of rows to be created
@type nrows: integer
        @param null: string to be interpreted as NULL
        @type null: string
        @param delimiter: string to be used as delimiter
        @type delimiter: string
        @param comment_char: string to be used as comment character
        @type comment_char: string
"""
self.ncols = 0
self.nrows = 0
# set the default comment_char
if comment_char:
self._comment_char = comment_char
else:
self._comment_char = '#'
# set the default null string
if null:
self._null = [null.strip()]
else:
self._null = ['Null', 'NULL', 'None', '*']
# set the delimiter
self._delimiter = delimiter
# set the separator
self._separator = Separator(delimiter)
# create the header
self.header = Header(filename, self._comment_char)
# check whether a filename is given
if filename != None:
# check whether the file exists
if os.path.exists(filename):
self.filename = filename
else:
err_msg = "Filename: " + filename + " does not exist!"
raise Exception(err_msg)
# set public output flags
if self.header.SExtractorFlag:
self.columnInfo = 1
self.headerComment = 1
else:
self.columnInfo = 0
self.headerComment = 1
# load in all data from the files
self.columns = self._load_columns(filename, self._null,
self._comment_char, self._separator)
else:
# set the filename to none
self.filename = None
            # check whether valid numbers were given
if nrows > 0 and ncols > 0:
# create the empty instance
super(AsciiData, self).__init__(ncols, nrows, null)
else:
err_msg = "Number of columns, rows: " \
+ str(ncols) + str(nrows) + " are not reasonable!"
raise Exception(err_msg)
# set the public output flags
# as the corresponding parameters
self.columnInfo = columnInfo
self.headerComment = headerComment
# find the number of undefined columns
self._undef_cols = self._find_undefined_cols(self.columns)
# find the number of columns and rows
self.ncols = len(self.columns)
if self.ncols:
self.nrows = self.columns[0].get_nrows()
def __getitem__(self, element):
"""
Defines the list operator for indexing
This method returns the index or indices as specified
in the input. In the current class therefore returns
either a column or a column slice as specified in the input.
@param element: either column index or slice or name
@type element: string/integer
@return: a column
@rtype: AsciiColumn(s)
"""
# this part deals with slices
if type(element) == slice:
# FIXME this must be possible to do more elegantly
start,stop,step = element.indices(self.ncols)
newAD = copy.deepcopy(self)
            # use a list so that pop() also works under Python 3
            all = list(range(self.ncols))
            inclusive = [x for x in all[start:stop:step]]
            while all:
                idx = all.pop()
                if idx not in inclusive:
                    del newAD[idx]
return newAD
# this part deals with individual
# columns, specified by index or name
try:
index = self._loc_column(element)
except ColumnError:
index = self.append(element)
# return the desired column
return self.columns[index]
def __setitem__(self, element, column):
"""
        Defines the list operator for indexed assignment
The method inserts a column to the class at the
specified index. As of now, it is not possible
to create extra columns with this method,
only existing columns can be overwritten.
@param element: either column index or name
@type element: string/integer
@param column: the column to assign to an index
@type column: AsciiColumn
"""
index = self._loc_column(element)
# check whether the column does have the same number
# of rows as the class
# raise an error if not
if column.get_nrows() != self.nrows:
err_msg = 'Nrows: '+str(column.get_nrows())+' different than nrows: '\
+str(self.nrows)+'!!'
raise Exception(err_msg)
# check whether the column has a name
if not column.colname:
# give it a default name
column.colname = self._def_colname(index)
# assign the new column
self.columns[index] = column.copy()
# transfer the null element to the new column
self.columns[index]._null[0] = self._null[0]
def __delitem__(self, element):
"""
Deletes an index.
The method deletes a column specified in the input.
The column can be specified either by the column
name or the index.
@param element: either column index or name
@type element: string/integer
"""
# get the index from the input
index = self._loc_column(element)
# delete the column
del self.columns[index]
# adjust the number of columns
self.ncols -= 1
def __iter__(self):
"""
Provide an iterator object.
        The function provides and returns an iterator object
for the AstroAsciiData class. Due to this iterator object
sequences like:
for column in ascii_data_object:
<do something with column>
are possible.
"""
return AsciiLenGetIter(self)
def __len__(self):
"""
Defines a length method for the object
@return: the length of the object
@rtype: integer
"""
return self.ncols
def str(self):
"""
Defines a string method for the object.
Gives a simple string method such that str(AsciiData)
does work. The formatting is close to the formatting
for the output to files.
@return: the string representation of the object
@rtype: string
"""
bigstring = ''
# take the object delimiter or ' '
if not self._delimiter:
delim = ' '
else:
delim = self._delimiter
# add the header to the string
bigstring = bigstring + str(self.header)
# go over each row
for ii in range(self.nrows):
# create the string list
strlist = self._row_tostring(ii)
# treat the first line different
if ii:
# transform the listing to one string and append it
# put a linefeed at the beginning
bigstring = bigstring + '\n' + delim.join(strlist)
else:
# transform the listing to one string and append it
bigstring = bigstring + delim.join(strlist)
return bigstring
def __str__(self):
"""
Defines a string method for the object.
Gives a simple string method such that str(AsciiData)
does work. The formatting is close to the formatting
for the output to files.
@return: the string representation of the object
@rtype: string
"""
bigstring = ''
# take the object delimiter or ' '
if not self._delimiter:
delim = ' '
else:
delim = self._delimiter
# print the column information
if self.columnInfo:
for n, col in enumerate(self.columns):
bigstring += str(col.collheader(n,self._comment_char))
# print the header
if self.headerComment:
bigstring += str(self.header)
# go over each row
for ii in range(self.nrows):
# create the string list
strlist = self._row_tostring(ii)
# treat the first line different
if ii:
# transform the listing to one string and append it
# put a linefeed at the beginning
bigstring = bigstring + '\n' + delim.join(strlist)
else:
# transform the listing to one string and append it
bigstring = bigstring + delim.join(strlist)
# return the string
return bigstring
def flush(self):
"""
Prints the current status to the file.
        The method gives the opportunity to replace the data in
        the file with the current version in memory.
"""
if self.filename != None:
# well, that an easy job
self.writeto(self.filename)
else:
raise Exception('No filename given. Use "writeto()" instead.')
def writeto(self, filename, colInfo=None, headComment=None):
"""
Prints the AsciiData to a new file.
The method prints the current status of the
object to a new file. The name of the file
is given in the input. An already existing
file is replaced.
@param filename: the filename to write the object to
@type filename: string
"""
# check whether the parameter is set
if colInfo==None:
# if not, take the class variable
colInfo = self.columnInfo
# check whether the parameter is set
if headComment == None:
            # if not, take the class value
headComment = self.headerComment
# open the file
        fstream = open(filename, 'w+')
# open a printstream
nprinter = NicePrinter(fstream, delimiter=self._delimiter)
# print everything to the stream
self._print_tostream(nprinter, colInfo, headComment)
#close the file
fstream.close()
# use the given name as class filename
# if no one is yet defined
if self.filename == None:
self.filename = filename
def tofits(self):
"""
Transforms the AsciiData object to fits
@return: pointer to the fits object
@rtype: binary table HDU
"""
from . import asciifits
# create an AsciiFits object
asciiFits = asciifits.AsciiFits(self)
# return the table HDU
return asciiFits.tabhdu
def writetofits(self, fits_name=None):
"""
Prints the AsciiData to a new file.
@param fits_name: the name for the fits file
@type fits_name: string
@return: the name of the fits file
@rtype: string
"""
from . import asciifits
# check whether a file name is given
if fits_name == None:
            # check whether the instance has a filename
if self.filename == None:
# no automatic filename possible; raise error
raise Exception('Please specify a name for the fits-file!')
else:
# determine a filename for the fits
fits_name = self._get_fitsname(self.filename)
# create an AsciiFits object
asciiFits = asciifits.AsciiFits(self)
# write out the object onto disk
asciiFits.flush(fits_name)
# return the name of the fits object
return fits_name
def writetohtml(self, html_name=None, tr_attr=None, td_attr=None):
"""
Prints the AsciiData object as table in a html-file
@param filename: the filename to write the object to
@type filename: string
@param tr_attr: the attributes for the tr-tag
@type tr_att: string
@param td_attr: the attributes for the td-tag
@type td_att: string
@return: the name of the html-file
@rtype: string
"""
# check whether a file name is given
if html_name == None:
            # check whether the instance has a filename
if self.filename == None:
# no automatic filename possible; raise error
raise Exception('Please specify a name for the html-file!')
else:
# determine a filename for the html-file
html_name = self._get_htmlname(self.filename)
# determine the line start, element delimiter and the line end
l_start, l_delim, l_end = self._get_lineparams(tr_attr, td_attr)
# open the file
        fstream = open(html_name, 'w+')
# open a printstream
nprinter = NicePrinter(fstream, delimiter=l_delim,
linestart=l_start, linend=l_end)
# print the data
# go over each row
for ii in range(self.nrows):
# create the string list
strlist = self._row_tostring(ii)
# send the list to the printer
nprinter.print_list(strlist)
#close the file
fstream.close()
# return the filename
return html_name
def writetolatex(self, latex_name=None):
"""
Prints the AsciiData object as table in a latex-file
@param filename: the filename to write the object to
@type filename: string
@return: the name of the latex-file
@rtype: string
"""
# check whether a file name is given
if latex_name == None:
            # check whether the instance has a filename
if self.filename == None:
# no automatic filename possible; raise error
raise Exception('Please specify a name for the latex-file!')
else:
# determine a filename for the latex-file
latex_name = self._get_latexname(self.filename)
# open the file
        fstream = open(latex_name, 'w+')
# open a printstream with the correct parameters
# please note that each '\' must be protected by
# another '\' to be interpreted as string
nprinter = NicePrinter(fstream, delimiter='&', linend='\\\\\n')
# print the data
# go over each row
for ii in range(self.nrows):
# create the string list
strlist = self._row_tostring(ii)
# send the list to the printer
nprinter.print_list(strlist)
#close the file
fstream.close()
# return the filename
return latex_name
def info(self):
"""
Print class info to the screen.
The method gives some basic information on the
class. The output is directly written onto
the screen.
@return: the string representing the information
@rtype: string
"""
# define the return string
bigstring = ''
# assemble the basic table information
bigstring += 'File: ' + str(self.filename) +'\n'
bigstring += 'Ncols: ' + str(self.ncols) + '\n'
bigstring += 'Nrows: ' + str(self.nrows) + '\n'
bigstring += 'Delimiter: ' + str(self._delimiter) + '\n'
bigstring += 'Null value: ' + str(self._null) + '\n'
bigstring += 'comment_char: ' + str(self._comment_char) + '\n'
# go over each column and add
# the individual column info
for col in self.columns:
bigstring += col.info()
# return the result
return bigstring
def append(self, colname):
"""
Appends a new column to the object.
This method creates and appends a new column to the
object. The new column must be specified with a name.
        The new column has only Null entries.
@param colname: the name of the column
@type colname: string
"""
# check whether the column name does exist
# raise a warning if yes
if self.find(colname) > -1:
            err_msg = 'Column with name: '+colname+' already exists!'
raise Exception(err_msg)
# get the index of the new column
index = self.ncols
# create and append the new column
self.columns.append(AsciiColumn(nrows=self.nrows, colname=colname,
null=self._null))
# adjust the number of columns
self.ncols +=1
#return the index of the column
return index
def find(self, colname):
"""
Finds the column number for a name.
The method looks through all columns of the instance
for a matching column name. In case the column name exists,
the column index is returned. If the column name does
not exist, -1 is returned.
@param colname: the name of the column
@type colname: string
@return: the index of the column, or -1
@rtype: integer
"""
for index in range(len(self.columns)):
if self.columns[index].colname == colname:
return index
return -1
def delete(self, start, end=None):
"""
Deletes a row slice or element from all columns.
The method deletes one or several rows from all columns.
        It uses the __delitem__ operator
in the AsciiColumn class.
@param start: the starting row index
@type start: integer
@param end: the end row index
@type end: integer
"""
if end:
if start < self.nrows:
# go over each column
for col in self.columns:
# delete the row
del col[start: end]
# adjust the number of rows
self.nrows -= end-start
else:
# go over each column
for col in self.columns:
# delete the row
del col[start]
# adjust the number of rows
self.nrows -= 1
# make a lower limit to the number of rows
if self.nrows < 0:
self.nrows = 0
def newcomment_char(self, comment_char):
"""
Define a new comment_char string
        @param comment_char: the new comment character string
@type comment_char: string
"""
        # store the new comment character
self._comment_char = comment_char
self.header.set_comment_char(comment_char)
def newnull(self, newnull):
"""
Define a new null string
@param newnull: the new null string
@type newnull: string
"""
# store the new null element
self._null[0] = newnull
# store the new null in the columns
for column in self.columns:
column._null[0] = newnull
def newdelimiter(self, delimiter):
"""
Set a new delimiter string
@param delimiter: the new delimiter string
@type delimiter: string
"""
# set the new delimiter
self._delimiter = delimiter
# set the separator
self._separator = Separator(delimiter)
def insert(self, nrows, start=0):
"""
Inserts one or several rows
The method inserts one or several rows into all
columns of the class instance. The number of rows
as well as the positioning of the new rows are
specified in the input. The parameter 'start'
gives the index which the first inserted row
will have.
Setting "start=-1" means appending the rows at
the end of the columns
@param nrows: the number of rows to add
@type nrows: integer
@param start: the position of the inserted rows
@type start: integer
"""
# go over all columns
for col in self.columns:
# add none elements at the end
for ii in range(nrows):
col.add_element(None)
# check whether the new rows are inserted inside
# the old rows, then the elements must be moved
if start < self.nrows and start != -1:
# go over all columns
for col in self.columns:
# repeat over rows to be inserted
for ii in range(self.nrows-start):
# reorder the column elements
index = self.nrows - ii - 1
col[index+nrows] = col[index]
# repeat over rows to be inserted
for ii in range(nrows):
# insert None in the new rows
index = ii + start
col[index] = None
# update the number of rows
self.nrows = self.columns[0].get_nrows()
def sort(self, colname, descending=0, ordered=0):
"""
Sorts the entries along the values in one column
The method sorts all columns of the AsciiData object according
to the order in one specified column. Both, sorting in ascending
and descending order is possible.
@param colname: the column to use for sorting
@type colname: string/integer
@param descending: indicates ascending (=0) or descending (=1) sorting
@type descending: integer
@param ordered: indicates ordered (1) or non-ordered sorting
@type ordered: integer
"""
# initialize a temporary array
sort_data = []
# transfer the data from the sort column
# to the temporary array
for index in range(self.nrows):
sort_data.append(self[colname][index])
# create the sorting index
sorter = ColumnIndex()
# sort according to the data in the temporary array
sorter.sort(sort_data, descending, ordered)
# go over all colums
for index in range(self.ncols):
# reorder the data in the column according
# to the sorting order
self[index]._data = sorter.enindex(self[index]._data)
def rstrip(self,x=None):
'''
Removes trailing rows which contain the value of x
null is default (and the only value which really works)
syntactic sugar for _strip(-1,x)
        @param x: Data value in rows to strip off - defaults to Null
@type x: any legal asciidata type
'''
self._strip(-1,x)
def lstrip(self,x=None):
'''
Removes leading rows which contain the value of x
null is default (and the only value which really works)
syntactic sugar for _strip(0,x)
        @param x: Data value in rows to strip off - defaults to Null
@type x: any legal asciidata type
'''
self._strip(0,x)
def strip(self,x=None):
'''
Removes both leading and trailing rows which contain the value of x
null is default (and the only value which really works)
syntactic sugar for _strip
        @param x: Data value in rows to strip off - defaults to Null
@type x: any legal asciidata type
'''
self._strip(-1,x)
self._strip(0,x)
def toSExtractor(self):
"""
        convenience function to set the output to SExtractor style
"""
self.headerComment = 1
self.columnInfo = 1
self.newcomment_char('#')
self.newdelimiter(' ')
def toplain(self):
"""
        convenience procedure to toggle to plain ASCII output
delimiters are not changed
"""
self.headerComment = 1
self.columnInfo = 0
def _get_fitsname(self, filename):
"""
Determines the fitsname for a given file name
@param filename: the input filename
@type filename: string
@return: the name of the fits file
@rtype: string
"""
# search for the extension
dot_pos = filename.rfind('.')
# if an extension exists
if dot_pos > -1:
# replace the old extension with '.fits'
fits_name = filename[:dot_pos] + '.fits'
else:
# append the extension '.fits'
fits_name = filename + '.fits'
# return the fits name
return fits_name
def _get_htmlname(self, filename):
"""
Determines the html name for a given file name
@param filename: the input filename
@type filename: string
@return: the name for the html file
@rtype: string
"""
# search for the extension
dot_pos = filename.rfind('.')
# if an extension exists
if dot_pos > -1:
# replace the old extension with '.html'
html_name = filename[:dot_pos] + '.html'
else:
# append the extension '.html'
html_name = filename + '.html'
# return the html name
return html_name
def _get_latexname(self, filename):
"""
Determines the latex filename for a given file name
@param filename: the input filename
@type filename: string
@return: the name for the latex file
@rtype: string
"""
# search for the extension
dot_pos = filename.rfind('.')
# if an extension exists
if dot_pos > -1:
            # replace the old extension with '.tex'
latex_name = filename[:dot_pos] + '.tex'
else:
            # append the extension '.tex'
latex_name = filename + '.tex'
        # return the latex name
return latex_name
def _get_lineparams(self, tr_attr=None, td_attr=None):
"""
        Composes the line start, delimiter and line end for html-table output
@param tr_attr: attributes for the tr-tag
@type tr_attr: string
@param td_attr: attributes for the td-tag
@type td_attr: string
@return: the html-table linestart, delimiter and lineend
@rtype: string, string, string
"""
# form the string for the tr-attributes
if tr_attr == None:
str_tr_add = ''
else:
str_tr_add = ' ' + tr_attr
# form the string for the td-attributes
if td_attr == None:
str_td_add = ''
else:
str_td_add = ' ' + td_attr
# compose linestart, delimiter and lineend
lstart = '<tr'+str_tr_add+'><td'+str_td_add+'>'
delim = '</td><td'+str_td_add+'>'
lend = '</td></tr>\n'
# return linestart, delimiter, lineend
return lstart, delim, lend
def _loc_column(self, element):
"""
Localizes a column
The method localizes the column from any possible input.
Possible input is either the column name or column index.
Basic checks are done whether the column exists.
@param element: either column index or name
@type element: string/integer
@return: the column index
@rtype: integer
"""
# create an element
elem = Element(element)
# check the types and derive the column index
if elem.get_type() == int:
# check for -1, which indicates the last column
if element == -1:
# set the index of the last column
index = self.ncols-1
else:
# set the index to the input index
index = element
elif elem.get_type() == str:
index = self.find(element)
# check whether the column index exists
# raise an error if not
if index > self.ncols-1:
err_msg = 'Index: '+str(index)+' is larger than ncols: ' +str(self.ncols)+'!!'
raise Exception(err_msg)
elif index < 0:
raise ColumnError('Column name: "'+element+'" does not exist!')
# return the index
return index
def _load_columns(self, filename, null, comment_char, separator):
"""
Transforms the content of a file into columns
Opens the file, defines the columns, adds all data rows,
and returns the columns.
@param filename: the filename to create the AsciiData from
@type filename: string
@param null: string to be interpreted as NULL
@type null: string
@param separator: string to be used as delimiter
@type separator: string
@param comment_char: string to be used as comment character
@type comment_char: string
@return: the columns loaded
@rtype: [AsciiColumn]
"""
undef_cols = []
collist = []
# open the file, and parse through all rows
        for line in open(filename, 'r'):
# throw away trailing and leading whitespaces
str_line = line.strip()
if len(str_line) < 1 or str_line[0] == comment_char:
continue
            # if columns exist, add a row
if collist:
self._add_row(collist, line, null, separator)
# if columns do not exist, define them
else:
collist = self._define_cols(line, null, separator)
# return the column list
return collist
def _find_undefined_cols(self, collist):
"""
Finds undefined columns
The method finds undefined columns in a column list.
An undefined column is a column with the flag "self._defined"
not set. This means that column type and column format
are not specified, and the column elements are Null.
        The indices of the undefined columns are returned as a list
@param collist: the list of existing columns
@type collist: list of AsciiColumns
@return: a list with the indices of undefined columns
@rtype: [integer]
"""
undefined = []
# go over each column
index=0
for col in collist:
# check whether the column is defined
# append the index to the list if not
if not col.get_defined():
undefined.append(index)
# increment the index
index = index+1
# return the list
return undefined
def _add_row(self, collist, line, null, separator):
"""
Adds a line from the file to the column list.
The method gets a line from the input file.
The line is split up into its items.
Then each item is added to the column
it belongs to. Items matching the NULL
string are added as "None". A delimiter
is taken into account in the splitting,
if specified.
@param collist: the list of existing columns
@type collist: list of AsciiColumns
@param line: the line to be added to the columns
@type line: string
        @param null: string to be interpreted as NULL
@type null: string
@param separator: string to be used as delimiter
@type separator: string
"""
        # split the line, either according to a whitespace,
# or according to a specified delimiter
items = separator.separate(line)
# check whether there is an item for each column
if len(collist) != len(items):
err_msg = "Number of columns does not fit to number of items in " + line
raise Exception(err_msg)
# go over each item
index = 0
for item in items:
# check whether the item is NULL.
# add the item to the column,
# using 'None' for NULL items
if null.count(item.strip()) > 0:
collist[index].add_element(None)
else:
collist[index].add_element(item)
# increment the index
index += 1
def _define_cols(self, line, null, separator):
"""
Defines the columns from an input line.
The method splits an ascii line from the input file into its
items. For each item a new column is created and added
to a column list. The column list is finally returned.
@param line: the line to be added to the columns
@type line: string
@param null: string to be interpreted as NULL
@type null: string
@param separator: string to be used as delimiter
@type separator: string
@return: the columns created
@rtype: [AsciiColumn]
"""
collist = []
# split the line, either according to whitespace,
# or according to a specified delimiter
items = separator.separate(line)
# go over each item, and create a column
# for each. NULL items are transformed to 'None'
index = 0
for item in items:
# set the default column unit and comment
colunit = ''
colcomment = ''
# check whether there is column
# information from the header
if self.header.SExtractorFlag:
# extract the header information
colname,colunit,colcomment = self.header.getCollInfo(index)
else:
# make the default column name
colname = self._def_colname(index)
# check whether the element is a NULL-value
if null.count(item.strip()) > 0:
# append an undefined column
collist.append(AsciiColumn(element=[None], colname=colname,
null=null))
else:
# append a defined column
collist.append(AsciiColumn(element=[item], colname=colname,
null=null))
# transfer the rest of the column information
if colunit:
collist[-1].set_unit(colunit)
if colcomment:
collist[-1].set_colcomment(colcomment)
# increment the index
index += 1
# return the column list
return collist
def _print_tostream(self, nprinter, colInfo, headComment):
"""
Prints the AsciiData to a stream
The method forms for each row in the AsciiData a list
with formatted strings, each list element representing
one element. The list is sent to a printing stream
which is responsible for the output.
@param nprinter: the NicePrinter object with the stream
@type nprinter: NicePrinter
@param colInfo: whether to print the column information header
@type colInfo: boolean
@param headComment: whether to print the header comment
@type headComment: boolean
"""
# print the column information
if colInfo:
for n, col in enumerate(self.columns):
nprinter.print_string(col.collheader(n,self._comment_char))
# print the header
if headComment:
nprinter.print_string(str(self.header))
# print the data
# go over each row
for ii in range(self.nrows):
# create the string list
strlist = self._row_tostring(ii)
# send the list to the printer
nprinter.print_list(strlist)
def _row_tostring(self, index):
"""
Creates the formatted string list for one row.
The method extracts from each column the formatted
string representation of the element in a specified
row. The list of strings is returned.
@param index: the row index to format
@type index: integer
@return: the list with formatted strings
@rtype: [string]
"""
# initialize the list
strlist = []
# go over each column
for jj in range(self.ncols):
# append the string of the requested
# element to the list
strlist.append(self.columns[jj].fprint_elem(index))
# return the list
return strlist
def _strip(self,rowindex, x=None):
'''
Removes leading or trailing rows whose elements all equal x;
x=None (null) is the default (and the only value which really works)
@param rowindex: select if it is lstrip (0) or rstrip (-1)
@type rowindex: int
'''
while self.nrows>0:
equal = True
for col in self.columns:
equal = equal and (col[rowindex] == x)
if equal:
self.delete(rowindex)
else:
break
|
spacetelescopeREPO_NAMEstsdas_strippedPATH_START.@stsdas_stripped_extracted@stsdas_stripped-master@stsdas@pkg@analysis@slitless@axe@[email protected]@.PATH_END.py
|
{
"filename": "_tickcolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2dcontour/colorbar/_tickcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="tickcolor",
parent_name="histogram2dcontour.colorbar",
**kwargs,
):
super(TickcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@histogram2dcontour@colorbar@[email protected]_END.py
|
{
"filename": "saved_model_test.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/backend/tensorflow/saved_model_test.py",
"type": "Python"
}
|
"""Tests for SavedModel functionality under tf implementation."""
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import metrics
from keras.src import models
from keras.src import ops
from keras.src import optimizers
from keras.src import testing
from keras.src.saving import object_registration
from keras.src.testing.test_utils import named_product
@object_registration.register_keras_serializable(package="my_package")
class CustomModelX(models.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dense1 = layers.Dense(1)
self.dense2 = layers.Dense(1)
def call(self, inputs):
out = self.dense1(inputs)
return self.dense2(out)
def one(self):
return 1
@object_registration.register_keras_serializable(package="my_package")
class CustomSignatureModel(models.Model):
def __init__(self):
super(CustomSignatureModel, self).__init__()
self.v = tf.Variable(1.0)
@tf.function
def __call__(self, x):
return x * self.v
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def mutate(self, new_v):
self.v.assign(new_v)
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The SavedModel test can only run with TF backend.",
)
class SavedModelTest(testing.TestCase):
def test_sequential(self):
model = models.Sequential([layers.Dense(1)])
model.compile(loss="mse", optimizer="adam")
X_train = np.random.rand(100, 3)
y_train = np.random.rand(100, 1)
model.fit(X_train, y_train)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
model(X_train),
restored_model.signatures["serving_default"](
tf.convert_to_tensor(X_train, dtype=tf.float32)
)["output_0"],
rtol=1e-4,
atol=1e-4,
)
def test_functional(self):
inputs = layers.Input(shape=(3,))
x = layers.Dense(1, name="first_dense")(inputs)
outputs = layers.Dense(1, name="second_dense")(x)
model = models.Model(inputs, outputs)
model.compile(
optimizer="adam",
loss="mse",
)
X_train = np.random.rand(100, 3)
y_train = np.random.rand(100, 1)
model.fit(X_train, y_train)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
model(X_train),
restored_model.signatures["serving_default"](
tf.convert_to_tensor(X_train, dtype=tf.float32)
)["output_0"],
rtol=1e-4,
atol=1e-4,
)
def test_subclassed(self):
model = CustomModelX()
model.compile(
optimizer="adam",
loss="mse",
metrics=[metrics.Hinge(), "mse"],
)
X_train = np.random.rand(100, 3)
y_train = np.random.rand(100, 1)
model.fit(X_train, y_train)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
model(X_train),
restored_model.signatures["serving_default"](
tf.convert_to_tensor(X_train, dtype=tf.float32)
)["output_0"],
rtol=1e-4,
atol=1e-4,
)
def test_custom_model_and_layer(self):
@object_registration.register_keras_serializable(package="my_package")
class CustomLayer(layers.Layer):
def __call__(self, inputs):
return inputs
@object_registration.register_keras_serializable(package="my_package")
class Model(models.Model):
def __init__(self):
super().__init__()
self.layer = CustomLayer()
@tf.function(input_signature=[tf.TensorSpec([None, 1])])
def call(self, inputs):
return self.layer(inputs)
model = Model()
inp = np.array([[1.0]])
result = model(inp)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
result,
restored_model.call(inp),
rtol=1e-4,
atol=1e-4,
)
@parameterized.named_parameters(
named_product(struct_type=["tuple", "array", "dict"])
)
def test_model_with_input_structure(self, struct_type):
class TupleModel(models.Model):
def call(self, inputs):
x, y = inputs
return x + ops.mean(y, axis=1)
class ArrayModel(models.Model):
def call(self, inputs):
x = inputs[0]
y = inputs[1]
return x + ops.mean(y, axis=1)
class DictModel(models.Model):
def call(self, inputs):
x = inputs["x"]
y = inputs["y"]
return x + ops.mean(y, axis=1)
input_x = tf.constant([1.0])
input_y = tf.constant([[1.0, 0.0, 2.0]])
if struct_type == "tuple":
model = TupleModel()
inputs = (input_x, input_y)
elif struct_type == "array":
model = ArrayModel()
inputs = [input_x, input_y]
elif struct_type == "dict":
model = DictModel()
inputs = {"x": input_x, "y": input_y}
result = model(inputs)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
outputs = restored_model.signatures["serving_default"](
inputs=input_x, inputs_1=input_y
)
self.assertAllClose(result, outputs["output_0"], rtol=1e-4, atol=1e-4)
def test_multi_input_model(self):
input_1 = layers.Input(shape=(3,))
input_2 = layers.Input(shape=(5,))
model = models.Model([input_1, input_2], [input_1, input_2])
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
input_arr_1 = np.random.random((1, 3)).astype("float32")
input_arr_2 = np.random.random((1, 5)).astype("float32")
outputs = restored_model.signatures["serving_default"](
inputs=tf.convert_to_tensor(input_arr_1, dtype=tf.float32),
inputs_1=tf.convert_to_tensor(input_arr_2, dtype=tf.float32),
)
self.assertAllClose(
input_arr_1, outputs["output_0"], rtol=1e-4, atol=1e-4
)
self.assertAllClose(
input_arr_2, outputs["output_1"], rtol=1e-4, atol=1e-4
)
def test_multi_input_custom_model_and_layer(self):
@object_registration.register_keras_serializable(package="my_package")
class CustomLayer(layers.Layer):
def build(self, *input_shape):
self.built = True
def call(self, *input_list):
self.add_loss(input_list[-2] * 2)
return sum(input_list)
@object_registration.register_keras_serializable(package="my_package")
class CustomModel(models.Model):
def build(self, *input_shape):
self.layer = CustomLayer()
self.layer.build(*input_shape)
self.built = True
@tf.function
def call(self, *inputs):
inputs = list(inputs)
return self.layer(*inputs)
model = CustomModel()
inp = [
tf.constant(i, shape=[1, 1], dtype=tf.float32) for i in range(1, 4)
]
expected = model(*inp)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
output = restored_model.call(*inp)
self.assertAllClose(expected, output, rtol=1e-4, atol=1e-4)
def test_list_trackable_children_tracking(self):
@object_registration.register_keras_serializable(package="my_package")
class CustomLayerList(layers.Layer):
def __init__(self):
super().__init__()
self.sublayers = [
layers.Dense(2),
layers.Dense(2),
]
def call(self, inputs):
x = inputs
for sublayer in self.sublayers:
x = sublayer(x)
return x
inputs = layers.Input(shape=(1,))
outputs = CustomLayerList()(inputs)
model = models.Model(inputs, outputs)
inp = np.array([[1.0]])
expected = model(inp)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
expected,
restored_model.signatures["serving_default"](
tf.convert_to_tensor(inp, dtype=tf.float32)
)["output_0"],
rtol=1e-4,
atol=1e-4,
)
def test_dict_trackable_children_tracking(self):
@object_registration.register_keras_serializable(package="my_package")
class CustomLayerDict(layers.Layer):
def __init__(self):
super().__init__()
self.sublayers = {
"first_layer": layers.Dense(2),
"second_layer": layers.Dense(2),
}
def call(self, inputs):
x = inputs
for key, sublayer in self.sublayers.items():
x = sublayer(x)
return x
inputs = layers.Input(shape=(1,))
outputs = CustomLayerDict()(inputs)
model = models.Model(inputs, outputs)
inp = np.array([[1.0]])
expected = model(inp)
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertAllClose(
expected,
restored_model.signatures["serving_default"](
tf.convert_to_tensor(inp, dtype=tf.float32)
)["output_0"],
rtol=1e-4,
atol=1e-4,
)
def test_fixed_signature_string_dtype(self):
@object_registration.register_keras_serializable(package="my_package")
class Adder(models.Model):
@tf.function(
input_signature=[tf.TensorSpec(shape=[], dtype=tf.string)]
)
def concat(self, x):
return x + x
model = Adder()
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(model, path)
restored_model = tf.saved_model.load(path)
self.assertEqual(model.concat("hello"), restored_model.concat("hello"))
def test_non_fixed_signature_string_dtype(self):
@object_registration.register_keras_serializable(package="my_package")
class Adder(models.Model):
@tf.function
def concat(self, x):
return x + x
model = Adder()
no_fn_path = os.path.join(self.get_temp_dir(), "my_keras_model_no_fn")
tf.saved_model.save(model, no_fn_path)
restored_model = tf.saved_model.load(no_fn_path)
with self.assertRaisesRegex(ValueError, "zero restored functions"):
_ = restored_model.concat("hello")
path = os.path.join(self.get_temp_dir(), "my_keras_model")
tf.saved_model.save(
model,
path,
signatures=model.concat.get_concrete_function(
tf.TensorSpec(shape=[], dtype=tf.string, name="string_input")
),
)
restored_model = tf.saved_model.load(path)
self.assertEqual(model.concat("hello"), restored_model.concat("hello"))
def test_fine_tuning(self):
model = CustomSignatureModel()
model_no_signatures_path = os.path.join(
self.get_temp_dir(), "model_no_signatures"
)
_ = model(tf.constant(0.0))
tf.saved_model.save(model, model_no_signatures_path)
restored_model = tf.saved_model.load(model_no_signatures_path)
self.assertLen(list(restored_model.signatures.keys()), 0)
self.assertEqual(restored_model(tf.constant(3.0)).numpy(), 3)
restored_model.mutate(tf.constant(2.0))
self.assertEqual(restored_model(tf.constant(3.0)).numpy(), 6)
optimizer = optimizers.SGD(0.05)
def train_step():
with tf.GradientTape() as tape:
loss = (10.0 - restored_model(tf.constant(2.0))) ** 2
variables = tape.watched_variables()
grads = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(grads, variables))
return loss
for _ in range(10):
# "v" approaches 5, "loss" approaches 0
loss = train_step()
self.assertAllClose(loss, 0.0, rtol=1e-2, atol=1e-2)
self.assertAllClose(restored_model.v.numpy(), 5.0, rtol=1e-2, atol=1e-2)
def test_signatures_path(self):
model = CustomSignatureModel()
model_with_signature_path = os.path.join(
self.get_temp_dir(), "model_with_signature"
)
call = model.__call__.get_concrete_function(
tf.TensorSpec(None, tf.float32)
)
tf.saved_model.save(model, model_with_signature_path, signatures=call)
restored_model = tf.saved_model.load(model_with_signature_path)
self.assertEqual(
list(restored_model.signatures.keys()), ["serving_default"]
)
def test_multiple_signatures_dict_path(self):
model = CustomSignatureModel()
model_multiple_signatures_path = os.path.join(
self.get_temp_dir(), "model_with_multiple_signatures"
)
call = model.__call__.get_concrete_function(
tf.TensorSpec(None, tf.float32)
)
signatures = {
"serving_default": call,
"array_input": model.__call__.get_concrete_function(
tf.TensorSpec([None], tf.float32)
),
}
tf.saved_model.save(
model, model_multiple_signatures_path, signatures=signatures
)
restored_model = tf.saved_model.load(model_multiple_signatures_path)
self.assertEqual(
list(restored_model.signatures.keys()),
["serving_default", "array_input"],
)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@backend@tensorflow@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "CosmoLike/cocoa",
"repo_path": "cocoa_extracted/cocoa-main/cocoa_installation_libraries/cobaya_changes/cobaya/likelihoods/planck_2018_lowl/__init__.py",
"type": "Python"
}
|
CosmoLikeREPO_NAMEcocoaPATH_START.@cocoa_extracted@cocoa-main@cocoa_installation_libraries@cobaya_changes@cobaya@likelihoods@planck_2018_lowl@[email protected]_END.py
|
|
{
"filename": "_style.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattercarpet/marker/colorbar/title/font/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="style",
parent_name="scattercarpet.marker.colorbar.title.font",
**kwargs,
):
super(StyleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["normal", "italic"]),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@scattercarpet@marker@colorbar@title@font@[email protected]_END.py
|
{
"filename": "stats.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/transitleastsquares_ES/stats.py",
"type": "Python"
}
|
from __future__ import division, print_function
import numpy
from os import path
import transitleastsquares_ES.tls_constants as tls_constants
from transitleastsquares_ES.helpers import running_median, transit_mask
from tqdm import tqdm
from transitleastsquares_ES.core import fold
def FAP(SDE):
"""Returns FAP (False Alarm Probability) for a given SDE"""
data = numpy.genfromtxt(
path.join(tls_constants.resources_dir, "fap.csv"),
dtype="f8, f8",
delimiter=",",
names=["FAP", "SDE"],
)
return data["FAP"][numpy.argmax(data["SDE"] > SDE)]
def rp_rs_from_depth(depth, law, params):
"""Takes the maximum transit depth, limb-darkening law and parameters
Returns R_P / R_S (ratio of planetary to stellar radius)
Source: Heller 2019, https://arxiv.org/abs/1901.01730"""
# Validations:
# - LD law must exist
# - All parameters must be floats or ints
# - All parameters must be given in the correct quantity for the law
if len(params) == 1:
params = float(params[0])
if not isinstance(params, (float, int)) and not all(
isinstance(x, (float, int)) for x in params
):
raise ValueError("All limb-darkening parameters must be numbers")
laws = "linear, quadratic, squareroot, logarithmic, nonlinear"
if law not in laws:
raise ValueError("Please provide a supported limb-darkening law:", laws)
if law == "linear" and not isinstance(params, float):
raise ValueError("Please provide exactly one parameter")
if law in "quadratic, logarithmic, squareroot" and len(params) != 2:
raise ValueError("Please provide exactly two limb-darkening parameters")
if law == "nonlinear" and len(params) != 4:
raise ValueError("Please provide exactly four limb-darkening parameters")
# Actual calculations of the return value
if law == "linear":
return (depth * (1 - params / 3)) ** (1 / 2)
if law == "quadratic":
return (depth * (1 - params[0] / 3 - params[1] / 6)) ** (1 / 2)
if law == "squareroot":
return (depth * (1 - params[0] / 3 - params[1] / 5)) ** (1 / 2)
if law == "logarithmic":
return (depth * (1 + 2 * params[1] / 9 - params[0] / 3)) ** (1 / 2)
if law == "nonlinear":
return (
depth
* (1 - params[0] / 5 - params[1] / 3 - 3 * params[2] / 7 - params[3] / 2)
) ** (1 / 2)
def pink_noise(data, width):
std = 0
datapoints = len(data) - width + 1
for i in range(datapoints):
std += numpy.std(data[i : i + width]) / width ** 0.5
return std / datapoints
def period_uncertainty(periods, power):
# Determine estimate for uncertainty in period
# Method: Full width at half maximum
try:
# Upper limit
index_highest_power = numpy.argmax(power)
idx = index_highest_power
while True:
idx += 1
if power[idx] <= 0.5 * power[index_highest_power]:
idx_upper = idx
break
# Lower limit
idx = index_highest_power
while True:
idx -= 1
if power[idx] <= 0.5 * power[index_highest_power]:
idx_lower = idx
break
period_uncertainty = 0.5 * (periods[idx_upper] - periods[idx_lower])
except:
period_uncertainty = float("inf")
return period_uncertainty
def spectra(chi2, oversampling_factor):
SR = numpy.min(chi2) / chi2
SDE_raw = (1 - numpy.mean(SR)) / numpy.std(SR)
# Scale SDE_power from 0 to SDE_raw
power_raw = SR - numpy.mean(SR) # shift down to the mean being zero
scale = SDE_raw / numpy.max(power_raw) # scale factor to touch max=SDE_raw
power_raw = power_raw * scale
# Detrended SDE, named "power"
kernel = oversampling_factor * tls_constants.SDE_MEDIAN_KERNEL_SIZE
if kernel % 2 == 0:
kernel = kernel + 1
if len(power_raw) > 2 * kernel:
my_median = running_median(power_raw, kernel)
power = power_raw - my_median
# Re-normalize to range between median = 0 and peak = SDE
# shift down to the mean being zero
power = power - numpy.mean(power)
SDE = numpy.max(power / numpy.std(power))
# scale factor to touch max=SDE
scale = SDE / numpy.max(power)
power = power * scale
else:
power = power_raw
SDE = SDE_raw
return SR, power_raw, power, SDE_raw, SDE
def final_T0_fit(signal, depth, t, y, dy, period, T0_fit_margin, show_progress_bar, verbose):
""" After the search, we know the best period, width and duration.
But T0 was not preserved due to speed optimizations.
Thus, iterate over T0s using the given parameters
Fold to all T0s so that the transit is expected at phase = 0"""
dur = len(signal)
scale = tls_constants.SIGNAL_DEPTH / (1 - depth)
signal = 1 - ((1 - signal) / scale)
samples_per_period = numpy.size(y)
if T0_fit_margin == 0:
points = samples_per_period
else:
step_factor = T0_fit_margin * dur
points = int(samples_per_period / step_factor)
if points > samples_per_period:
points = samples_per_period
# Create all possible T0s from the start of [t] to [t+period] in [samples] steps
T0_array = numpy.linspace(
start=numpy.min(t), stop=numpy.min(t) + period, num=points
)
# Avoid showing progress bar when expected runtime is short
if points > tls_constants.PROGRESSBAR_THRESHOLD and show_progress_bar:
show_progress_info = True
else:
show_progress_info = False
residuals_lowest = float("inf")
T0 = 0
if verbose:
print("Searching for best T0 for period", format(period, ".5f"), "days")
if show_progress_info:
pbar2 = tqdm(total=numpy.size(T0_array))
signal_ootr = numpy.ones(len(y[dur:]))
# Future speed improvement possible: Add multiprocessing. Will be slower for
# short data and T0_FIT_MARGIN > 0.01, but faster for large data with dense
# sampling (T0_FIT_MARGIN=0)
for Tx in T0_array:
phases = fold(time=t, period=period, T0=Tx)
sort_index = numpy.argsort(phases, kind="mergesort") # 75% of CPU time
phases = phases[sort_index]
flux = y[sort_index]
dy = dy[sort_index]
# Roll so that the signal starts at index 0
# Numpy roll is slow, so we replace it with less elegant concatenate
# flux = numpy.roll(flux, roll_cadences)
# dy = numpy.roll(dy, roll_cadences)
roll_cadences = int(dur / 2) + 1
flux = numpy.concatenate([flux[-roll_cadences:], flux[:-roll_cadences]])
dy = numpy.concatenate([dy[-roll_cadences:], dy[:-roll_cadences]])
residuals_intransit = numpy.sum((flux[:dur] - signal) ** 2 / dy[:dur] ** 2)
residuals_ootr = numpy.sum((flux[dur:] - signal_ootr) ** 2 / dy[dur:] ** 2)
residuals_total = residuals_intransit + residuals_ootr
if show_progress_info:
pbar2.update(1)
if residuals_total < residuals_lowest:
residuals_lowest = residuals_total
T0 = Tx
if show_progress_info:
pbar2.close()
return T0
def model_lightcurve(transit_times, period, t, model_transit_single):
"""Creates the model light curve for the full unfolded dataset"""
# Append one more transit after and before end of nominal time series
# to fully cover beginning and end with out of transit calculations
earlier_tt = transit_times[0] - period
extended_transit_times = numpy.append(earlier_tt, transit_times)
next_tt = transit_times[-1] + period
extended_transit_times = numpy.append(extended_transit_times, next_tt)
full_x_array = numpy.array([])
full_y_array = numpy.array([])
rounds = len(extended_transit_times)
internal_samples = (
int(len(t) / len(transit_times))
) * tls_constants.OVERSAMPLE_MODEL_LIGHT_CURVE
# Append all periods
for i in range(rounds):
xmin = extended_transit_times[i] - period / 2
xmax = extended_transit_times[i] + period / 2
x_array = numpy.linspace(xmin, xmax, internal_samples)
full_x_array = numpy.append(full_x_array, x_array)
full_y_array = numpy.append(full_y_array, model_transit_single)
if numpy.all(numpy.isnan(full_x_array)):
return None, None
else: # Determine start and end of relevant time series, and crop it
start_cadence = numpy.nanargmax(full_x_array > min(t))
stop_cadence = numpy.nanargmax(full_x_array > max(t))
full_x_array = full_x_array[start_cadence:stop_cadence]
full_y_array = full_y_array[start_cadence:stop_cadence]
model_lightcurve_model = full_y_array
model_lightcurve_time = full_x_array
return model_lightcurve_model, model_lightcurve_time
def all_transit_times(T0, t, period):
"""Return all mid-transit times within t"""
if T0 < min(t):
transit_times = [T0 + period]
else:
transit_times = [T0]
previous_transit_time = transit_times[0]
transit_number = 0
while True:
transit_number = transit_number + 1
next_transit_time = previous_transit_time + period
if next_transit_time < (numpy.min(t) + (numpy.max(t) - numpy.min(t))):
transit_times.append(next_transit_time)
previous_transit_time = next_transit_time
else:
break
return transit_times
def calculate_transit_duration_in_days(t, period, transit_times, duration):
"""Return estimate for transit duration in days"""
# Difference between (time series duration / period) and epochs
transit_duration_in_days_raw = (
duration * calculate_stretch(t, period, transit_times) * period
)
# Correct the duration for gaps in the data
transit_duration_in_days = transit_duration_in_days_raw * calculate_fill_factor(t)
return transit_duration_in_days
def calculate_stretch(t, period, transit_times):
"""Return difference between (time series duration / period) and epochs
Example:
- Time series duration = 100 days
- Period = 40 days
- Epochs = 2 at t0s = [30, 70] days
==> stretch = (100 / 40) / 2 = 1.25"""
duration_timeseries = (numpy.max(t) - numpy.min(t)) / period
epochs = len(transit_times)
stretch = duration_timeseries / epochs
return stretch
def calculate_fill_factor(t):
"""Return the fraction of existing cadences, assuming constant cadences"""
average_cadence = numpy.median(numpy.diff(t))
span = max(t) - min(t)
theoretical_cadences = span / average_cadence
fill_factor = (len(t) - 1) / theoretical_cadences
return fill_factor
def count_stats(t, y, transit_times, transit_duration_in_days):
"""Return:
* in_transit_count: Number of data points in transit (phase-folded)
* after_transit_count: Number of data points in a bin of transit duration,
after transit (phase-folded)
* before_transit_count: Number of data points in a bin of transit duration,
before transit (phase-folded)
"""
in_transit_count = 0
after_transit_count = 0
before_transit_count = 0
for mid_transit in transit_times:
T0 = (
mid_transit - 1.5 * transit_duration_in_days
) # start of 1 transit dur before ingress
T1 = mid_transit - 0.5 * transit_duration_in_days # start of ingress
T4 = mid_transit + 0.5 * transit_duration_in_days # end of egress
T5 = (
mid_transit + 1.5 * transit_duration_in_days
) # end of egress + 1 transit dur
if T0 > min(t) and T5 < max(t): # inside time
idx_intransit = numpy.where(numpy.logical_and(t > T1, t < T4))
idx_before_transit = numpy.where(numpy.logical_and(t > T0, t < T1))
idx_after_transit = numpy.where(numpy.logical_and(t > T4, t < T5))
points_in_this_in_transit = len(y[idx_intransit])
points_in_this_before_transit = len(y[idx_before_transit])
points_in_this_after_transit = len(y[idx_after_transit])
in_transit_count += points_in_this_in_transit
before_transit_count += points_in_this_before_transit
after_transit_count += points_in_this_after_transit
return in_transit_count, after_transit_count, before_transit_count
def intransit_stats(t, y, transit_times, transit_duration_in_days):
"""Return all intransit odd and even flux points"""
all_flux_intransit_odd = numpy.array([])
all_flux_intransit_even = numpy.array([])
all_flux_intransit = numpy.array([])
all_idx_intransit = numpy.array([])
per_transit_count = numpy.zeros([len(transit_times)])
transit_depths = numpy.zeros([len(transit_times)])
transit_depths_uncertainties = numpy.zeros([len(transit_times)])
for i in range(len(transit_times)):
depth_mean_odd = numpy.nan
depth_mean_even = numpy.nan
depth_mean_odd_std = numpy.nan
depth_mean_even_std = numpy.nan
mid_transit = transit_times[i]
tmin = mid_transit - 0.5 * transit_duration_in_days
tmax = mid_transit + 0.5 * transit_duration_in_days
if numpy.isnan(tmin) or numpy.isnan(tmax):
idx_intransit = []
flux_intransit = []
mean_flux = numpy.nan
else:
idx_intransit = numpy.where(numpy.logical_and(t > tmin, t < tmax))
flux_intransit = y[idx_intransit]
if len(y[idx_intransit]) > 0:
mean_flux = numpy.mean(y[idx_intransit])
else:
mean_flux = numpy.nan
intransit_points = numpy.size(y[idx_intransit])
transit_depths[i] = mean_flux
if len(y[idx_intransit]) > 0:
transit_depths_uncertainties[i] = numpy.std(y[idx_intransit]) / numpy.sqrt(
intransit_points
)
else:
transit_depths_uncertainties[i] = numpy.nan
per_transit_count[i] = intransit_points
# Check if transit odd/even to collect the flux for the mean calculations
if i % 2 == 0: # even
all_flux_intransit_even = numpy.append(
all_flux_intransit_even, flux_intransit
)
else: # odd
all_flux_intransit_odd = numpy.append(
all_flux_intransit_odd, flux_intransit
)
if len(all_flux_intransit_odd) > 0:
depth_mean_odd = numpy.mean(all_flux_intransit_odd)
depth_mean_odd_std = numpy.std(all_flux_intransit_odd) / len(all_flux_intransit_odd) ** 0.5
if len(all_flux_intransit_even) > 0:
depth_mean_even = numpy.mean(all_flux_intransit_even)
depth_mean_even_std = numpy.std(all_flux_intransit_even) / len(all_flux_intransit_even) ** 0.5
return (
depth_mean_odd,
depth_mean_even,
depth_mean_odd_std,
depth_mean_even_std,
all_flux_intransit_odd,
all_flux_intransit_even,
per_transit_count,
transit_depths,
transit_depths_uncertainties,
)
def snr_stats(
t,
y,
period,
duration,
T0,
transit_times,
transit_duration_in_days,
per_transit_count,
):
"""Return snr_per_transit and snr_pink_per_transit"""
snr_per_transit = numpy.zeros([len(transit_times)])
snr_pink_per_transit = numpy.zeros([len(transit_times)])
intransit = transit_mask(t, period, 2 * duration, T0)
flux_ootr = y[~intransit]
try:
pinknoise = pink_noise(flux_ootr, int(numpy.mean(per_transit_count)))
except:
pinknoise = numpy.nan
# Estimate SNR and pink SNR
# Second run because now the out of transit points are known
if len(flux_ootr) > 0:
std = numpy.std(flux_ootr)
else:
std = numpy.nan
for i in range(len(transit_times)):
mid_transit = transit_times[i]
tmin = mid_transit - 0.5 * transit_duration_in_days
tmax = mid_transit + 0.5 * transit_duration_in_days
if numpy.isnan(tmin) or numpy.isnan(tmax):
idx_intransit = []
mean_flux = numpy.nan
else:
idx_intransit = numpy.where(numpy.logical_and(t > tmin, t < tmax))
if len(y[idx_intransit]) > 0:
mean_flux = numpy.mean(y[idx_intransit])
else:
mean_flux = numpy.nan
intransit_points = numpy.size(y[idx_intransit])
try:
snr_pink_per_transit[i] = (1 - mean_flux) / pinknoise
if intransit_points > 0 and not numpy.isnan(std):
std_binned = std / intransit_points ** 0.5
snr_per_transit[i] = (1 - mean_flux) / std_binned
else:
snr_per_transit[i] = 0
snr_pink_per_transit[i] = 0
except:
snr_per_transit[i] = 0
snr_pink_per_transit[i] = 0
return snr_per_transit, snr_pink_per_transit
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@[email protected]@.PATH_END.py
|
{
"filename": "rfi_inspect_2458041.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/rfi_inspect/rfi_inspect_2458041.ipynb",
"type": "Jupyter Notebook"
}
|
# RFI Inspection Daily RTP Notebook
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import glob
import os
from astropy import units
from copy import deepcopy
from pyuvdata import UVFlag
import matplotlib.colors as colors
from matplotlib import cm
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = '2459122'
# data_path = '/lustre/aoc/projects/hera/H4C/2459122'
# os.environ["JULIANDATE"] = JD
# os.environ["DATA_PATH"] = data_path
```
```python
# Use environment variables to figure out path to data
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
print(f'JD = "{JD}"')
print(f'data_path = "{data_path}"')
JD = int(JD)
```
JD = "2458041"
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458041"
```python
uvf = UVFlag(f'{data_path}/zen.{JD}.total_threshold_and_a_priori_flags.h5')
# Load in the metadata for easier plotting.
freqs = np.unique(uvf.freq_array)
times = np.unique(uvf.time_array)
lsts = np.unique(uvf.lst_array)
chans = np.arange(freqs.size)
plot_times = times - np.floor(times[0])
lsts_hr = lsts * units.rad.to("cycle") * units.day.to("hr")
freqs_MHz = freqs * units.Hz.to("MHz")
extent = (freqs_MHz[0], freqs_MHz[-1], plot_times[-1], plot_times[0])
```
```python
plt.figure(figsize=(16,12))
cax = plt.imshow(uvf.flag_array[:,:,0], aspect='auto', interpolation='nearest',
extent=[uvf.freq_array[0] / 1e6, uvf.freq_array[-1] / 1e6,
uvf.time_array[-1] - JD, uvf.time_array[0] - JD])
plt.xlabel('Frequency (MHz)')
plt.ylabel(f'JD - {JD}')
ax2 = plt.gca().twinx()
ax2.set_ylim([uvf.lst_array[0] * 12 / np.pi, uvf.lst_array[-1] * 12 / np.pi])
ax2.invert_yaxis()
ax2.set_ylabel('LST (hours)')
ax3 = plt.gca().twiny()
ax3.set_xlim([0, uvf.Nfreqs - 1])
ax3.set_xlabel('Channel');
```
Text(0.5, 0, 'Channel')

# Figure 1(a): Full day of XRFI flags
Yellow is flagged. Blue is unflagged.
```python
xrfi_dirs = sorted(glob.glob(f'{data_path}/zen.{JD}.?????.xrfi'))
print(f'Found {len(xrfi_dirs)} directories containing XRFI intermediate data products.')
files1 = [glob.glob(f'{d}/*combined_metrics1.h5')[0] for d in xrfi_dirs]
print(f'Found {len(files1)} combined round 1 XRFI metrics files.')
files2 = [glob.glob(f'{d}/*combined_metrics2.h5')[0] for d in xrfi_dirs]
print(f'Found {len(files2)} combined round 2 XRFI metrics files.')
uvf1 = UVFlag(files1)
uvf2 = UVFlag(files2)
uvf2.metric_array = np.where(np.isinf(uvf2.metric_array), uvf1.metric_array,
uvf2.metric_array)
```
Found 73 directories containing XRFI intermediate data products.
Found 73 combined round 1 XRFI metrics files.
Found 73 combined round 2 XRFI metrics files.
```python
plt.figure(figsize=(16,12))
max_abs = 100
if np.max(uvf2.metric_array) > max_abs:
extend = 'max'
if np.min(uvf2.metric_array) < -max_abs:
extend = 'both'
elif np.min(uvf2.metric_array) < -max_abs:
extend = 'min'
else:
extend = 'neither'
plt.imshow(uvf2.metric_array[:,:,0], aspect='auto', cmap='RdBu_r',
norm=colors.SymLogNorm(linthresh=1,vmin=-max_abs, vmax=max_abs),
extent=[uvf.freq_array[0] / 1e6, uvf.freq_array[-1] / 1e6,
uvf.time_array[-1] - JD, uvf.time_array[0] - JD])
plt.colorbar(pad=.07, extend=extend,
label='RFI Detection Significance ($\sigma$s)')
plt.title('Combined XRFI Metrics')
plt.xlabel('Frequency (MHz)')
plt.ylabel(f'JD - {JD}')
ax2 = plt.gca().twinx()
ax2.set_ylim([uvf.lst_array[0] * 12 / np.pi, uvf.lst_array[-1] * 12 / np.pi])
ax2.invert_yaxis()
ax2.set_ylabel('LST (hours)')
ax3 = plt.gca().twiny()
ax3.set_xlim([0, uvf.Nfreqs - 1])
ax3.set_xlabel('Channel');
```
default base will change from np.e to 10 in 3.4. To suppress this warning specify the base keyword argument.
Text(0.5, 0, 'Channel')

## Figure 2(a): Combined XRFI Detection Significance
This figure shows the round 2 XRFI metrics (mean-filter outliers) combined in quadrature. Where a pixel was flagged in round 1 of XRFI, round 1's combined median-filter metric is shown instead.
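As a rough sketch of what "combined in quadrature" means here (this is not the `hera_qm` implementation; the array names and shapes below are hypothetical), the quadrature combination and the round-1 fallback could look like:
```python
import numpy as np

# hypothetical per-metric detection-significance waterfalls, each (Ntimes, Nfreqs)
metric_list = [np.random.randn(60, 1024) for _ in range(4)]

# combine the individual metrics in quadrature into one significance waterfall
combined_round2 = np.sqrt(np.sum(np.square(metric_list), axis=0))

# where the round-2 value is invalid (stored as inf), fall back to the round-1
# combined median-filter metric, mirroring the np.where(np.isinf(...)) cell above
combined_round1 = np.zeros_like(combined_round2)  # stand-in for round-1 metrics
combined_round2 = np.where(np.isinf(combined_round2), combined_round1, combined_round2)
```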
```python
# Load in the flags from each round of XRFI flagging.
low_level_flag_labels = (
"abscal_chi_sq_flags1",
"abscal_chi_sq_flags2",
"ag_flags1",
"ag_flags2",
"apriori_flags",
"auto_flags1",
"auto_flags2",
"ax_flags1",
"ax_flags2",
"combined_flags1",
"combined_flags2",
"cross_flags1",
"cross_flags2",
"flags1",
"flags2",
"og_flags1",
"og_flags2",
"omnical_chi_sq_flags1",
"omnical_chi_sq_flags2",
"ox_flags1",
"ox_flags2",
"v_flags1",
"v_flags2",
)
# Keep the thresholded flags separate for easier analysis.
thresholded_flag_labels = (
"abscal_chi_sq_renormed_threshold_flags",
"ag_threshold_flags",
"auto_threshold_flags",
"ax_threshold_flags",
"combined_threshold_flags",
"cross_threshold_flags",
"og_threshold_flags",
"omnical_chi_sq_renormed_threshold_flags",
"ox_threshold_flags",
"v_threshold_flags",
"total_threshold_and_a_priori_flags",
)
low_level_flags = {}
for file_id in low_level_flag_labels:
flag_files = []
for xrfi_dir in xrfi_dirs:
matching_files = glob.glob(os.path.join(xrfi_dir, f"*.{file_id}.h5"))
if len(matching_files) > 0:
flag_files.append(matching_files[0])
if len(flag_files) > 0:
uvf = UVFlag(flag_files)
low_level_flags[file_id] = np.squeeze(uvf.flag_array)
thresholded_flags = {}
for file_id in thresholded_flag_labels:
flag_file = f"{data_path}/zen.{JD}.{file_id}.h5"
if os.path.exists(flag_file):
uvf = UVFlag(flag_file)
thresholded_flags[file_id] = np.squeeze(uvf.flag_array)
all_flags = dict(**low_level_flags, **thresholded_flags)
```
```python
label_mapping = {
f"Round {i}": {
"Priors": ("apriori_flags", "flags1")[i-1],
"Autocorrs": f"auto_flags{i}",
"Crosscorrs": f"cross_flags{i}",
"Omnical\nVisibilities": f"v_flags{i}",
"Omnical\nGains": f"og_flags{i}",
r"Omnical $\chi^2$": f"ox_flags{i}",
"Omnical\nGlobal $\chi^2$": f"omnical_chi_sq_flags{i}",
"Abscal\nGains": f"ag_flags{i}",
r"Abscal $\chi^2$": f"ax_flags{i}",
r"Abscal\nGlobal $\chi^2$": f"abscal_chi_sq_flags{i}",
"Combined\nMetrics": f"combined_flags{i}",
} for i in (1,2)
}
label_mapping["Round 3"] = {
"Priors": "flags2",
"Autocorrs": "auto_threshold_flags",
"Crosscorrs": "cross_threshold_flags",
"Omnical\nGains": "og_threshold_flags",
r"Omnical $\chi^2$": "ox_threshold_flags",
"Omnical\nGlobal $\chi^2$": f"omnical_chi_sq_renormed_threshold_flags",
"Omnical\nVisibilities": "v_threshold_flags",
"Abscal\nGains": "ag_threshold_flags",
r"Abscal $\chi^2$": "ax_threshold_flags",
r"Abscal\nGlobal $\chi^2$": f"abscal_chi_sq_renormed_threshold_flags",
"Combined\nMetrics": "combined_threshold_flags",
'Final\nFlags': "total_threshold_and_a_priori_flags",
}
# remove labels for metrics not used
label_mapping = {rnd: {label: flags for label, flags in labels.items() if flags in all_flags}
for rnd, labels in label_mapping.items()}
```
```python
# Pick easily distinguishable colors
color_palette = (
'#000000', #black
'#ffffff', #white
'#800000', #maroon
'#808000', #olive
'#008b8b', #darkcyan
'#000080', #navy
'#ff8c00', #darkorange
'#ffff00', #yellow
'#00ff00', #lime
'#0000ff', #blue
'#ff00ff', #fuchsia
'#1e90ff', #dodgerblue
'#98fb98', #palegreen
'#ff1493', #deeppink
)
# assign a unique color to a label
label_to_color_map = {"Unflagged": color_palette[0]}
color_index = 1
for mapping in label_mapping.values():
for label in tuple(mapping.keys()) + ("2+ Separate\nMetrics",):
if label not in label_to_color_map:
label_to_color_map[label] = color_palette[color_index]
color_index += 1
```
```python
# Figure out which flags are unique to each step and source
unique_flags_by_stage = {}
for round_label, mapping in label_mapping.items():
unique_flags_by_stage[round_label] = {}
# handle prior flags
prior_flags = low_level_flags[mapping["Priors"]]
unique_flags_by_stage[round_label]["Priors"] = prior_flags
# handle all other flag types
overlap_flags = np.zeros_like(np.squeeze(uvf.flag_array))
for label, file_id in mapping.items():
if label in ["Priors", "Final\nFlags", "Combined\nMetrics"]: # skip these, they are special
continue
flags = all_flags[file_id]
unique_flags = flags.copy()
for other_label, other_file_id in mapping.items():
if other_label in [label, "Priors", "Final\nFlags", "Combined\nMetrics"]:
continue
other_flags = all_flags[other_file_id]
unique_flags &= ~other_flags
overlap_region = flags & other_flags & ~prior_flags
overlap_flags[overlap_region] = True
unique_flags_by_stage[round_label][label] = unique_flags
unique_flags_by_stage[round_label]["2+ Separate\nMetrics"] = overlap_flags
# handle combined metrics separately so that it doesn't affect "2+ Separate\nMetrics"
all_flags_so_far = np.sum(list(unique_flags_by_stage[round_label].values()), axis=0).astype(bool)
combined_metrics_flags = all_flags[mapping["Combined\nMetrics"]]
unique_flags_by_stage[round_label]["Combined\nMetrics"] = combined_metrics_flags & ~all_flags_so_far
# Figure out which flags got applied at the very end when the a priori YAML was used
all_other_round_3_flags = np.sum([flags for flags in unique_flags_by_stage['Round 3'].values()], axis=0).astype(bool)
unique_flags_by_stage['Round 3']["Final\nFlags"] = all_flags[label_mapping['Round 3']["Final\nFlags"]] & (~all_other_round_3_flags)
```
```python
cmap = plt.cm.colors.ListedColormap(list(label_to_color_map.values()))
norm = plt.cm.colors.Normalize(vmin=0, vmax=1)
smap = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
colored_flags = {}
for round_label, flag_dict in unique_flags_by_stage.items():
colored_flags[round_label] = np.zeros(np.squeeze(uvf.flag_array).shape)
for label, flags in flag_dict.items():
colored_flags[round_label][flags] = list(label_to_color_map.keys()).index(label) / (len(label_to_color_map) - 1)
```
```python
def plot_flag_evolution(freq_slice):
fig, axes = plt.subplots(len(colored_flags), figsize=(15, 11 * len(colored_flags)), dpi=300)
# Figure out the details for which part of the flag arrays to plot.
tmin, tmax = plot_times[0], plot_times[-1]
lstmin, lstmax = lsts_hr[0], lsts_hr[-1]
fmin, fmax = freqs_MHz[freq_slice][::freq_slice.size - 1]
extent = (fmin, fmax, tmax, tmin)
# Actually plot the things.
for ax, (label, flags) in zip(axes, colored_flags.items()):
ax.set_title(label, fontsize=16)
ax.imshow(flags[:,freq_slice], aspect="auto", extent=extent, cmap=cmap, vmin=0, vmax=1)
twinx = ax.twinx()
twiny = ax.twiny()
twinx.set_ylim(lstmax, lstmin)
twiny.set_xlim(freq_slice[0], freq_slice[-1])
ax.set_xlabel("Frequency (MHz)", fontsize=12)
ax.set_ylabel(f"JD - {JD}", fontsize=12)
twinx.set_ylabel("LST (hour)", fontsize=12)
twiny.set_xlabel("Channel", fontsize=12)
fig.tight_layout()
for ax in axes.ravel():
cbar = fig.colorbar(smap, ax=ax, orientation="horizontal", pad=0.1)
cbar.set_ticks(np.linspace(0, 1, 2 * len(cmap.colors) + 1)[1::2])
cbar.set_ticklabels(list(label_to_color_map.keys()))
```
```python
# Plot flags in the low-band.
if np.any(freqs_MHz < 100):
freq_slice = np.argwhere(freqs_MHz < 100).flatten() # Low-band, pre-FM
plot_flag_evolution(freq_slice)
```
## Figure 3: Flag Evolution in the Low Band
This figure delineates which steps different flags are introduced in, but does not make a distinction between sources when multiple flagging routines flag the same region of the waterfall. The plot shows flags for frequencies below the FM band, for the entire night. The top plot shows the flags for the first round of flagging (median filter), where the prior flags are the apriori flags; the middle plot shows the flags for the second round of flagging (mean filter), where the prior flags are the combined flags from the first round of flagging (plus extra flags based on the metrics added in quadrature); the bottom plot shows the flags for the final round of flagging (thresholding), where the prior flags are the combined flags from round 2 (plus extra flags based on the metrics added in quadrature). After threshold flagging, the "final flags" also include any apriori flags from the YAML files. *Note: for H1C data, this plot will be skipped.*
```python
# Plot flags in the mid-band.
freq_slice = np.argwhere(np.logical_and(freqs_MHz >= 100, freqs_MHz < 200)).flatten()
plot_flag_evolution(freq_slice)
```

## Figure 4: Flag Evolution in the Mid-Band
This figure delineates which steps different flags are introduced in, but does not make a distinction between sources when multiple flagging routines flag the same region of the waterfall. The plot shows flags for frequencies between the FM band and the analog TV band, for the entire night. The top plot shows the flags for the first round of flagging (median filter), where the prior flags are the apriori flags; the middle plot shows the flags for the second round of flagging (mean filter), where the prior flags are the combined flags from the first round of flagging (plus extra flags based on the metrics added in quadrature); the bottom plot shows the flags for the final round of flagging (thresholding), where the prior flags are the combined flags from round 2 (plus extra flags based on the metrics added in quadrature). After threshold flagging, the "final flags" also include any apriori flags from the YAML files.
```python
# Calculate occupancies for different important sets of flags.
label_mapping = {
"A Priori": "apriori_flags",
"Median Filter": "flags1",
"Mean Filter": "flags2",
"Thresholding": "total_threshold_and_a_priori_flags",
}
occupancies = {}
for axis, axis_label in enumerate(("Frequency", "Time")):
occupancies[axis_label] = {}
for flag_label, flag_id in label_mapping.items():
flags = all_flags[flag_id]
occupancies[axis_label][flag_label] = flags.mean(axis=(1-axis))
```
```python
fig, axes = plt.subplots(2, figsize=(15,14), dpi=200)
for i, items in enumerate(zip(axes.ravel(), occupancies.items())):
ax, (occupancy_axis, flag_dict) = items
xvalues = (plot_times, freqs_MHz)[i]
alt_xvalues = (lsts_hr, chans)[i]
xlabel = (f"JD - {JD}", "Frequency (MHz)")[i]
ylabel = (
"Fraction of Channels Flagged",
"Fraction of Integrations Flagged"
)[i]
alt_xlabel = ("LST (hours)", "Channel")[i]
ax.set_xlabel(xlabel, fontsize=12)
ax.set_ylabel(ylabel, fontsize=12)
for flag_label, occupancy in flag_dict.items():
ax.plot(xvalues, occupancy, label=flag_label)
twin_ax = ax.twiny()
twin_ax.set_xlim(alt_xvalues[0], alt_xvalues[-1])
twin_ax.set_xlabel(alt_xlabel, fontsize=12)
ax.legend()
```

## Figure 5: Flagging Occupancies
These plots show the flagging occupancies for the Round 0 Flags (Apriori), Round 1 Flags (Median Filter), Round 2 Flags (Mean Filter), and Round 3 Flags (Thresholding). The top plot shows the fraction of channels flagged at each integration for each set of flags, and the bottom plot shows the fraction of integrations flagged as a function of frequency.
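For reference, each occupancy curve is simply the mean of the boolean flag waterfall along one axis; a minimal sketch (the array name and shape below are illustrative, not the notebook's variables):
```python
import numpy as np

flags = np.zeros((60, 1024), dtype=bool)  # (Ntimes, Nfreqs) flag waterfall

# fraction of channels flagged at each integration (plotted against time/LST)
frac_channels_flagged = flags.mean(axis=1)

# fraction of integrations flagged in each channel (plotted against frequency)
frac_integrations_flagged = flags.mean(axis=0)
```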
# Metadata
```python
from hera_qm import version
print(version.construct_version_info())
```
{'version': '1.0', 'git_origin': '[email protected]:HERA-Team/hera_qm.git', 'git_hash': 'a15c511f7e0fc30602257c9eb5ff761bc83ef6a5', 'git_description': 'v1.1-313-ga15c511', 'git_branch': 'master'}
```python
```
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@rfi_inspect@[email protected]_END.py
|
{
"filename": "_SpecTrails.py",
"repo_name": "tgrassi/prizmo",
"repo_path": "prizmo_extracted/prizmo-main/src_py/ChiantiPy/base/_SpecTrails.py",
"type": "Python"
}
|
"""
Base class used in several ChiantiPy objects
"""
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import ChiantiPy.tools.filters as chfilters
import ChiantiPy.tools.util as util
import ChiantiPy.tools.io as chio
import ChiantiPy.tools.data as chdata
import ChiantiPy.tools.constants as const
class specTrails(object):
"""
a collection of methods for use in spectrum calculations
"""
def __init__(self, temperature, density):
self.Temperature = temperature
self.EDensity = density
self.AbundanceName = chdata.Defaults['abundfile']
self.AbundAll = chdata.Abundance[self.AbundanceName]['abundance']
#
# ---------------------------------------------------------------------------
#
def convolve(self, wavelength=0, filter=(chfilters.gaussianR, 1000.), label=0, verbose=0):
'''
the first application of spectrum calculates the line intensities within the specified wavelength range and for the set of ions specified
wavelength will not be used if applied to 'spectrum' objects
wavelength IS needed for 'bunch' objects - in this case, the wavelength should not extend beyond the limits of the
wvlRange used for the 'bunch' calculation
'''
if not hasattr(self, 'IonInstances'):
print(' must set keepIons=1 in order to keep self.IonInstances')
return
#
if type(label)!= type(0):
if type(label) != str:
print(' label must either be zero or a string')
return
#
t1 = datetime.now()
#:
if hasattr(self, 'Wavelength'):
nWvl = len(self.Wavelength)
wavelength = self.Wavelength
elif type(wavelength) == int:
print(' a wavelength array must be given')
return
else:
self.Wavelength = wavelength
nWvl = len(wavelength)
lineSpectrum = np.zeros((self.NTempDens, nWvl), np.float64).squeeze()
for akey in sorted(self.IonInstances.keys()):
if verbose:
print( ' trying ion = %s'%(akey))
# thisIon = self.IonInstances[akey]
if not 'errorMessage' in sorted(self.IonInstances[akey].Intensity.keys()):
if verbose:
print(' doing convolve on ion %s '%(akey))
self.IonInstances[akey].spectrum(wavelength, filter)
# lineSpectrum = np.add(lineSpectrum, self.IonInstances[akey].Spectrum['intensity'])
if 'errorMessage' in sorted(self.IonInstances[akey].Spectrum.keys()):
print(self.IonInstances[akey].Spectrum['errorMessage'])
else:
lineSpectrum += self.IonInstances[akey].Spectrum['intensity']
# if self.NTempDens == 1:
# lineSpectrum += thisIon.Spectrum['intensity']
# else:
# for iTempDen in range(self.NTempDens):
# lineSpectrum[iTempDen] += thisIon.Spectrum['intensity'][iTempDen]
else:
if 'errorMessage' in sorted(self.IonInstances[akey].Intensity.keys()):
print(self.IonInstances[akey].Intensity['errorMessage'])
self.LineSpectrum = {'wavelength':wavelength, 'intensity':lineSpectrum.squeeze()}
#
total = self.LineSpectrum['intensity']
#
# the following is required in order to be applied to both a 'spectrum' and a 'bunch' object
#
if hasattr(self, 'FreeFree'):
total += self.FreeFree['intensity']
if hasattr(self, 'FreeBound'):
total += self.FreeBound['intensity']
if hasattr(self, 'TwoPhoton'):
total += self.TwoPhoton['intensity']
self.Total = total
#
#
if self.NTempDens == 1:
integrated = total
else:
integrated = total.sum(axis=0)
#
t2 = datetime.now()
dt = t2 - t1
print(' elapsed seconds = %12.3e'%(dt.seconds))
#
if type(label) == type(''):
if hasattr(self, 'Spectrum'):
self.Spectrum[label] = {'wavelength':wavelength, 'intensity':total.squeeze(), 'filter':filter[0].__name__, 'width':filter[1], 'integrated':integrated, 'em':self.Em, 'Abundance':self.AbundanceName}
else:
self.Spectrum = {label:{'wavelength':wavelength, 'intensity':total.squeeze(), 'filter':filter[0].__name__, 'width':filter[1], 'integrated':integrated, 'em':self.Em, 'Abundance':self.AbundanceName}}
else:
self.Spectrum ={'wavelength':wavelength, 'intensity':total.squeeze(), 'filter':filter[0].__name__, 'width':filter[1], 'Abundance':self.AbundanceName}
return
#
# ---------------------------------------------------------------------------
#
def ionGate(self, elementList = None, ionList = None, minAbund=None, doLines=1, doContinuum=1, doWvlTest=1, doIoneqTest=1, includeDiel=False, verbose=0):
'''
creates a list of ions for free-free, free-bound, and line intensity calculations
if doing the radiative losses, accept all wavelengths -> doWvlTest=0
the list is a dictionary self.Todo
'''
#
masterlist = chdata.MasterList
abundAll = self.AbundAll
#
nonzed = abundAll > 0.
minAbundAll = abundAll[nonzed].min()
if minAbund:
if minAbund < minAbundAll:
minAbund = minAbundAll
ionInfo = chio.masterListInfo()
#
if hasattr(self, 'Wavelength'):
wvlRange = [self.Wavelength.min(), self.Wavelength.max()]
elif hasattr(self, 'WvlRange'):
wvlRange = self.WvlRange
else:
print(' need a wavelength range in ionGate ')
#
temperature = self.Temperature
#
#
todo = {}
#
#
if elementList:
for i, element in enumerate(elementList):
elementList[i] = element.lower()
if verbose:
print('el = %s'%(element))
z = const.El.index(element.lower()) + 1
for one in masterlist:
nameDict = util.convertName(one)
if nameDict['Element'].lower() in elementList:
# if verbose:
# print(' ion = %s'%(one))
if doLines:
todo[one] = 'line'
for stage in range(2, z+2):
if stage < 31:
name = util.zion2name(z, stage)
if doContinuum and not nameDict['Dielectronic']:
if name not in todo.keys():
todo[name] = 'ff'
else:
todo[name] += '_ff'
todo[name] += '_fb'
if ionList:
for one in ionList:
nameDict = util.convertName(one)
if masterlist.count(one):
if doLines:
todo[one] = 'line'
if verbose:
print(' %s in the CHIANTI database'%(one))
else:
if verbose:
pstring = ' %s not in CHIANTI database'%(one)
print(pstring)
if doContinuum and not nameDict['Dielectronic'] and nameDict['Ion'] < 31:
if one not in todo.keys():
todo[one] = 'ff'
else:
todo[one] += '_ff'
todo[one] += '_fb'
if doIoneqTest:
toPop = []
for ionS in todo:
ioneqTest = (temperature.max() >= ionInfo[ionS]['tmin']) and (temperature.min() <= ionInfo[ionS]['tmax'])
if not ioneqTest:
toPop.append(ionS)
# else:
# if verbose:
# print('passes ioneqtest %s %s'%(ioneqTest, ionS))
for badion in toPop:
# if verbose:
# print('fails ioneqtest %s'%(ionS))
todo.pop(badion)
if doWvlTest:
toPop = []
for ionS in todo:
# if verbose:
# print(' doing wvltest on %s'%(ionS))
if doWvlTest:
wvlTestMin = wvlRange[0] <= ionInfo[ionS]['wmax']
wvlTestMax = wvlRange[1] >= ionInfo[ionS]['wmin']
else:
wvlTestMin = 1
wvlTestMax = 1
# if verbose:
# print(' %s %8.2f %8.2f %8.2f %8.2f'%(ionS, ionInfo[ionS]['wmin'], ionInfo[ionS]['wmax'], wvlRange[0], wvlRange[1]))
# print(' %s wvlTestMin %s wvlTestMax %s'%(ionS, wvlTestMin, wvlTestMax))
if wvlTestMin and wvlTestMax:
pass
else:
toPop.append(ionS)
for badion in toPop:
todo.pop(badion)
#
#
# the relative H abundance is 1.0, the rest are all smaller
if minAbund is not None and type(minAbund) is float:
for iz in range(1, 31):
abundance = chdata.Abundance[self.AbundanceName]['abundance'][iz-1]
if abundance >= minAbund:
if verbose:
print(' %5i %5s abundance = %10.2e '%(iz, const.El[iz-1], abundance))
#
for ionstage in range(1, iz+1):
ionS = util.zion2name(iz, ionstage)
masterListTest = ionS in masterlist
masterListInfoTest = ionS in sorted(ionInfo.keys())
if masterListTest or masterListInfoTest:
if masterListTest or masterListInfoTest:
if doWvlTest:
wvlTestMin = wvlRange[0] <= ionInfo[ionS]['wmax']
wvlTestMax = wvlRange[1] >= ionInfo[ionS]['wmin']
else:
wvlTestMin = 1
wvlTestMax = 1
if temperature.size == 1:
ioneqTest = (temperature >= ionInfo[ionS]['tmin']) and (temperature <= ionInfo[ionS]['tmax'])
else:
ioneqTest = (temperature.max() >= ionInfo[ionS]['tmin']) and (temperature.min() <= ionInfo[ionS]['tmax'])
# construct similar test for the dielectronic files
ionSd = util.zion2name(iz, ionstage, dielectronic=1)
masterListTestD = ionSd in masterlist
masterListInfoTestD = ionSd in sorted(ionInfo.keys())
if masterListTestD or masterListInfoTestD:
if doWvlTest:
wvlTestMinD = wvlRange[0] <= ionInfo[ionSd]['wmax']
wvlTestMaxD = wvlRange[1] >= ionInfo[ionSd]['wmin']
else:
wvlTestMinD = 1
wvlTestMaxD = 1
ioneqTestD = (temperature.max() >= ionInfo[ionSd]['tmin']) and (temperature.min() <=ionInfo[ionSd]['tmax'])
#
if masterListTest and wvlTestMin and wvlTestMax and ioneqTest and doLines:
# if verbose:
# print(' %s passed mList, wvlTest, ioneqTest'%(ionS))
if ionS in sorted(todo.keys()):
todo[ionS] += '_line'
else:
todo[ionS] = 'line'
# get dielectronic lines
# if verbose:
# print(' for ion %s do : %s'%(ionS, todo[ionS]))
if masterListTestD and wvlTestMinD and wvlTestMaxD and ioneqTestD and doLines:
if ionSd in sorted(todo.keys()):
todo[ionSd] += '_line'
else:
todo[ionSd] = 'line'
# if verbose:
# print(' for ion %s do : %s'%(ionSd, todo[ionSd]))
#
#
for ionstage in range(2, iz+2):
ionS = util.zion2name(iz, ionstage)
if ionS in ionInfo.keys():
ioneqTest = (temperature.max() >= ionInfo[ionS]['tmin']) and (temperature.min() <= ionInfo[ionS]['tmax'])
else:
ioneqTest = 1
# construct similar test for the dielectronic files
if ioneqTest and doContinuum:
# ionS is the target ion, cannot be the neutral for the continuum
# if verbose:
# print(' setting up continuum calculation for %s '%(ionS))
if ionstage < 31:
if ionS in sorted(todo.keys()):
todo[ionS] += '_ff_fb'
else:
todo[ionS] = 'ff_fb'
# if verbose:
# print(' for ion %s do : %s'%(ionS, todo[ionS]))
# remove duplicates
# todoSet = set(todo)
# self.Todo = list(todoSet)
dielList = []
if not includeDiel:
for akey in todo:
if 'd' in akey[-1]:
dielList.append(akey)
# if verbose:
# print(' removed dielectronic ion %s for %s', akey, stuff)
newTodo = {}
for akey in todo:
if akey not in dielList:
newTodo[akey] = todo[akey]
self.Todo = newTodo
if len(self.Todo.keys()) == 0:
print(' no elements have been selected')
print(' it is necessary to provide an elementList, an ionList, or set minAbund > 0.')
return
#
# -------------------------------------------------------------------------
#
def spectrumPlot(self, index=-1, integrated=0, saveFile=0, linLog = 'lin'):
'''
to plot the spectrum as a function of wavelength
'''
plt.figure()
mask = self.Em > 1.
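        # if no emission measure above 1 was supplied, quote the intensity per unit column emission measure in the y-axis label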
if mask.sum() == 0:
ylabel = r'erg cm$^{-2}$ s$^{-1}$ sr$^{-1} \AA^{-1}$ ($\int\,$ N$_e\,$N$_H\,$d${\it l}$)$^{-1}$'
else:
ylabel = r'erg cm$^{-2}$ s$^{-1}$ sr$^{-1} \AA^{-1}$'
#
xlabel = 'Wavelength ('+self.Defaults['wavelength'] +')'
#
# ymin = 10.**(np.log10(emiss.min()).round(0))
#
plt.ion()
#
if integrated:
if 'wavelength' in sorted(self.Spectrum.keys()):
plt.plot(self.Spectrum['wavelength'], self.Spectrum['integrated'])
elif 'wvl' in sorted(self.Spectrum.keys()):
plt.plot(self.Spectrum['wvl'], self.Spectrum['integrated'])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title('integrated spectrum')
else:
nTempDens = self.NTempDens
if nTempDens == 1:
#
if 'wavelength' in sorted(self.Spectrum.keys()):
plt.plot(self.Spectrum['wavelength'], self.Spectrum['intensity'])
elif 'wvl' in sorted(self.Spectrum.keys()):
plt.plot(self.Spectrum['wvl'], self.Spectrum['intensity'])
plt.title(' Temperature = %10.2e K'%(self.Temperature))
else:
if index < 0:
                    index = nTempDens//2
if 'wavelength' in sorted(self.Spectrum.keys()):
plt.plot(self.Spectrum['wavelength'], self.Spectrum['intensity'][index])
elif 'wvl' in sorted(self.Spectrum.keys()):
plt.plot(self.Spectrum['wvl'], self.Spectrum['intensity'][index])
plt.title(' Temperature = %10.2e K for index = %3i'%(self.Temperature[index], index))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.tight_layout()
if saveFile:
plt.savefig(saveFile)
#
# -------------------------------------------------------------------------
#
def lineSpectrumPlot(self, index = 0, integrated=0, saveFile=0, linLog = 'lin'):
'''
to plot the line spectrum as a function of wavelength
'''
#
#
plt.figure()
mask = self.Em > 1.
if mask.sum() == 0:
ylabel = r'erg cm$^{-2}$ s$^{-1}$ sr$^{-1} \AA^{-1}$ ($\int\,$ N$_e\,$N$_H\,$d${\it l}$)$^{-1}$'
else:
ylabel = r'erg cm$^{-2}$ s$^{-1}$ sr$^{-1} \AA^{-1}$'
#
xlabel = 'Wavelength ('+self.Defaults['wavelength'].capitalize() +')'
#
# ymin = 10.**(np.log10(emiss.min()).round(0))
#
plt.ion()
#
if integrated:
plt.plot(self.Spectrum['wavelength'], self.Spectrum['intensity'].sum(axis=0))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title('integrated spectrum')
else:
nTempDens = self.NTempDens
if nTempDens == 1:
#
plt.plot(self.LineSpectrum['wavelength'], self.LineSpectrum['intensity'])
plt.title(' Temperature = %10.2e K '%(self.Temperature))
else:
plt.plot(self.LineSpectrum['wavelength'], self.LineSpectrum['intensity'][index])
plt.title(' Temperature = %10.2e K for index = %3i'%(self.Temperature[index], index))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if saveFile:
plt.savefig(saveFile)
#
# -------------------------------------------------------------------------
|
tgrassiREPO_NAMEprizmoPATH_START.@prizmo_extracted@prizmo-main@src_py@ChiantiPy@base@[email protected]_END.py
|
{
"filename": "makePlanetInput-checkpoint.ipynb",
"repo_name": "stevepur/DR25-occurrence-public",
"repo_path": "DR25-occurrence-public_extracted/DR25-occurrence-public-main/midMDwarfs/.ipynb_checkpoints/makePlanetInput-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
This notebook prepares a planet candidate catalog for the stellar population in the specified input stellar catalog. It computes the reliability and the corrected planet radius, and includes useful planet properties such as the Robovetter score. It outputs two catalogs: one containing only PCs and one containing all KOIs.
Reliability is given by
$$ R = \frac{N_{\mathrm{truePC}}}{N_{\mathrm{obsPC}}} = 1 - \frac{N_{\mathrm{obsFP}}}{N_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) = 1 - \frac{F_{\mathrm{obsFP}}}{F_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) $$
where $E = N_{\mathrm{obsFP}}/N_{\mathrm{trueFP}}$ is the false positive effectiveness, $F_{\mathrm{obsFP}} = N_{\mathrm{obsFP}}/N_{\mathrm{obsTCEs}}$ is the fraction of observed TCEs that are dispositioned as FP and $F_{\mathrm{obsPC}} = N_{\mathrm{obsPC}}/N_{\mathrm{obsTCEs}}$ is the fraction of TCEs dispositioned as PC.
We will separately measure $E$ and $F_{\mathrm{obsFP}}$ as binomial point processes with probabilities that depend on period and MES. Once we have $F_{\mathrm{obsFP}}$ then $F_{\mathrm{obsPC}} = 1 - F_{\mathrm{obsFP}}$, assuming that $N_{\mathrm{obsTCEs}} = N_{\mathrm{obsPC}} + N_{\mathrm{obsFP}}$.
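As a quick sanity check of the relation above (not part of the original analysis), the reliability can be evaluated directly once $E$ and $F_{\mathrm{obsFP}}$ are known; the values below are purely illustrative.
```python
# minimal sketch of the reliability relation; E and F_obsFP here are made-up numbers
def reliability(E, F_obsFP):
    return 1.0 - (F_obsFP / (1.0 - F_obsFP)) * ((1.0 - E) / E)
print(reliability(E=0.99, F_obsFP=0.2))  # ~0.997 for these illustrative inputs
```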
We think of TCEs as consisting of two sets: those that are dispositioned as FP and those that are dispositioned as PC. We do this for both the observed TCEs, and for inverted/scrambled TCEs, where all TCEs are true false positives. Then we can think of the vetting process as drawing from the set of TCEs, with a probability $r$ of selecting either PCs or FPs. Then the probability distribution of selecting $c$ FPs from $n$ TCEs is given by the binomial distribution
$$P\{c\} = \left( \begin{array}{c} n \\ c \end{array} \right) r^c (1-r)^{n-c}.$$
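For concreteness, this binomial probability can be evaluated directly with `scipy.stats.binom`; the counts and rate in this sketch are invented and only illustrate the formula.
```python
# illustrative only: probability of drawing c FPs from n TCEs with per-draw probability r
from scipy.stats import binom
n, c, r = 100, 37, 0.4
print(binom.pmf(c, n, r))  # P{c} for these hypothetical numbers
```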
To measure $E$ we use the inverted and scrambled data sets, where all detected TCEs are by definition FPs. We define $E$ as the probability of drawing FPs from inverted/scrambled TCEs, found via the Bayesian inference $p(E|n, c) \propto p(c|E, n) p(E)$, where
$$p(c|E, n) = \left( \begin{array}{c} n \\ c \end{array} \right) E^c (1-E)^{n-c}$$ and
$p(E)$ is a prior distribution of the probability $E$. By putting the data on a grid indexed by $i,j$, we can fit effectiveness as a function parameterized by a vector $\theta$, $E(\theta,\mathrm{period},\mathrm{MES})$, as $p(\theta|n_{i,j}, c_{i,j}, \mathrm{period}_{i,j},\mathrm{MES}_{i,j}) \propto p(c_{i,j}|\theta, n_{i,j}, \mathrm{period}_{i,j},\mathrm{MES}_{i,j}) p(\theta)$, where $p(\theta)$ is some prior distribution of the parameters.
To measure $F_{\mathrm{obsFP}}$ we perform a similar inference using the set of observed TCEs, inferring the probability of drawing $c$ FPs from $n$ observed TCEs. The inference in this case becomes $p(F_{\mathrm{obsFP}}|n, c) \propto p(c|F_{\mathrm{obsFP}}, n) p(F_{\mathrm{obsFP}})$, which we can parameterize in terms of a function similar to effectiveness.
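A minimal sketch of this inference for a single (period, MES) cell, assuming a flat prior on $E$ and made-up counts, is shown below; it is not the parameterized fit actually performed in binomialFPEffectiveness.ipynb.
```python
# schematic single-cell posterior for the effectiveness E under a flat prior;
# n and c are hypothetical counts of inverted/scrambled TCEs and of those vetted as FP
import numpy as np
from scipy.stats import binom
n, c = 250, 240
E_grid = np.linspace(1e-3, 1.0 - 1e-3, 1000)
posterior = binom.pmf(c, n, E_grid)          # binomial likelihood times a flat prior
posterior /= np.trapz(posterior, E_grid)     # normalize on the grid
print(np.trapz(E_grid * posterior, E_grid))  # posterior mean effectiveness
```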
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spec
import pandas as pd
from astropy.io import ascii
from astropy.table import Table, vstack
import pickle
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import sys
sys.path.insert(0, '..')
import dr25Models as funcModels
```
Reliability is given by
$$ R = \frac{N_{\mathrm{truePC}}}{N_{\mathrm{obsPC}}} = 1 - \frac{N_{\mathrm{obsFP}}}{N_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) = 1 - \frac{F_{\mathrm{obsFP}}}{F_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) = 1 - \frac{F_{\mathrm{obsFP}}}{1 - F_{\mathrm{obsFP}}} \left( \frac{1 - E}{E} \right) $$
where $E = N_{\mathrm{obsFP}}/N_{\mathrm{trueFP}}$, $F_{\mathrm{obsFP}} = N_{\mathrm{obsFP}}/N_{\mathrm{obsTCEs}}$ is the fraction of observed TCEs that are dispositioned as FP and $F_{\mathrm{obsPC}} = N_{\mathrm{obsPC}}/N_{\mathrm{obsTCEs}}$ is the fraction of TCEs dispositioned as PC.
We get $E$ and $F_{\mathrm{obsFP}}$ from the outputs of the notebooks binomialFPEffectiveness.ipynb and binomialObsFPRate.ipynb.
```python
dataType = "midMDwarfsBerger2019"
outputDir = dataType + "Output/"
htmlLabel = dataType
```
```python
import requests
from cStringIO import StringIO
if False:
selectStr = "kepid,kepoi_name,koi_tce_plnt_num,koi_pdisposition,koi_score,koi_period,koi_max_mult_ev,koi_prad,koi_prad_err1,koi_prad_err2,koi_ror,koi_ror_err1,koi_ror_err2"
urlDr25Koi = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=q1_q17_dr25_koi&select=" + selectStr
r = requests.get(urlDr25Koi)
if r.status_code != requests.codes.ok:
r.raise_for_status()
fh = StringIO(r.content)
dr25Koi = pd.read_csv(fh, dtype={"kepoi_name":str})
dr25Koi.to_csv("koiCatalogs/dr25_kois_archive.txt", index=False)
else:
dr25Koi = pd.read_csv("../GKbaseline/koiCatalogs/dr25_kois_archive.txt", dtype={"kepoi_name":str})
print("Loaded " + str(len(dr25Koi)) + " KOIs")
```
Loaded 8054 KOIs
```python
```
```python
# add TCE_ID for a later merge
TCE_ID = []
for i in range(len(dr25Koi)):
TCE_ID.append("%09d"%dr25Koi.kepid.loc[i] + "-%02d"%dr25Koi.koi_tce_plnt_num.loc[i])
dr25Koi["TCE_ID"] = TCE_ID
```
```python
# restrict the population to stars in Travis Berger's catalog
if dataType == "midMDwarfsBerger2019":
starlist = "stellarCatalogs/dr25_Berger2019_stellar_clean.txt"
else:
raise ValueError('bad dataType');
dr25CleanStellarIso = pd.read_csv(starlist)
# dr25Koi = dr25Koi[dr25Koi.kepid.isin(dr25CleanStellarIso.kepid)]
# merge in only the stellar radius, its uncertainties, and teff from the stellar table
dr25Koi = pd.merge(dr25Koi, dr25CleanStellarIso[["kepid","radius","radius_err1","radius_err2","teff"]], on="kepid", how="inner")
dr25Koi = dr25Koi.reset_index(drop=True)
print("length of dr25CleanStellarIso = " + str(len(dr25CleanStellarIso.kepid)))
print("length of unique dr25CleanStellarIso = " + str(len(np.unique(dr25CleanStellarIso.kepid))))
print("length of dr25Koi = " + str(len(dr25Koi.kepoi_name)))
print("length of unique dr25Koi = " + str(len(np.unique(dr25Koi.kepoi_name))))
```
length of dr25CleanStellarIso = 393
length of unique dr25CleanStellarIso = 393
length of dr25Koi = 17
length of unique dr25Koi = 17
```python
print("length of unique dr25Koi = " + str(len(np.unique(dr25Koi.kepoi_name))))
print("length of unique dr25Koi kepids = " + str(len(np.unique(dr25Koi.kepid))))
np.unique(dr25Koi.kepoi_name)
```
length of unique dr25Koi = 17
length of unique dr25Koi kepids = 11
array(['K00961.01', 'K00961.02', 'K00961.03', 'K01702.01', 'K02685.01',
'K02704.01', 'K02704.02', 'K02842.01', 'K02842.02', 'K02842.03',
'K03138.01', 'K03138.02', 'K04290.01', 'K06705.01', 'K06863.01',
'K07952.01', 'K08012.01'], dtype=object)
```python
```
```python
```
```python
# correct the planet radii with the new catalog
rEarth = 6356.8 # km
rSun = 695700 # km
dr25Koi['corrected_prad'] = dr25Koi['koi_ror']*dr25Koi['radius']*rSun/rEarth;
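# propagate the radius-ratio and stellar-radius uncertainties in quadrature (err2 is kept negative by convention)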
dr25Koi['corrected_prad_err1'] = np.sqrt(dr25Koi['koi_ror_err1']**2*dr25Koi['radius']**2
+dr25Koi['koi_ror']**2*dr25Koi['radius_err1']**2)*rSun/rEarth;
dr25Koi['corrected_prad_err2'] = -np.sqrt(dr25Koi['koi_ror_err2']**2*dr25Koi['radius']**2
+dr25Koi['koi_ror']**2*dr25Koi['radius_err2']**2)*rSun/rEarth;
dr25Koi = dr25Koi[~np.isnan(dr25Koi.koi_prad)]
```
```python
v = dr25Koi.corrected_prad_err1/dr25Koi.koi_prad_err1
plt.hist(v[v<5], 100);
```

```python
plt.hist(dr25Koi['corrected_prad'][dr25Koi['corrected_prad']<10], 100);
```

```python
fig, ax = plt.subplots(figsize=(15,10));
ax.errorbar(dr25Koi.koi_period, dr25Koi.koi_prad,
yerr = [-dr25Koi.koi_prad_err2, dr25Koi.koi_prad_err1],
fmt="k.", alpha = 0.5);
ax.errorbar(dr25Koi.koi_period, dr25Koi.corrected_prad,
yerr = [-dr25Koi.corrected_prad_err2, dr25Koi.corrected_prad_err1],
fmt="r.", alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Radius Change");
plt.ylim([0, 2.5])
plt.xlim([5, 30])
```
(5, 30)

```python
```
```python
dr25Fpp = ascii.read("../data/q1_q17_dr25_koifpp.txt")
dr25FppPd = dr25Fpp.to_pandas()
```
```python
```
```python
mergedDr25Koi = pd.merge(dr25Koi, dr25FppPd, on="kepoi_name", how="inner")
```
```python
mergedDr25Koi.loc[:,"reliability"] = 1
```
```python
plt.hist(mergedDr25Koi.koi_score, 40);
plt.yscale('log', nonposy='clip')
```

```python
np.sum(np.isnan(mergedDr25Koi.fpp_prob) & (mergedDr25Koi.koi_period > 50))
```
0
```python
mergedDr25Koi[np.abs(mergedDr25Koi.koi_period - mergedDr25Koi.fpp_koi_period)>1e-2]
```
(empty DataFrame: 0 rows × 26 columns — kepid_x, kepoi_name, koi_tce_plnt_num, koi_pdisposition, koi_score, koi_period, koi_max_mult_ev, koi_prad, koi_prad_err1, koi_prad_err2, ..., radius_err2, teff, corrected_prad, corrected_prad_err1, corrected_prad_err2, rowid, kepid_y, fpp_koi_period, fpp_prob, reliability)
```python
mergedDr25Koi["fpp_prob_use"] = mergedDr25Koi["fpp_prob"]
mergedDr25Koi.fpp_prob_use[np.isnan(mergedDr25Koi.fpp_prob)] = 1
mergedDr25Koi.fpp_prob_use[np.abs(mergedDr25Koi.koi_period - mergedDr25Koi.fpp_koi_period)>1e-2] = 1
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
This is separate from the ipykernel package so we can avoid doing imports until
```python
mergedDr25Koi[np.abs(mergedDr25Koi.koi_period - mergedDr25Koi.fpp_koi_period)>1e-2]
```
(empty DataFrame: 0 rows × 27 columns — same columns as above plus fpp_prob_use)
```python
mergedDr25Koi["totalReliability"] = (1-mergedDr25Koi.fpp_prob_use)*mergedDr25Koi.reliability
```
```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.koi_max_mult_ev, cmap="viridis",
c=mergedDr25Koi.reliability, edgecolors='k', s=100*mergedDr25Koi.totalReliability, alpha = 0.3);
plt.xlabel("period");
plt.ylabel("MES");
plt.title("KOI Reliability, size = total reliability");
plt.ylim([7, 50])
plt.xlim([5, 30])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=mergedDr25Koi.reliability, edgecolors='k', s=100*mergedDr25Koi.totalReliability, alpha = 0.3);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Reliability, size = total reliability");
plt.ylim([0, 2.5])
plt.xlim([5, 30])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=mergedDr25Koi.reliability, edgecolors='k', s=100*mergedDr25Koi.reliability, alpha = 0.3);
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=mergedDr25Koi.reliability, edgecolors='k', s=100*mergedDr25Koi.totalReliability, alpha = 0.3);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Reliability, size = reliability");
plt.ylim([0, 2.5])
plt.xlim([5, 30])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
dataLoc = "../data/"
obsTceList = dataLoc + "kplr_dr25_obs_tces.txt"
obsTcesFull = ascii.read(obsTceList);
obsTcesFullPd = obsTcesFull.to_pandas();
mergedDr25Koi = pd.merge(mergedDr25Koi, obsTcesFullPd[["TCE_ID","Disp"]], on="TCE_ID", how="inner")
```
```python
newPCs = mergedDr25Koi[(mergedDr25Koi.koi_pdisposition == "FALSE POSITIVE") & (mergedDr25Koi.Disp == "PC")]
newPCsInBox = newPCs[(newPCs.koi_period >= 50) & (newPCs.koi_period <= 400)
& (newPCs.corrected_prad >= 0.75) & (newPCs.corrected_prad <= 2.5)]
lostPCs = mergedDr25Koi[(mergedDr25Koi.koi_pdisposition == "CANDIDATE") & (mergedDr25Koi.Disp == "FP")]
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.plot(newPCs.koi_period, newPCs.corrected_prad, '.');
scf = ax.plot(newPCsInBox.koi_period, newPCsInBox.corrected_prad, 'r+');
# plt.xlim([50, 400])
plt.plot([400, 400], [0.75, 2.5], color='k', linestyle=':', linewidth=1)
plt.plot([50, 400], [0.75, 0.75], color='k', linestyle=':', linewidth=1)
plt.plot([50, 400], [2.5, 2.5], color='k', linestyle=':', linewidth=1)
plt.plot([50, 50], [0.75, 2.5], color='k', linestyle=':', linewidth=1)
plt.ylim([0, 5])
plt.xlabel("period");
plt.ylabel("corrected radius");
plt.title("new PCs in the low-reliability robovetter result");
```

```python
newPCs.to_csv(outputDir + "newPCsHighCompleteness.txt",
columns=["TCE_ID", "koi_period", "corrected_prad", "totalReliability", "reliability", "fpp_prob_use"],
index=False)
newPCsInBox.to_csv(outputDir + "newPCsHighCompletenessInBox.txt",
columns=["TCE_ID", "koi_period", "corrected_prad", "totalReliability", "reliability", "fpp_prob_use"],
index=False)
```
```python
```
```python
# dr25PC = mergedDr25Koi[mergedDr25Koi.koi_pdisposition == "CANDIDATE"]
# dr25FP = mergedDr25Koi[mergedDr25Koi.koi_pdisposition == "FALSE POSITIVE"]
dr25PC = mergedDr25Koi[mergedDr25Koi.Disp == "PC"]
dr25FP = mergedDr25Koi[mergedDr25Koi.Disp == "FP"]
print("There are " + str(len(dr25PC)) + " PCs in " + str(len(dr25CleanStellarIso)) + " observed targets")
print("There are " + str(len(dr25FP)) + " FPs in " + str(len(dr25CleanStellarIso)) + " observed targets")
# remove those with corrected_prad = NAN
dr25PC = dr25PC[~np.isnan(dr25PC.corrected_prad)]
dr25FP = dr25FP[~np.isnan(dr25FP.corrected_prad)]
mergedDr25Koi = mergedDr25Koi[~np.isnan(mergedDr25Koi.corrected_prad)]
print("after removing NaNs")
print("There are " + str(len(dr25PC)) + " PCs in " + str(len(dr25CleanStellarIso)) + " observed targets")
print("There are " + str(len(dr25FP)) + " FPs in " + str(len(dr25CleanStellarIso)) + " observed targets")
```
There are 2009 PCs in 58974 observed targets
There are 551 FPs in 58974 observed targets
after removing NaNs
There are 2009 PCs in 58974 observed targets
There are 551 FPs in 58974 observed targets
```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.koi_max_mult_ev, cmap="viridis",
c=dr25PC.reliability, edgecolors='k', s=100*dr25PC.totalReliability, alpha = 0.3);
plt.xlabel("period");
plt.ylabel("MES");
plt.title("PC Reliability, size = total reliability");
#plt.ylim([7, 30])
#plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=dr25PC.reliability, edgecolors='k', s=100*dr25PC.totalReliability, alpha = 0.3);
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, s=100*dr25PC.totalReliability,
c=dr25PC.reliability, facecolors='none', edgecolors='k', alpha = 0.3);
plt.yscale('log', nonposy='clip')
plt.xscale('log', nonposx='clip')
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("PC Reliability, size = reliability");
#plt.ylim([0, 2.5])
#plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=dr25PC.reliability, edgecolors='k', alpha = 0.3);
plt.xlabel("period", fontsize = 24);
plt.ylabel("corrected planet radius", fontsize = 24);
plt.title("PC FA Reliability", fontsize = 24);
plt.ylim([0, 2.5])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("FA Reliability");
plt.savefig(outputDir + "pcReliability.pdf",bbox_inches='tight')
plt.plot([200, 200], [1, 2], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [1, 1], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [2, 2], color='k', linestyle='--', linewidth=1)
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=dr25PC.reliability, edgecolors='k', s=100*dr25PC.totalReliability, alpha = 0.3);
plt.xlabel("period", fontsize = 24);
plt.ylabel("DR25 planet radius", fontsize = 24);
plt.title("PC Reliability, size = total reliability", fontsize = 24);
plt.ylim([0, 2.5])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
plt.savefig(outputDir + "pcReliability.pdf",bbox_inches='tight')
plt.plot([200, 200], [1, 2], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [1, 1], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [2, 2], color='k', linestyle='--', linewidth=1)
```
[<matplotlib.lines.Line2D at 0x7fbf79250390>]


```python
plt.figure();
plt.hist(dr25PC[(dr25PC.koi_period>250) & (dr25PC.koi_period<400) & (dr25PC.corrected_prad<2.5)].totalReliability, 20);
plt.xlabel("total reliability")
plt.xlim(0,1)
plt.figure();
plt.hist(dr25PC[(dr25PC.koi_period>250) & (dr25PC.koi_period<400) & (dr25PC.corrected_prad<2.5)].reliability, 20);
plt.xlabel("FA reliability")
plt.xlim(0,1)
print("sum of totalreliability for 50 < p < 400 and r < 2.5 for "
+ str(len(dr25PC[(dr25PC.koi_period>50) & (dr25PC.koi_period<400)
& (dr25PC.corrected_prad<2.5)])) + " planets: "
+ str(np.sum(dr25PC[(dr25PC.koi_period>50) & (dr25PC.koi_period<400)
& (dr25PC.corrected_prad<2.5)].totalReliability)))
print("sum of totalreliability for 50 < p < 200 and 1 < r < 2 for "
+ str(len(dr25PC[(dr25PC.koi_period>50) & (dr25PC.koi_period<400)
& (dr25PC.corrected_prad>1) & (dr25PC.corrected_prad<2)])) + " planets: "
+ str(np.sum(dr25PC[(dr25PC.koi_period>50) & (dr25PC.koi_period<400)
& (dr25PC.corrected_prad>1) & (dr25PC.corrected_prad<2)].totalReliability)))
print("sum of totalreliability for 250 < p < 400 and r < 2.5 for "
+ str(len(dr25PC[(dr25PC.koi_period>250) & (dr25PC.koi_period<400)
& (dr25PC.corrected_prad<2.5)])) + " planets: "
+ str(np.sum(dr25PC[(dr25PC.koi_period>250) & (dr25PC.koi_period<400)
& (dr25PC.corrected_prad<2.5)].totalReliability)))
print("sum of totalreliability for 250 < p < 400 and r < 1.5 for "
+ str(len(dr25PC[(dr25PC.koi_period>250) & (dr25PC.koi_period<400)
& (dr25PC.corrected_prad<1.5)])) + " planets: "
+ str(np.sum(dr25PC[(dr25PC.koi_period>250) & (dr25PC.koi_period<400)
& (dr25PC.corrected_prad<1.5)].totalReliability)))
```
sum of totalreliability for 50 < p < 400 and r < 2.5 for 128 planets: 100.15695214231158
sum of totalreliability for 50 < p < 200 and 1 < r < 2 for 52 planets: 37.07717418585386
sum of totalreliability for 250 < p < 400 and r < 2.5 for 21 planets: 6.761122738479418
sum of totalreliability for 250 < p < 400 and r < 1.5 for 7 planets: 1.7136370246136745


```python
dr25PcInRange = dr25PC[(dr25PC.koi_period>50)&(dr25PC.koi_period<400)&(dr25PC.corrected_prad>0)&(dr25PC.corrected_prad<2.5)]
```
```python
fig, ax = plt.subplots(figsize=(15,10));
rs = mergedDr25Koi.totalReliability*mergedDr25Koi.koi_score
ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, marker="+", alpha=0.2);
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=rs, edgecolors='k', s=100*rs, alpha = 0.3);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Total Reliability x Score");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("KOI Total Reliability x Score");
```

```python
plt.hist(dr25PC.corrected_prad/dr25PC.koi_prad, 100);
#plt.yscale('log', nonposy='clip')
```

```python
plt.hist(dr25CleanStellarIso.radius[dr25CleanStellarIso.radius<2]/dr25CleanStellarIso.radius_DR25[dr25CleanStellarIso.radius<2], 100);
#plt.yscale('log', nonposy='clip')
```

```python
dr25PC.to_csv("koiCatalogs/dr25_GK_PCs_" + htmlLabel + ".csv", index=False)
mergedDr25Koi.to_csv("koiCatalogs/dr25_GK_KOIs_" + htmlLabel + ".csv", index=False)
```
```python
fig, ax = plt.subplots(figsize=(15,10));
ax.errorbar(dr25PC.koi_period, dr25PC.koi_prad,
yerr = [-dr25PC.koi_prad_err2, dr25PC.koi_prad_err1],
fmt="k.", alpha = 0.5);
ax.errorbar(dr25PC.koi_period, dr25PC.corrected_prad,
yerr = [-dr25PC.corrected_prad_err2, dr25PC.corrected_prad_err1],
fmt="r.", alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Radius Change");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
```
(50, 400)

```python
plt.hist(dr25PC.koi_score, 40);
plt.yscale('log', nonposy='clip')
plt.title("PC score distribution")
plt.hist(dr25FP.koi_score, 40, alpha=0.5);
plt.yscale('log', nonposy='clip')
plt.title("FP score distribution")
```
Text(0.5,1,'FP score distribution')

```python
period_rng = (50, 200)
rp_rng = (1., 2.)
occPcs = dr25PC[(dr25PC.koi_period>=period_rng[0])&(dr25PC.koi_period<=period_rng[1])&(dr25PC.corrected_prad>=rp_rng[0])&(dr25PC.corrected_prad<=rp_rng[1])]
print("After radius correction there are " + str(len(occPcs)) + " PCs in " + str(len(dr25CleanStellarIso)) + " observed targets")
occPcs2 = dr25PC[(dr25PC.koi_period>=period_rng[0])&(dr25PC.koi_period<=period_rng[1])&(dr25PC.koi_prad>=rp_rng[0])&(dr25PC.koi_prad<=rp_rng[1])]
print("Before radius correction there are " + str(len(occPcs2)) + " PCs in " + str(len(dr25CleanStellarIso)) + " observed targets")
```
After radius correction there are 38 PCs in 58974 observed targets
Before radius correction there are 41 PCs in 58974 observed targets
```javascript
%%javascript
IPython.notebook.save_notebook()
```
<IPython.core.display.Javascript object>
```bash
%%bash -s "$htmlLabel"
jupyter nbconvert --to html makePlanetInput.ipynb
mv makePlanetInput.html htmlArchive/makePlanetInput_$1.html
```
[NbConvertApp] Converting notebook makePlanetInput.ipynb to html
[NbConvertApp] Writing 2032691 bytes to makePlanetInput.html
```python
plt.figure(figsize=(5,5));
plt.plot(dr25PC.koi_score, dr25PC.totalReliability, '.')
plt.xlabel("score")
plt.ylabel("total reliability")
plt.figure(figsize=(5,5));
plt.plot(dr25PC.koi_score, dr25PC.reliability, '.')
plt.xlabel("score")
plt.ylabel("instrumental reliability")
```
Text(0,0.5,'instrumental reliability')


```python
print(float((66036-60220))/60220)
```
0.0965792095649
```python
print((33.-25)/25)
```
0.32
```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=1-dr25PC.fpp_prob_use, edgecolors='k', alpha = 0.3);
plt.xlabel("period");
plt.ylabel("radius");
plt.title("PC FPP");
plt.ylim([0.75, 12])
plt.xlim([1, 50])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("FPP");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=dr25PC.totalReliability, edgecolors='k', s=100*dr25PC.reliability, alpha = 0.3);
plt.xlabel("period");
plt.ylabel("radius");
plt.title("PC total reliability");
plt.ylim([0.75, 12])
plt.xlim([1, 20])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("total reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=1-dr25PC.fpp_prob_use, edgecolors='k', s=100*dr25PC.reliability, alpha = 0.3);
plt.yscale('log', nonposy='clip')
plt.xscale('log', nonposx='clip')
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("PC astrophysical Reliability, size = instrumental FP reliability");
#plt.ylim([0, 2.5])
#plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("1-FPP");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=1-mergedDr25Koi.fpp_prob_use, edgecolors='k', s=100*mergedDr25Koi.reliability, alpha = 0.3);
plt.yscale('log', nonposy='clip')
plt.xscale('log', nonposx='clip')
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI astrophysical Reliability, size = instrumental FP reliability");
#plt.ylim([0, 2.5])
#plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("1-FPP");
```

```python
```
|
stevepurREPO_NAMEDR25-occurrence-publicPATH_START.@DR25-occurrence-public_extracted@DR25-occurrence-public-main@[email protected][email protected]@.PATH_END.py
|
{
"filename": "progBar.py",
"repo_name": "kylemede/ExoSOFT",
"repo_path": "ExoSOFT_extracted/ExoSOFT-master/ExoSOFT/tools/progBar.py",
"type": "Python"
}
|
from __future__ import absolute_import
import sys
class ProgBar(object):
"""
Call in a loop to create terminal progress bar
Heavily modified, but code originally copied from:
http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
barLength - Optional : character length of bar (Int)
"""
def __init__(self,total=100, decimals = 0, barLength = 100):
self.formatStr = "{0:." + str(decimals) + "f}"
self.total = total
self.decimals = decimals
self.barLength = barLength
def render(self,iteration,prefix = '', suffix = ''):
percents = self.formatStr.format(100 * (iteration / float(self.total)))
filledLength = int(round(self.barLength * iteration / float(self.total)))
bar=''
if self.barLength>0:
if filledLength<2:
filledLength=1
bar = '=' * (filledLength-1)+'>' + '-' * (self.barLength - filledLength)
sys.stdout.write('\r%s %s %s%s %s' % (prefix, bar, percents, '%', suffix))
sys.stdout.flush()
if iteration == self.total:
#sys.stdout.write('\n') #$$ Might want to un-comment this in the future
sys.stdout.flush()
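# Illustrative usage sketch (added for clarity; not part of the original module):
# ProgBar is instantiated once with the total count and render() is called on each
# loop iteration. The loop below and its numbers are purely hypothetical.
if __name__ == "__main__":
    import time
    bar = ProgBar(total=50, decimals=1, barLength=40)
    for i in range(1, 51):
        bar.render(i, prefix='progress:', suffix='complete')
        time.sleep(0.01)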
|
kylemedeREPO_NAMEExoSOFTPATH_START.@ExoSOFT_extracted@ExoSOFT-master@ExoSOFT@[email protected]@.PATH_END.py
|
{
"filename": "algol.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py2/pygments/styles/algol.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
pygments.styles.algol
~~~~~~~~~~~~~~~~~~~~~
Algol publication style.
This style renders source code for publication of algorithms in
scientific papers and academic texts, where its format is frequently used.
It is based on the style of the revised Algol-60 language report[1].
o No colours, only black, white and shades of grey are used.
o Keywords are rendered in lowercase underline boldface.
o Builtins are rendered in lowercase boldface italic.
o Docstrings and pragmas are rendered in dark grey boldface.
o Library identifiers are rendered in dark grey boldface italic.
o Comments are rendered in grey italic.
To render keywords without underlining, refer to the `Algol_Nu` style.
For lowercase conversion of keywords and builtins in languages where
these are not or might not be lowercase, a supporting lexer is required.
The Algol and Modula-2 lexers automatically convert to lowercase whenever
this style is selected.
[1] `Revised Report on the Algorithmic Language Algol-60 <http://www.masswerk.at/algol60/report.htm>`
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Operator
class AlgolStyle(Style):
background_color = "#ffffff"
default_style = ""
styles = {
Comment: "italic #888",
Comment.Preproc: "bold noitalic #888",
Comment.Special: "bold noitalic #888",
Keyword: "underline bold",
Keyword.Declaration: "italic",
Name.Builtin: "bold italic",
Name.Builtin.Pseudo: "bold italic",
Name.Namespace: "bold italic #666",
Name.Class: "bold italic #666",
Name.Function: "bold italic #666",
Name.Variable: "bold italic #666",
Name.Constant: "bold italic #666",
Operator.Word: "bold",
String: "italic #666",
Error: "border:#FF0000"
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py2@pygments@[email protected]@.PATH_END.py
|
{
"filename": "11283_pils_absflux.py",
"repo_name": "shreeyesh-biswal/Rvalue_3D",
"repo_path": "Rvalue_3D_extracted/Rvalue_3D-main/Codes/X-class/AR_11283/11283_pils_absflux.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 5 22:30:51 2022
@author: shreeyeshbiswal
"""
import os
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
AR = "11283"
core_dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/"
base_dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/AR_" + AR
dir_list = sorted(os.listdir(base_dir))
n = len(dir_list)
m = 10 # values per file
tot_len_matrix = np.zeros(shape=(n,m))
max_len_matrix = np.zeros(shape=(n,m))
abs_flx_matrix = np.zeros(shape=(n,m))
index = np.arange(0,n)
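# each of the m = 10 height slices is separated by 0.36 Mm (matches the per-panel altitude labels set further below)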
height = np.arange(0,m)*0.36
P3 = 'Absolute Flux near PILs (10$^{20}$ Mx); AR ' + AR
colorbarticks = [0, 5, 10, 15, 20, 25, 30, 35, 40]
cbar_min = 0
cbar_max = 40
flare_time1 = 142.2
flare_time2 = 166.53
for i in range(0,n):
Time_tag = dir_list[i]
Time = Time_tag[0:19]
Hour = Time[11:13]
print(Time)
dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/AR_" + AR + "/" + Time_tag
os.chdir(dir)
# the if-else statement takes care of missing data
if len(os.listdir(dir)) != 0:
mpils = np.loadtxt("PF_ext_mpils_" + Time + ".dat")
print(np.shape(mpils))
tot_len_matrix[i,:] = mpils[:,0]
max_len_matrix[i,:] = mpils[:,1]
abs_flx_matrix[i,:] = mpils[:,2]
print(Hour)
else:
tot_len_matrix[i,:] = np.nan
max_len_matrix[i,:] = np.nan
abs_flx_matrix[i,:] = np.nan
print("Empty directory")
os.chdir(core_dir)
x = np.arange(0,n)
figure(figsize=(10,10), dpi=100000)
figure, axs = plt.subplots(10)
figure.set_figheight(15)
figure.set_figwidth(9)
cm = plt.cm.get_cmap('afmhot')
mpl.rc('xtick', labelsize=13)
# Plot
sc = axs[0].scatter(x, abs_flx_matrix[:,9], c = abs_flx_matrix[:,9], vmin=cbar_min, vmax=cbar_max, s=10, cmap=cm)
for i in range(0,m):
axs[i].scatter(x, abs_flx_matrix[:,9-i], c = abs_flx_matrix[:,9-i], vmin=cbar_min, vmax=cbar_max, s=10, cmap=cm)
for i in range(0,m):
axs[i].set_ylim([cbar_min, cbar_max])
axs[9].tick_params(axis='x', labelsize=16)
axs[9].set_xticks(np.arange(0,n,24))
# Hide the ylims of individual boxes
for i in range(0,m):
axs[i].set_yticks([])
# Show heights in the altitude
heightfont = 16
for i in range(0,m):
max_alt = (m-1)*0.36
altitude = max_alt-(i*0.36)
alt_str = "{:.2f}".format(altitude)
axs[i].set_ylabel(alt_str + ' ', fontsize = heightfont, rotation = 0)
# Show flare occurrence as dashed vertical lines
for i in range(0,m):
    axs[i].axvline(x = flare_time1, ymin = 0, ymax = 1, linestyle = '--', color = 'k', alpha=0.40)
    axs[i].axvline(x = flare_time2, ymin = 0, ymax = 1, linestyle = '--', color = 'k', alpha=0.40)
    axs[i].axvline(x = 204, ymin = 0, ymax = 1, linestyle = '--', color = 'k', alpha=0.00)
# Orient the text
st = dir_list[0]
start_time = st[0:4] + '/' + st[5:7] + '/' + st[8:10] + '/' + st[11:13] + ':' + st[14:16]
axs[0].text(-30, (cbar_max + (0.35*(cbar_max - cbar_min))), P3, fontsize=23)
axs[5].text(-54, cbar_min + 0.5*(cbar_max - cbar_min), 'Height (Mm)', rotation = 90, fontsize=18)
axs[9].text(16, (cbar_min - (0.65*(cbar_max - cbar_min))), 'Time after ' + start_time + ' (hrs)', rotation = 0, fontsize=18)
figure.subplots_adjust(right=0.8)
cbar_ax = figure.add_axes([0.85, 0.15, 0.05, 0.7])
cbar_ax.tick_params(labelsize=16)
figure.colorbar(sc, cax=cbar_ax, ticks=colorbarticks)
plt.subplots_adjust(wspace=0.5, hspace=0)
plt.show()
mpl.rcParams.update(mpl.rcParamsDefault)
|
shreeyesh-biswalREPO_NAMERvalue_3DPATH_START.@Rvalue_3D_extracted@Rvalue_3D-main@Codes@X-class@AR_11283@[email protected]_END.py
|
{
"filename": "plot_spectrum.py",
"repo_name": "ACCarnall/bagpipes",
"repo_path": "bagpipes_extracted/bagpipes-master/bagpipes/plotting/plot_spectrum.py",
"type": "Python"
}
|
from __future__ import print_function, division, absolute_import
import numpy as np
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
except RuntimeError:
pass
from .general import *
def add_spectrum(spectrum, ax, x_ticks=None, zorder=4, z_non_zero=True,
y_scale=None, ymax=None, color="default", lw=2., label=None,
alpha=1):
""" Add a spectrum to the passed axes. Adds errors if they are
included in the spectrum object as a third column. """
# Sort out axis limits
if not ymax:
ymax = 1.05*np.nanmax(spectrum[:, 1])
if y_scale is None:
y_scale = float(int(np.log10(ymax))-1)
ax.set_ylim(0., ymax*10**-y_scale)
ax.set_xlim(spectrum[0, 0], spectrum[-1, 0])
# Plot the data
if spectrum.shape[1] == 2:
if color == "default":
color = "sandybrown"
ax.plot(spectrum[:, 0], spectrum[:, 1]*10**-y_scale,
color=color, zorder=zorder, lw=lw, label=label, alpha=alpha)
elif spectrum.shape[1] == 3:
if color == "default":
color = "dodgerblue"
ax.plot(spectrum[:, 0], spectrum[:, 1]*10**-y_scale,
color=color, zorder=zorder, lw=lw, label=label, alpha=alpha)
lower = (spectrum[:, 1] - spectrum[:, 2])*10**-y_scale
upper = (spectrum[:, 1] + spectrum[:, 2])*10**-y_scale
upper[upper > ymax*10**-y_scale] = ymax*10**-y_scale
lower[lower < 0.] = 0.
ax.fill_between(spectrum[:, 0], lower, upper, color=color,
zorder=zorder-1, alpha=0.75, linewidth=0)
# Sort out x tick locations
if x_ticks is None:
auto_x_ticks(ax)
else:
ax.set_xticks(x_ticks)
# Sort out axis labels.
auto_axis_label(ax, y_scale, z_non_zero=z_non_zero)
return y_scale
|
ACCarnallREPO_NAMEbagpipesPATH_START.@bagpipes_extracted@bagpipes-master@bagpipes@plotting@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "gwpy/gwpy",
"repo_path": "gwpy_extracted/gwpy-main/gwpy/utils/sphinx/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Extension for sphinx documentation specific to GWpy
"""
|
gwpyREPO_NAMEgwpyPATH_START.@gwpy_extracted@gwpy-main@gwpy@utils@sphinx@[email protected]_END.py
|
{
"filename": "waterfaller.py",
"repo_name": "CHIME-Pulsar-Timing/CHIME-Pulsar_automated_filterbank",
"repo_path": "CHIME-Pulsar_automated_filterbank_extracted/CHIME-Pulsar_automated_filterbank-main/waterfaller.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
waterfaller.py
Make waterfall plots to show frequency sweep of a single pulse.
Reads PSRFITS or SIGPROC filterbank format files.
Patrick Lazarus - Aug. 19, 2011
Paul Scholz - Nov 2015
"""
import optparse
import matplotlib.pyplot as plt
import matplotlib.cm
import numpy as np
from presto import psr_utils
from presto import rfifind
from presto import psrfits
from presto import filterbank
SWEEP_STYLES = ['r-', 'b-', 'g-', 'm-', 'c-']
def get_mask(rfimask, startsamp, N):
"""Return an array of boolean values to act as a mask
for a Spectra object.
Inputs:
rfimask: An rfifind.rfifind object
startsamp: Starting sample
N: number of samples to read
Output:
mask: 2D numpy array of boolean values.
True represents an element that should be masked.
"""
sampnums = np.arange(startsamp, startsamp+N)
blocknums = np.floor(sampnums/rfimask.ptsperint).astype('int')
mask = np.zeros((N, rfimask.nchan), dtype='bool')
for blocknum in np.unique(blocknums):
blockmask = np.zeros_like(mask[blocknums==blocknum])
chans_to_mask = rfimask.mask_zap_chans_per_int[blocknum]
if chans_to_mask.any():
blockmask[:,chans_to_mask] = True
mask[blocknums==blocknum] = blockmask
return mask.T
def maskfile(maskfn, data, start_bin, nbinsextra,extra_mask):
rfimask = rfifind.rfifind(maskfn)
mask = get_mask(rfimask, start_bin, nbinsextra)[::-1]
masked_chans = mask.all(axis=1)
# Mask data
if extra_mask.any():
masked_chans=np.append(masked_chans,extra_mask)
data = data.masked(mask, maskval='median-mid80')
#datacopy = copy.deepcopy(data)
return data, masked_chans
def waterfall(rawdatafile, start, duration, dm=None, nbins=None, nsub=None,\
subdm=None, zerodm=False, downsamp=1, scaleindep=False,\
width_bins=1, mask=False, maskfn=None,extra_mask=None, bandpass_corr=False,
ref_freq=None,freq_mask=None):
"""
    Create a waterfall plot (i.e. dynamic spectrum) from a raw data file.
Inputs:
rawdatafile - a PsrfitsData instance.
start - start time of the data to be read in for waterfalling.
duration - duration of data to be waterfalled.
Optional Inputs:
dm - DM to use when dedispersing data.
Default: Don't de-disperse
nbins - Number of time bins to plot. This option overrides
the duration argument.
Default: determine nbins from duration.
nsub - Number of subbands to use. Must be a factor of number of channels.
Default: Number of channels.
subdm - DM to use when subbanding. Default: same as dm argument.
zerodm - subtract mean of each time-sample from data before
de-dispersing.
downsamp - Factor to downsample in time by. Default: Don't downsample.
scaleindep - Scale each channel independently.
Default: Scale using global maximum.
width_bins - Smooth each channel/subband with a boxcar width_bins wide.
Default: Don't smooth.
maskfn - Filename of RFIFIND mask to use for masking data.
Default: Don't mask data.
bandpass_corr - Correct for the bandpass. Requires an rfifind
mask provided by maskfn keyword argument.
Default: Do not remove bandpass.
ref_freq - Reference frequency to de-disperse to.
If subbanding and de-dispersing the start time
will be corrected to account for change in
reference frequency.
Default: Frequency of top channel.
Outputs:
data - Spectra instance of waterfalled data cube.
nbinsextra - number of time bins read in from raw data.
nbins - number of bins in duration.
start - corrected start time.
"""
if subdm is None:
subdm = dm
# Read data
if ref_freq is None:
ref_freq = rawdatafile.freqs.max()
if nsub and dm:
df = rawdatafile.freqs[1] - rawdatafile.freqs[0]
nchan_per_sub = rawdatafile.nchan/nsub
top_ctrfreq = rawdatafile.freqs.max() - \
0.5*nchan_per_sub*df # center of top subband
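        # shift the start time by the dispersion delay between the reference frequency
        # and the centre of the top subband; 4.15e3 MHz^2 cm^3 pc^-1 s is the usual
        # cold-plasma dispersion constant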
start += 4.15e3 * np.abs(1./ref_freq**2 - 1./top_ctrfreq**2) * dm
start_bin = np.round(start/rawdatafile.tsamp).astype('int')
dmfac = 4.15e3 * np.abs(1./rawdatafile.frequencies[0]**2 - 1./rawdatafile.frequencies[-1]**2)
if nbins is None:
nbins = np.round(duration/rawdatafile.tsamp).astype('int')
if dm:
nbinsextra = np.round((duration + dmfac * dm)/rawdatafile.tsamp).astype('int')
else:
nbinsextra = nbins
# If at end of observation
if (start_bin + nbinsextra) > rawdatafile.nspec-1:
nbinsextra = rawdatafile.nspec-1-start_bin
data = rawdatafile.get_spectra(start_bin, nbinsextra)
if freq_mask:
freq_lims = freq_mask.split(',')
upper = float(freq_lims[1])
lower = float(freq_lims[0])
freqs = rawdatafile.frequencies
extra_mask = np.squeeze(np.where((freqs>lower)&(freqs<upper)))
print(np.squeeze(extra_mask))
# Masking
if mask and maskfn:
data, masked_chans = maskfile(maskfn, data, start_bin, nbinsextra,extra_mask)
else:
masked_chans = np.zeros(rawdatafile.nchan,dtype=bool)
# Bandpass correction
if maskfn and bandpass_corr:
bandpass = rfifind.rfifind(maskfn).bandpass_avg[::-1]
#bandpass[bandpass == 0] = np.min(bandpass[np.nonzero(bandpass)])
masked_chans[bandpass == 0] = True
# ignore top and bottom 1% of band
ignore_chans = np.ceil(0.01*rawdatafile.nchan)
masked_chans[:ignore_chans] = True
masked_chans[-ignore_chans:] = True
data_masked = np.ma.masked_array(data.data)
data_masked[masked_chans] = np.ma.masked
data.data = data_masked
if bandpass_corr:
data.data /= bandpass[:, None]
# Zerodm filtering
if (zerodm == True):
data.data -= data.data.mean(axis=0)
# Subband data
if (nsub is not None) and (subdm is not None):
data.subband(nsub, subdm, padval='mean')
# Dedisperse
if dm:
data.dedisperse(dm, padval='mean')
# Downsample
data.downsample(downsamp)
# scale data
data = data.scaled(scaleindep)
# Smooth
if width_bins > 1:
data.smooth(width_bins, padval='mean')
return data, nbinsextra, nbins, start
def plot_waterfall(data, start, duration,
integrate_ts=False, integrate_spec=False, show_cb=False,
cmap_str="gist_yarg", sweep_dms=[], sweep_posns=[],
ax_im=None, ax_ts=None, ax_spec=None, interactive=True,interactive_masking=False):
""" I want a docstring too!
"""
# Set up axes
if interactive:
fig = plt.figure()
# fig.canvas.set_window_title("Frequency vs. Time")
im_width = 0.6 if integrate_spec else 0.8
im_height = 0.6 if integrate_ts else 0.8
if not ax_im:
ax_im = plt.axes((0.15, 0.15, im_width, im_height))
if integrate_ts and not ax_ts:
ax_ts = plt.axes((0.15, 0.75, im_width, 0.2),sharex=ax_im)
if integrate_spec and not ax_spec:
ax_spec = plt.axes((0.75, 0.15, 0.2, im_height),sharey=ax_im)
# Ploting it up
nbinlim = np.int(duration/data.dt)
data_vals = data.data[..., :nbinlim]
if interactive_masking:
img = ax_im.imshow(data_vals,aspect = "auto",cmap='YlGnBu')
else:
img = ax_im.imshow(data_vals, aspect='auto',
cmap='YlGnBu',
interpolation='nearest', origin='upper',
extent=(data.starttime, data.starttime+ nbinlim*data.dt,
data.freqs.min(), data.freqs.max()))
if show_cb:
cb = ax_im.get_figure().colorbar(img)
cb.set_label("Scaled signal intensity (arbitrary units)")
#plt.axis('tight')
# Sweeping it up
for ii, sweep_dm in enumerate(sweep_dms):
ddm = sweep_dm-data.dm
delays = psr_utils.delay_from_DM(ddm, data.freqs)
delays -= delays.min()
if sweep_posns is None:
sweep_posn = 0.0
elif len(sweep_posns) == 1:
sweep_posn = sweep_posns[0]
else:
sweep_posn = sweep_posns[ii]
sweepstart = data.dt*data.numspectra*sweep_posn+data.starttime
sty = SWEEP_STYLES[ii%len(SWEEP_STYLES)]
ax_im.plot(delays+sweepstart, data.freqs, sty, lw=4, alpha=0.5)
# Dressing it up
ax_im.xaxis.get_major_formatter().set_useOffset(False)
ax_im.set_xlabel("Time")
ax_im.set_ylabel("Observing frequency (MHz)")
# Plot Time series
if integrate_ts:
Data = np.array(data.data[..., :nbinlim])
Dedisp_ts = Data.sum(axis=0)
times = (np.arange(data.numspectra)*data.dt + start)[..., :nbinlim]
ax_ts.plot(times, Dedisp_ts,"k")
ax_ts.set_xlim([times.min(),times.max()])
plt.setp(ax_ts.get_xticklabels(), visible = False)
plt.setp(ax_ts.get_yticklabels(), visible = False)
# Plot Spectrum
if integrate_spec:
spectrum_window = 0.05*duration
window_width = int(spectrum_window/data.dt) # bins
burst_bin = nbinlim//2
on_spec = np.array(data.data[..., burst_bin-window_width:burst_bin+window_width])
Dedisp_spec = on_spec.sum(axis=1)[::-1]
freqs = np.linspace(data.freqs.min(), data.freqs.max(), len(Dedisp_spec))
ax_spec.plot(Dedisp_spec,freqs,"k")
plt.setp(ax_spec.get_xticklabels(), visible = False)
plt.setp(ax_spec.get_yticklabels(), visible = False)
ax_spec.set_ylim([data.freqs.min(),data.freqs.max()])
if integrate_ts:
ax_ts.axvline(times[burst_bin]-spectrum_window,ls="--",c="grey")
ax_ts.axvline(times[burst_bin]+spectrum_window,ls="--",c="grey")
if interactive:
global masked_channels
masked_channels = []
data_vals = data.data[..., :nbinlim]
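        # interactive masking: press 'n' over a row to mark the start of a channel range
        # and 'm' to mark the end; the selected rows are replaced by the data median and
        # the cumulative masked-channel list is printed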
def key_press(event):
global start_mask, end_mask, masked_channels
# check if click is within plot boundaries
if event.key == "n":
if event.inaxes is not None:
# get y value at clicked location
y_val = event.ydata
#get evenly spaced y values between fch1 and fch1 + nchan*foff
y_vals = np.arange(data_vals.shape[0])
print(y_val,y_vals)
#figure out which y value is closest to the clicked y value
closest_y_val = np.argmin(np.abs(y_vals - y_val))
start_mask = closest_y_val
print(f"Start mask at {closest_y_val}")
elif event.key == "m":
if event.inaxes is not None:
# get y value at clicked location
y_val = event.ydata
#get evenly spaced y values between fch1 and fch1 + nchan*foff
y_vals = np.arange(data_vals.shape[0])
#figure out which y value is closest to the clicked y value
closest_y_val = np.argmin(np.abs(y_vals - y_val))
end_mask = closest_y_val
print(f"End mask at {closest_y_val}")
if start_mask != -1 and end_mask != -1:
start_ = min([start_mask,end_mask])
end_ = max([start_mask,end_mask])
to_mask = np.arange(start_,end_)
print(f"Replacing rows {start_} to {end_} with median")
data_vals[to_mask,:] = np.median(data_vals)
ax_im.clear()
ax_im.imshow(data_vals,aspect = "auto",cmap='YlGnBu')
#plot the new data
plt.gcf().canvas.draw_idle()
start_mask = -1
end_mask = -1
masked_channels += list(to_mask)
print_mask_chans = 1023-np.array(masked_channels)
                        # print masked_channels as comma-separated values
csv_mask_chans = ",".join([str(x) for x in print_mask_chans])
print(f"Masked channels: {csv_mask_chans}")
fig.suptitle("Frequency vs. Time")
fig.canvas.mpl_connect('key_press_event',
lambda ev: (ev.key in ('q','Q') and plt.close(fig)))
cid = fig.canvas.mpl_connect('key_press_event', key_press)
plt.show()
def main():
fn = args[0]
if fn.endswith(".fil"):
# Filterbank file
filetype = "filterbank"
rawdatafile = filterbank.FilterbankFile(fn)
elif fn.endswith(".fits"):
# PSRFITS file
filetype = "psrfits"
rawdatafile = psrfits.PsrfitsFile(fn)
else:
raise ValueError("Cannot recognize data file type from "
"extension. (Only '.fits' and '.fil' "
"are supported.)")
data, bins, nbins, start = waterfall(rawdatafile, options.start,
options.duration, dm=options.dm,
nbins=options.nbins, nsub=options.nsub,
subdm=options.subdm, zerodm=options.zerodm,
downsamp=options.downsamp,
scaleindep=options.scaleindep,
width_bins=options.width_bins, mask=options.mask,
maskfn=options.maskfile,
extra_mask=options.extra_mask,
bandpass_corr=options.bandpass_corr,
freq_mask=options.freq_mask)
plot_waterfall(data, start, options.duration, integrate_ts=options.integrate_ts,
integrate_spec=options.integrate_spec, show_cb=options.show_cb,
cmap_str=options.cmap, sweep_dms=options.sweep_dms,
sweep_posns=options.sweep_posns,interactive_masking=options.interactive_masking)
if __name__=='__main__':
parser = optparse.OptionParser(prog="waterfaller.py",
version="v0.9 Patrick Lazarus (Aug. 19, 2011)",
usage="%prog [OPTIONS] INFILE",
description="Create a waterfall plot to show the "
"frequency sweep of a single pulse "
"in psrFits data.")
parser.add_option('--subdm', dest='subdm', type='float',
help="DM to use when subbanding. (Default: "
"same as --dm)", default=None)
parser.add_option('--zerodm', dest='zerodm', action='store_true',
help="If this flag is set - Turn Zerodm filter - ON (Default: "
"OFF)", default=False)
parser.add_option('-s', '--nsub', dest='nsub', type='int',
help="Number of subbands to use. Must be a factor "
"of number of channels. (Default: "
"number of channels)", default=None)
parser.add_option('-d', '--dm', dest='dm', type='float',
help="DM to use when dedispersing data for plot. "
"(Default: 0 pc/cm^3)", default=0.0)
parser.add_option('--show-ts', dest='integrate_ts', action='store_true',
help="Plot the time series. "
"(Default: Do not show the time series)", default=False)
parser.add_option('--show-spec', dest='integrate_spec', action='store_true',
help="Plot the spectrum. "
"(Default: Do not show the spectrum)", default=False)
parser.add_option('--bandpass', dest='bandpass_corr', action='store_true',
help="Correct for the bandpass. Requires an rfifind "
"mask provided by --mask option."
"(Default: Do not remove bandpass)", default=False)
parser.add_option('-T', '--start-time', dest='start', type='float',
help="Time into observation (in seconds) at which "
"to start plot.")
parser.add_option('-t', '--duration', dest='duration', type='float',
help="Duration (in seconds) of plot.")
parser.add_option('-n', '--nbins', dest='nbins', type='int',
help="Number of time bins to plot. This option takes "
"precedence over -t/--duration if both are "
"provided.")
parser.add_option('--width-bins', dest='width_bins', type='int',
help="Smooth each channel/subband with a boxcar "
"this many bins wide. (Default: Don't smooth)",
default=1)
parser.add_option('--sweep-dm', dest='sweep_dms', type='float',
action='append',
help="Show the frequency sweep using this DM. "
"(Default: Don't show sweep)", default=[])
parser.add_option('--sweep-posn', dest='sweep_posns', type='float',
action='append',
help="Show the frequency sweep at this position. "
"The position refers to the high-frequency "
"edge of the plot. Also, the position should "
"be a number between 0 and 1, where 0 is the "
"left edge of the plot. "
"(Default: 0)", default=None)
parser.add_option('--downsamp', dest='downsamp', type='int',
help="Factor to downsample data by. (Default: 1).",
default=1)
parser.add_option('--maskfile', dest='maskfile', type='string',
help="Mask file produced by rfifind. Used for "
"masking and bandpass correction.",
default=None)
parser.add_option('--extramask', dest='extra_mask', type='string',
help="The extra channels you want masked, on top of the rfifind mask files",
default=np.array([]))
parser.add_option('--mask', dest='mask', action="store_true",
help="Mask data using rfifind mask (Default: Don't mask).",
default=False)
parser.add_option('--scaleindep', dest='scaleindep', action='store_true',
help="If this flag is set scale each channel "
"independently. (Default: Scale using "
"global maximum.)",
default=False)
parser.add_option('--show-colour-bar', dest='show_cb', action='store_true',
help="If this flag is set show a colour bar. "
"(Default: No colour bar.)",
default=False)
parser.add_option('--colour-map', dest='cmap',
help="The name of a valid matplotlib colour map."
"(Default: gist_yarg.)",
default='gist_yarg')
parser.add_option('--freq_mask', dest='freq_mask',
help="mask out certain frequencies",
default=None)
parser.add_option('--interactive_masking', dest='interactive_masking',
help="mask out certain frequencies interactively",
action='store_true', default=False)
options, args = parser.parse_args()
if not hasattr(options, 'start'):
raise ValueError("Start time (-T/--start-time) "
"must be given on command line!")
if (not hasattr(options, 'duration')) and (not hasattr(options, 'nbins')):
raise ValueError("One of duration (-t/--duration) "
"and num bins (-n/--nbins)"
"must be given on command line!")
if options.subdm is None:
options.subdm = options.dm
main()
|
CHIME-Pulsar-TimingREPO_NAMECHIME-Pulsar_automated_filterbankPATH_START.@CHIME-Pulsar_automated_filterbank_extracted@[email protected]@.PATH_END.py
|
{
"filename": "tools.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/partners/exa/langchain_exa/tools.py",
"type": "Python"
}
|
"""Tool for the Exa Search API."""
from typing import Any, Dict, List, Optional, Union
from exa_py import Exa # type: ignore[untyped-import]
from exa_py.api import (
HighlightsContentsOptions, # type: ignore[untyped-import]
TextContentsOptions, # type: ignore[untyped-import]
)
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field, SecretStr, model_validator
from langchain_exa._utilities import initialize_client
class ExaSearchResults(BaseTool):
"""Exa Search tool.
Setup:
Install ``langchain-exa`` and set environment variable ``EXA_API_KEY``.
.. code-block:: bash
pip install -U langchain-exa
export EXA_API_KEY="your-api-key"
Instantiation:
.. code-block:: python
            from langchain_exa import ExaSearchResults
tool = ExaSearchResults()
Invocation with args:
.. code-block:: python
tool.invoke({"query":"what is the weather in SF","num_results":1})
.. code-block:: python
SearchResponse(results=[Result(url='https://www.wunderground.com/weather/37.8,-122.4', id='https://www.wunderground.com/weather/37.8,-122.4', title='San Francisco, CA Weather Conditionsstar_ratehome', score=0.1843988299369812, published_date='2023-02-23T01:17:06.594Z', author=None, text='The time period when the sun is no more than 6 degrees below the horizon at either sunrise or sunset. The horizon should be clearly defined and the brightest stars should be visible under good atmospheric conditions (i.e. no moonlight, or other lights). One still should be able to carry on ordinary outdoor activities. The time period when the sun is between 6 and 12 degrees below the horizon at either sunrise or sunset. The horizon is well defined and the outline of objects might be visible without artificial light. Ordinary outdoor activities are not possible at this time without extra illumination. The time period when the sun is between 12 and 18 degrees below the horizon at either sunrise or sunset. The sun does not contribute to the illumination of the sky before this time in the morning, or after this time in the evening. In the beginning of morning astronomical twilight and at the end of astronomical twilight in the evening, sky illumination is very faint, and might be undetectable. The time of Civil Sunset minus the time of Civil Sunrise. The time of Actual Sunset minus the time of Actual Sunrise. The change in length of daylight between today and tomorrow is also listed when available.', highlights=None, highlight_scores=None, summary=None)], autoprompt_string=None)
Invocation with ToolCall:
.. code-block:: python
tool.invoke({"args": {"query":"what is the weather in SF","num_results":1}, "id": "1", "name": tool.name, "type": "tool_call"})
.. code-block:: python
ToolMessage(content='Title: San Francisco, CA Weather Conditionsstar_ratehome\nURL: https://www.wunderground.com/weather/37.8,-122.4\nID: https://www.wunderground.com/weather/37.8,-122.4\nScore: 0.1843988299369812\nPublished Date: 2023-02-23T01:17:06.594Z\nAuthor: None\nText: The time period when the sun is no more than 6 degrees below the horizon at either sunrise or sunset. The horizon should be clearly defined and the brightest stars should be visible under good atmospheric conditions (i.e. no moonlight, or other lights). One still should be able to carry on ordinary outdoor activities. The time period when the sun is between 6 and 12 degrees below the horizon at either sunrise or sunset. The horizon is well defined and the outline of objects might be visible without artificial light. Ordinary outdoor activities are not possible at this time without extra illumination. The time period when the sun is between 12 and 18 degrees below the horizon at either sunrise or sunset. The sun does not contribute to the illumination of the sky before this time in the morning, or after this time in the evening. In the beginning of morning astronomical twilight and at the end of astronomical twilight in the evening, sky illumination is very faint, and might be undetectable. The time of Civil Sunset minus the time of Civil Sunrise. The time of Actual Sunset minus the time of Actual Sunrise. The change in length of daylight between today and tomorrow is also listed when available.\nHighlights: None\nHighlight Scores: None\nSummary: None\n', name='exa_search_results_json', tool_call_id='1')
""" # noqa: E501
name: str = "exa_search_results_json"
description: str = (
"A wrapper around Exa Search. "
"Input should be an Exa-optimized query. "
"Output is a JSON array of the query results"
)
client: Exa = Field(default=None)
exa_api_key: SecretStr = Field(default=None)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate the environment."""
values = initialize_client(values)
return values
def _run(
self,
query: str,
num_results: int,
text_contents_options: Optional[Union[TextContentsOptions, bool]] = None,
highlights: Optional[Union[HighlightsContentsOptions, bool]] = None,
include_domains: Optional[List[str]] = None,
exclude_domains: Optional[List[str]] = None,
start_crawl_date: Optional[str] = None,
end_crawl_date: Optional[str] = None,
start_published_date: Optional[str] = None,
end_published_date: Optional[str] = None,
use_autoprompt: Optional[bool] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Union[List[Dict], str]:
"""Use the tool."""
try:
return self.client.search_and_contents(
query,
num_results=num_results,
text=text_contents_options, # type: ignore
highlights=highlights, # type: ignore
include_domains=include_domains,
exclude_domains=exclude_domains,
start_crawl_date=start_crawl_date,
end_crawl_date=end_crawl_date,
start_published_date=start_published_date,
end_published_date=end_published_date,
use_autoprompt=use_autoprompt,
) # type: ignore
except Exception as e:
return repr(e)
class ExaFindSimilarResults(BaseTool):
"""Tool that queries the Metaphor Search API and gets back json."""
name: str = "exa_find_similar_results_json"
description: str = (
"A wrapper around Exa Find Similar. "
"Input should be an Exa-optimized query. "
"Output is a JSON array of the query results"
)
client: Exa = Field(default=None)
exa_api_key: SecretStr = Field(default=None)
exa_base_url: Optional[str] = None
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate the environment."""
values = initialize_client(values)
return values
def _run(
self,
url: str,
num_results: int,
text_contents_options: Optional[Union[TextContentsOptions, bool]] = None,
highlights: Optional[Union[HighlightsContentsOptions, bool]] = None,
include_domains: Optional[List[str]] = None,
exclude_domains: Optional[List[str]] = None,
start_crawl_date: Optional[str] = None,
end_crawl_date: Optional[str] = None,
start_published_date: Optional[str] = None,
end_published_date: Optional[str] = None,
exclude_source_domain: Optional[bool] = None,
category: Optional[str] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Union[List[Dict], str]:
"""Use the tool."""
try:
return self.client.find_similar_and_contents(
url,
num_results=num_results,
text=text_contents_options, # type: ignore
highlights=highlights, # type: ignore
include_domains=include_domains,
exclude_domains=exclude_domains,
start_crawl_date=start_crawl_date,
end_crawl_date=end_crawl_date,
start_published_date=start_published_date,
end_published_date=end_published_date,
exclude_source_domain=exclude_source_domain,
category=category,
) # type: ignore
except Exception as e:
return repr(e)
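# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the library API above: it assumes a valid
# EXA_API_KEY is exported in the environment and that outbound network access
# is available. The query string and num_results value are illustrative only.
if __name__ == "__main__":
    search_tool = ExaSearchResults()
    # BaseTool.invoke accepts a dict of the _run arguments; on failure the
    # tool returns repr(exception) instead of raising.
    response = search_tool.invoke(
        {"query": "recent advances in battery chemistry", "num_results": 2}
    )
    print(response)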
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@partners@exa@[email protected]@.PATH_END.py
|
{
"filename": "minibatch_rv.py",
"repo_name": "pymc-devs/pymc",
"repo_path": "pymc_extracted/pymc-main/pymc/variational/minibatch_rv.py",
"type": "Python"
}
|
# Copyright 2024 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Sequence
from typing import Any, cast
import pytensor.tensor as pt
from pytensor import Variable, config
from pytensor.graph import Apply, Op
from pytensor.tensor import NoneConst, TensorVariable, as_tensor_variable
from pymc.logprob.abstract import MeasurableOp, _logprob
from pymc.logprob.basic import logp
class MinibatchRandomVariable(MeasurableOp, Op):
"""RV whose logprob should be rescaled to match total_size."""
__props__ = ()
view_map = {0: [0]}
def make_node(self, rv, *total_size):
rv = as_tensor_variable(rv)
total_size = [
as_tensor_variable(t, dtype="int64", ndim=0) if t is not None else NoneConst
for t in total_size
]
assert len(total_size) == rv.ndim
out = rv.type()
return Apply(self, [rv, *total_size], [out])
def perform(self, node, inputs, output_storage):
output_storage[0][0] = inputs[0]
minibatch_rv = MinibatchRandomVariable()
EllipsisType = Any # EllipsisType is not present in Python 3.8 yet
def create_minibatch_rv(
rv: TensorVariable,
total_size: int | None | Sequence[int | EllipsisType | None],
) -> TensorVariable:
"""Create variable whose logp is rescaled by total_size."""
if isinstance(total_size, int):
if rv.ndim <= 1:
total_size = [total_size]
else:
missing_ndims = rv.ndim - 1
total_size = [total_size] + [None] * missing_ndims
elif isinstance(total_size, list | tuple):
total_size = list(total_size)
if Ellipsis in total_size:
# Replace Ellipsis by None
if total_size.count(Ellipsis) > 1:
raise ValueError("Only one Ellipsis can be present in total_size")
sep = total_size.index(Ellipsis)
begin = total_size[:sep]
end = total_size[sep + 1 :]
missing_ndims = max((rv.ndim - len(begin) - len(end), 0))
total_size = begin + [None] * missing_ndims + end
if len(total_size) > rv.ndim:
raise ValueError(f"Length of total_size {total_size} is langer than RV ndim {rv.ndim}")
else:
raise TypeError(f"Invalid type for total_size: {total_size}")
return cast(TensorVariable, minibatch_rv(rv, *total_size))
def get_scaling(total_size: Sequence[Variable], shape: TensorVariable) -> TensorVariable:
"""Get scaling constant for logp."""
# mypy doesn't understand we can convert a shape TensorVariable into a tuple
shape = tuple(shape) # type: ignore[assignment]
# Scalar RV
if len(shape) == 0: # type: ignore[arg-type]
coef = total_size[0] if not NoneConst.equals(total_size[0]) else 1.0
else:
coefs = [t / shape[i] for i, t in enumerate(total_size) if not NoneConst.equals(t)]
coef = pt.prod(coefs)
return pt.cast(coef, dtype=config.floatX)
@_logprob.register(MinibatchRandomVariable)
def minibatch_rv_logprob(op, values, *inputs, **kwargs):
[value] = values
rv, *total_size = inputs
return logp(rv, value, **kwargs) * get_scaling(total_size, value.shape)
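# Minimal self-check sketch (not part of the module): it assumes PyMC is
# importable and uses a toy Normal RV with a minibatch of 10 values drawn from
# a notional dataset of 1000 rows, so the logp should be rescaled by 100.
if __name__ == "__main__":
    import numpy as np
    import pymc as pm
    rv = pm.Normal.dist(mu=0.0, sigma=1.0, shape=(10,))
    mb_rv = create_minibatch_rv(rv, total_size=1000)
    value = np.zeros(10)
    scaled = pm.logp(mb_rv, value).sum().eval()
    unscaled = pm.logp(rv, value).sum().eval()
    print(scaled / unscaled)  # expected to be ~100 = total_size / batch size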
|
pymc-devsREPO_NAMEpymcPATH_START.@pymc_extracted@pymc-main@pymc@variational@[email protected]_END.py
|
{
"filename": "tool.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/tools/nuclia/tool.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.nuclia.tool import NUASchema, NucliaUnderstandingAPI
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"NUASchema": "langchain_community.tools.nuclia.tool",
"NucliaUnderstandingAPI": "langchain_community.tools.nuclia.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NUASchema",
"NucliaUnderstandingAPI",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@tools@[email protected]@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "tcallister/learning-p-det",
"repo_path": "learning-p-det_extracted/learning-p-det-main/docs/conf.py",
"type": "Python"
}
|
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'learning-p-det'
copyright = '2024, T. Callister'
author = 'T. Callister'
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('../code/'))
import sphinx_rtd_theme
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
    'numpydoc',
    'sphinx_rtd_theme',
    'sphinx.ext.autosectionlabel',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.githubpages',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
]
templates_path = ['templates']
exclude_patterns = ['build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['static']
html_theme_options = {'body_max_width':'100%'}
|
tcallisterREPO_NAMElearning-p-detPATH_START.@learning-p-det_extracted@learning-p-det-main@[email protected]@.PATH_END.py
|
{
"filename": "_dividerwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/yaxis/_dividerwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DividerwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="dividerwidth", parent_name="layout.yaxis", **kwargs
):
super(DividerwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "ticks"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@yaxis@[email protected]_END.py
|
{
"filename": "conftest.py",
"repo_name": "jdhenshaw/scousepy",
"repo_path": "scousepy_extracted/scousepy-master/scousepy/conftest.py",
"type": "Python"
}
|
"""Configure Test Suite.
This file is used to configure the behavior of pytest when using the Astropy
test infrastructure. It needs to live inside the package in order for it to
get picked up when running the tests inside an interpreter using
`scousepy.test()`.
"""
import os
from astropy.version import version as astropy_version
# For Astropy 3.0 and later, we can use the standalone pytest plugin
if astropy_version < '3.0':
from astropy.tests.pytest_plugins import * # noqa
del pytest_report_header
ASTROPY_HEADER = True
else:
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
ASTROPY_HEADER = True
except ImportError:
ASTROPY_HEADER = False
def pytest_configure(config):
"""Configure Pytest with Astropy.
Parameters
----------
config : pytest configuration
"""
if ASTROPY_HEADER:
config.option.astropy_header = True
# Customize the following lines to add/remove entries from the list of
# packages for which version numbers are displayed when running the tests.
PYTEST_HEADER_MODULES.pop('Pandas', None)
PYTEST_HEADER_MODULES['scikit-image'] = 'skimage'
from . import __version__
packagename = os.path.basename(os.path.dirname(__file__))
TESTED_VERSIONS[packagename] = __version__
# Uncomment the last two lines in this block to treat all DeprecationWarnings as
# exceptions. For Astropy v2.0 or later, there are 2 additional keywords,
# as follows (although default should work for most cases).
# To ignore some packages that produce deprecation warnings on import
# (in addition to 'compiler', 'scipy', 'pygments', 'ipykernel', and
# 'setuptools'), add:
# modules_to_ignore_on_import=['module_1', 'module_2']
# To ignore some specific deprecation warning messages for Python version
# MAJOR.MINOR or later, add:
# warnings_to_ignore_by_pyver={(MAJOR, MINOR): ['Message to ignore']}
# from astropy.tests.helper import enable_deprecations_as_exceptions # noqa
# enable_deprecations_as_exceptions()
|
jdhenshawREPO_NAMEscousepyPATH_START.@scousepy_extracted@scousepy-master@[email protected]@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "nombac/optab",
"repo_path": "optab_extracted/optab-main/README.md",
"type": "Markdown"
}
|
# **`Optab`**
### **Public Fortran90 code package for generating ideal-gas opacity tables**
To execute radiation hydrodynamics simulations, the equation of state and opacity are both critical components. Ideally, these elements should originate from the same set of chemical equilibrium abundances, yet this alignment is not always practiced. `Optab` is designed to calculate opacity based on the chemical equilibrium abundances provided by the user. It outputs both mean and monochromatic opacities, thus enabling the creation of opacity tables that are consistent with the user's equation of state.
<img src="./sample/tab100/input/eos/eos.eps.png" width="160"><img src="./sample/tab100/output/ross.png" width="160"><img src="./sample/tab100/output/pla.png" width="160"><img src="./sample/tab100/output/pla2.png" width="160"><img src="./sample/tab100/output/mono_03000.png" width="160">
### Opacity sources currently implemented
- Line absorption
- Atomic lines
- [Kurucz](http://kurucz.harvard.edu/)
- Molecular lines
- [HITRAN](https://hitran.org/)
- [Exomol](https://www.exomol.com/)
- Continuum absorption
- Bremsstrahlung
- [van Hoof et al. (2014)](https://doi.org/10.1093/mnras/stu1438)
- [John (1988)](https://ui.adsabs.harvard.edu/abs/1988A&A...193..189J): H<sup>-</sup>
- [John (1975)](https://doi.org/10.1093/mnras/172.2.305): H<sub>2</sub><sup>-</sup>
- Photoionization
- [Verner & Yakovlev (1995)](https://ui.adsabs.harvard.edu/abs/1995A&AS..109..125V)
- [Verner et al. (1996)](https://ui.adsabs.harvard.edu/abs/1996ApJ...465..487V)
- [Mathisen's compilation](https://inis.iaea.org/search/search.aspx?orig_q=RN:16033032)
- [TOPbase](http://cdsweb.u-strasbg.fr/topbase/topbase.html)
- [Ohmura & Ohmura (1960)](https://doi.org/10.1103/PhysRev.118.154): H<sup>-</sup>
- [Yan et al. (2001)](https://iopscience.iop.org/article/10.1086/322775): H<sub>2</sub>
- Collision-induced absorption (EXPERIMENTAL)
- [HITRAN](https://hitran.org/cia/)
- Scattering
- Thomson scattering
- Rayleigh scattering
- [Rohrmann & Rueda (2022)](https://doi.org/10.1051/0004-6361/202243883): H
- [Rohrmann (2017)](https://doi.org/10.1093/mnras/stx2440): He
- [Tarafdar & Vardya (1973)](https://doi.org/10.1093/mnras/163.3.261): H<sub>2</sub>
## Author
Shigenobu Hirose (JAMSTEC, [email protected])
## Reference
Hirose, S., Hauschildt, P., Minoshima, T., Tomida, K., and Sano, T.
- Astronomy and Astrophysics 659, A87 (2022): https://doi.org/10.1051/0004-6361/202141076
- arXiv: https://arxiv.org/abs/2112.05689
---
## Quick start guide for `optab`
1. **Build `optab` Executables**
- Requirements:
- [GNU Fortran](https://gcc.gnu.org/)
- [Open MPI](https://www.open-mpi.org/)
- [HDF5](https://www.hdfgroup.org/solutions/hdf5/)
- Source directories:
- `src/`: Contains source codes of `optab`
- `eos/src/`: Contains source codes for creating HDF5 chemical abundance tables
- `database/src/`: Contains source codes for generating HDF5 opacity databases
- Build instructions:\
Navigate to each source directory and execute make. If necessary, adjust the Makefile to set the paths for Open MPI and HDF5, as well as the Fortran compiler flags. Example Makefile adjustments:
```makefile
OPENMPI = /opt/local
HDF5 = /opt/local
FFLAGS = -Wall #-fbounds-check
```
1. **Build the `optab` Database**\
See [`database/README.md`](database/README.md).
1. **Run `optab` Sample**\
See [`sample/README.md`](sample/README.md).
---
**EOF**
|
nombacREPO_NAMEoptabPATH_START.@optab_extracted@[email protected]@.PATH_END.py
|
{
"filename": "test_real_transforms.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/fftpack/tests/test_real_transforms.py",
"type": "Python"
}
|
from __future__ import division, print_function, absolute_import
from os.path import join, dirname
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
from pytest import raises as assert_raises
from scipy.fftpack.realtransforms import (
dct, idct, dst, idst, dctn, idctn, dstn, idstn)
# Matlab reference data
MDATA = np.load(join(dirname(__file__), 'test.npz'))
X = [MDATA['x%d' % i] for i in range(8)]
Y = [MDATA['y%d' % i] for i in range(8)]
# FFTW reference data: the data are organized as follows:
# * SIZES is an array containing all available sizes
# * for every type (1, 2, 3, 4) and every size, the array dct_type_size
# contains the output of the DCT applied to the input np.linspace(0, size-1,
# size)
FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz'))
FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz'))
FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']
def fftw_dct_ref(type, size, dt):
x = np.linspace(0, size-1, size).astype(dt)
dt = np.result_type(np.float32, dt)
if dt == np.double:
data = FFTWDATA_DOUBLE
elif dt == np.float32:
data = FFTWDATA_SINGLE
else:
raise ValueError()
y = (data['dct_%d_%d' % (type, size)]).astype(dt)
return x, y, dt
def fftw_dst_ref(type, size, dt):
x = np.linspace(0, size-1, size).astype(dt)
dt = np.result_type(np.float32, dt)
if dt == np.double:
data = FFTWDATA_DOUBLE
elif dt == np.float32:
data = FFTWDATA_SINGLE
else:
raise ValueError()
y = (data['dst_%d_%d' % (type, size)]).astype(dt)
return x, y, dt
def dct_2d_ref(x, **kwargs):
""" used as a reference in testing dct2. """
x = np.array(x, copy=True)
for row in range(x.shape[0]):
x[row, :] = dct(x[row, :], **kwargs)
for col in range(x.shape[1]):
x[:, col] = dct(x[:, col], **kwargs)
return x
def idct_2d_ref(x, **kwargs):
""" used as a reference in testing idct2. """
x = np.array(x, copy=True)
for row in range(x.shape[0]):
x[row, :] = idct(x[row, :], **kwargs)
for col in range(x.shape[1]):
x[:, col] = idct(x[:, col], **kwargs)
return x
def dst_2d_ref(x, **kwargs):
""" used as a reference in testing dst2. """
x = np.array(x, copy=True)
for row in range(x.shape[0]):
x[row, :] = dst(x[row, :], **kwargs)
for col in range(x.shape[1]):
x[:, col] = dst(x[:, col], **kwargs)
return x
def idst_2d_ref(x, **kwargs):
""" used as a reference in testing idst2. """
x = np.array(x, copy=True)
for row in range(x.shape[0]):
x[row, :] = idst(x[row, :], **kwargs)
for col in range(x.shape[1]):
x[:, col] = idst(x[:, col], **kwargs)
return x
class TestComplex(object):
def test_dct_complex64(self):
y = dct(1j*np.arange(5, dtype=np.complex64))
x = 1j*dct(np.arange(5))
assert_array_almost_equal(x, y)
def test_dct_complex(self):
y = dct(np.arange(5)*1j)
x = 1j*dct(np.arange(5))
assert_array_almost_equal(x, y)
def test_idct_complex(self):
y = idct(np.arange(5)*1j)
x = 1j*idct(np.arange(5))
assert_array_almost_equal(x, y)
def test_dst_complex64(self):
y = dst(np.arange(5, dtype=np.complex64)*1j)
x = 1j*dst(np.arange(5))
assert_array_almost_equal(x, y)
def test_dst_complex(self):
y = dst(np.arange(5)*1j)
x = 1j*dst(np.arange(5))
assert_array_almost_equal(x, y)
def test_idst_complex(self):
y = idst(np.arange(5)*1j)
x = 1j*idst(np.arange(5))
assert_array_almost_equal(x, y)
class _TestDCTBase(object):
def setup_method(self):
self.rdt = None
self.dec = 14
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
x, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
y = dct(x, type=self.type)
assert_equal(y.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
err_msg="Size %d failed" % i)
def test_axis(self):
nt = 2
for i in [7, 8, 9, 16, 32, 64]:
x = np.random.randn(nt, i)
y = dct(x, type=self.type)
for j in range(nt):
assert_array_almost_equal(y[j], dct(x[j], type=self.type),
decimal=self.dec)
x = x.T
y = dct(x, axis=0, type=self.type)
for j in range(nt):
assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type),
decimal=self.dec)
class _TestDCTIIBase(_TestDCTBase):
def test_definition_matlab(self):
        # Test correspondence with matlab (orthonormal mode).
for i in range(len(X)):
dt = np.result_type(np.float32, self.rdt)
x = np.array(X[i], dtype=dt)
yr = Y[i]
y = dct(x, norm="ortho", type=2)
assert_equal(y.dtype, dt)
assert_array_almost_equal(y, yr, decimal=self.dec)
class _TestDCTIIIBase(_TestDCTBase):
def test_definition_ortho(self):
        # Test orthonormal mode.
for i in range(len(X)):
x = np.array(X[i], dtype=self.rdt)
dt = np.result_type(np.float32, self.rdt)
y = dct(x, norm='ortho', type=2)
xi = dct(y, norm="ortho", type=3)
assert_equal(xi.dtype, dt)
assert_array_almost_equal(xi, x, decimal=self.dec)
class TestDCTIDouble(_TestDCTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 10
self.type = 1
class TestDCTIFloat(_TestDCTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 1
class TestDCTIInt(_TestDCTBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 1
class TestDCTIIDouble(_TestDCTIIBase):
def setup_method(self):
self.rdt = np.double
self.dec = 10
self.type = 2
class TestDCTIIFloat(_TestDCTIIBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 2
class TestDCTIIInt(_TestDCTIIBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 2
class TestDCTIIIDouble(_TestDCTIIIBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestDCTIIIFloat(_TestDCTIIIBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 3
class TestDCTIIIInt(_TestDCTIIIBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 3
class _TestIDCTBase(object):
def setup_method(self):
self.rdt = None
self.dec = 14
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
x = idct(yr, type=self.type)
if self.type == 1:
x /= 2 * (i-1)
else:
x /= 2 * i
assert_equal(x.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
err_msg="Size %d failed" % i)
class TestIDCTIDouble(_TestIDCTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 10
self.type = 1
class TestIDCTIFloat(_TestIDCTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 4
self.type = 1
class TestIDCTIInt(_TestIDCTBase):
def setup_method(self):
self.rdt = int
self.dec = 4
self.type = 1
class TestIDCTIIDouble(_TestIDCTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 10
self.type = 2
class TestIDCTIIFloat(_TestIDCTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 2
class TestIDCTIIInt(_TestIDCTBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 2
class TestIDCTIIIDouble(_TestIDCTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestIDCTIIIFloat(_TestIDCTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 3
class TestIDCTIIIInt(_TestIDCTBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 3
class _TestDSTBase(object):
def setup_method(self):
self.rdt = None # dtype
self.dec = None # number of decimals to match
self.type = None # dst type
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
y = dst(xr, type=self.type)
assert_equal(y.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
err_msg="Size %d failed" % i)
class TestDSTIDouble(_TestDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 1
class TestDSTIFloat(_TestDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 1
class TestDSTIInt(_TestDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 1
class TestDSTIIDouble(_TestDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 2
class TestDSTIIFloat(_TestDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 6
self.type = 2
class TestDSTIIInt(_TestDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 6
self.type = 2
class TestDSTIIIDouble(_TestDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestDSTIIIFloat(_TestDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 7
self.type = 3
class TestDSTIIIInt(_TestDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 7
self.type = 3
class _TestIDSTBase(object):
def setup_method(self):
self.rdt = None
self.dec = None
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
x = idst(yr, type=self.type)
if self.type == 1:
x /= 2 * (i+1)
else:
x /= 2 * i
assert_equal(x.dtype, dt)
# XXX: we divide by np.max(x) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
err_msg="Size %d failed" % i)
class TestIDSTIDouble(_TestIDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 12
self.type = 1
class TestIDSTIFloat(_TestIDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 4
self.type = 1
class TestIDSTIInt(_TestIDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 4
self.type = 1
class TestIDSTIIDouble(_TestIDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 2
class TestIDSTIIFloat(_TestIDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 6
self.type = 2
class TestIDSTIIInt(_TestIDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 6
self.type = 2
class TestIDSTIIIDouble(_TestIDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestIDSTIIIFloat(_TestIDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 6
self.type = 3
class TestIDSTIIIInt(_TestIDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 6
self.type = 3
class TestOverwrite(object):
"""Check input overwrite behavior """
real_dtypes = [np.float32, np.float64]
def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x,
should_overwrite, **kw):
x2 = x.copy()
routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x)
sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
if not should_overwrite:
assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
for type in [1, 2, 3]:
for overwrite_x in [True, False]:
for norm in [None, 'ortho']:
if type == 1 and norm == 'ortho':
continue
should_overwrite = (overwrite_x
and dtype in overwritable_dtypes
and (len(shape) == 1 or
(axis % len(shape) == len(shape)-1
)))
self._check(data, routine, type, None, axis, norm,
overwrite_x, should_overwrite)
def test_dct(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(dct, dtype, (16,), -1, overwritable)
self._check_1d(dct, dtype, (16, 2), 0, overwritable)
self._check_1d(dct, dtype, (2, 16), 1, overwritable)
def test_idct(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(idct, dtype, (16,), -1, overwritable)
self._check_1d(idct, dtype, (16, 2), 0, overwritable)
self._check_1d(idct, dtype, (2, 16), 1, overwritable)
def test_dst(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(dst, dtype, (16,), -1, overwritable)
self._check_1d(dst, dtype, (16, 2), 0, overwritable)
self._check_1d(dst, dtype, (2, 16), 1, overwritable)
def test_idst(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(idst, dtype, (16,), -1, overwritable)
self._check_1d(idst, dtype, (16, 2), 0, overwritable)
self._check_1d(idst, dtype, (2, 16), 1, overwritable)
class Test_DCTN_IDCTN(object):
dec = 14
types = [1, 2, 3]
norms = [None, 'ortho']
rstate = np.random.RandomState(1234)
shape = (32, 16)
data = rstate.randn(*shape)
# Sets of functions to test
function_sets = [dict(forward=dctn,
inverse=idctn,
forward_ref=dct_2d_ref,
inverse_ref=idct_2d_ref),
dict(forward=dstn,
inverse=idstn,
forward_ref=dst_2d_ref,
inverse_ref=idst_2d_ref), ]
def test_axes_round_trip(self):
norm = 'ortho'
for function_set in self.function_sets:
fforward = function_set['forward']
finverse = function_set['inverse']
for axes in [None, (1, ), (0, ), (0, 1), (-2, -1)]:
for dct_type in self.types:
if norm == 'ortho' and dct_type == 1:
continue # 'ortho' not supported by DCT-I
tmp = fforward(self.data, type=dct_type, axes=axes,
norm=norm)
tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
assert_array_almost_equal(self.data, tmp, decimal=self.dec)
def test_dctn_vs_2d_reference(self):
for function_set in self.function_sets:
fforward = function_set['forward']
fforward_ref = function_set['forward_ref']
for dct_type in self.types:
for norm in self.norms:
if norm == 'ortho' and dct_type == 1:
continue # 'ortho' not supported by DCT-I
y1 = fforward(self.data, type=dct_type, axes=None,
norm=norm)
y2 = fforward_ref(self.data, type=dct_type, norm=norm)
assert_array_almost_equal(y1, y2, decimal=11)
def test_idctn_vs_2d_reference(self):
for function_set in self.function_sets:
finverse = function_set['inverse']
finverse_ref = function_set['inverse_ref']
for dct_type in self.types:
for norm in self.norms:
print(function_set, dct_type, norm)
if norm == 'ortho' and dct_type == 1:
continue # 'ortho' not supported by DCT-I
fdata = dctn(self.data, type=dct_type, norm=norm)
y1 = finverse(fdata, type=dct_type, norm=norm)
y2 = finverse_ref(fdata, type=dct_type, norm=norm)
assert_array_almost_equal(y1, y2, decimal=11)
def test_axes_and_shape(self):
for function_set in self.function_sets:
fforward = function_set['forward']
finverse = function_set['inverse']
# shape must match the number of axes
assert_raises(ValueError, fforward, self.data,
shape=(self.data.shape[0], ),
axes=(0, 1))
assert_raises(ValueError, fforward, self.data,
shape=(self.data.shape[0], ),
axes=None)
assert_raises(ValueError, fforward, self.data,
shape=self.data.shape,
axes=(0, ))
# shape must be a tuple
assert_raises(TypeError, fforward, self.data,
shape=self.data.shape[0],
axes=(0, 1))
# shape=None works with a subset of axes
for axes in [(0, ), (1, )]:
tmp = fforward(self.data, shape=None, axes=axes, norm='ortho')
tmp = finverse(tmp, shape=None, axes=axes, norm='ortho')
assert_array_almost_equal(self.data, tmp, decimal=self.dec)
# non-default shape
tmp = fforward(self.data, shape=(128, 128), axes=None)
assert_equal(tmp.shape, (128, 128))
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@[email protected]@site-packages@scipy@fftpack@tests@[email protected]_END.py
|
{
"filename": "plot_extraction_frame.py",
"repo_name": "JamesKirk11/Tiberius",
"repo_path": "Tiberius_extracted/Tiberius-main/src/reduction_utils/plot_extraction_frame.py",
"type": "Python"
}
|
#### Author of this code: James Kirk
#### Contact: [email protected]
from astropy.io import fits
import pickle
import matplotlib.pyplot as plt
from matplotlib import gridspec
import argparse
import numpy as np
# Prevent matplotlib plotting frames upside down
plt.rcParams['image.origin'] = 'lower'
parser = argparse.ArgumentParser(description='Plot a through slit image along with traces and frames for an extraction. NOTE: currently only set up for ACAM.')
parser.add_argument('-x1','--x1',help="parse the pickled x positions of the first trace. Note can parse 2 files if a separate extraction was run in the red end.",nargs="+")
parser.add_argument('-x2','--x2',help="parse the pickled x positions of the second trace. Note can parse 2 files if a separate extraction was run in the red end.",nargs="+")
parser.add_argument('-apw','--aperture_widths',help="Define the widths of the target apertures used. Can parse > 1 aperture width.",nargs="+",type=int)
parser.add_argument('-bkw','--background_widths',help="Define the widths of the background apertures used. Can parse > 1 aperture width.",nargs="+",type=int)
parser.add_argument('-bko','--background_offset',help="Define the offset between the background and target apertures used. Can parse > 1 aperture width.",nargs="+",type=int)
parser.add_argument("-thru",'--thru_slit',help="parse the fits through slit image if wanting to plot this in addition to a science frame")
parser.add_argument("-sci","--science",help="parse the science frame for plotting")
parser.add_argument("-bias","--bias",help="parse the bias frame")
parser.add_argument("-flat","--flat",help="parse the flat frame")
parser.add_argument("-sci_lims","--science_limits",help="parse a minimum and maximum scaling for science plotting if wanting to override default. IMPORTANT: must use the first science frame of the night as the first trace locations of the night are used.",nargs="+",type=float)
parser.add_argument("-thru_lims","--thru_slit_limits",help="parse a minimum and maximum scaling for thru slit plotting if wanting to override default.",nargs="+",type=float)
parser.add_argument("-rw","--row_min",help="parse minimum row at which trace was extracted (this also cuts the plot to this minimum row).",type=int)
parser.add_argument("-rm","--row_max",help="parse maximum row at which trace was extracted (this also cuts the plot to this maximum row).",type=int)
parser.add_argument("-s","--save_figure",help="use to save the resulting plot to a png file.",action="store_true")
args = parser.parse_args()
science = fits.open(args.science)
if args.bias is not None:
    bias = fits.open(args.bias)[0].data
else:
    # no bias frame supplied: assume zero bias so the subtraction is a no-op
    bias = np.zeros_like(science[1].data)
if args.flat is not None:
flat = fits.open(args.flat)[0].data
else:
flat = np.ones_like(science[1].data)
nwindows = len(science) - 1
if args.thru_slit is not None:
thru_slit = fits.open(args.thru_slit)
nplot_rows = 2
else:
thru_slit = None
nplot_rows = 1
if nwindows == 2:
nplot_cols = 2
else:
nplot_cols = 1
# Plots limits
if args.science_limits is None:
vmin_sci = 1e2
vmax_sci = 5e3
else:
vmin_sci = args.science_limits[0]
vmax_sci = args.science_limits[1]
if args.thru_slit_limits is None:
vmin_thru = 1e2
vmax_thru = 5e3
else:
vmin_thru = args.thru_slit_limits[0]
vmax_thru = args.thru_slit_limits[1]
nextractions = len(args.x1)
aperture_widths = args.aperture_widths
background_widths = args.background_widths
background_offsets = args.background_offset
if len(aperture_widths) == 1:
aperture_widths = aperture_widths*2
if len(background_widths) == 1:
background_widths = background_widths*2
if len(background_offsets) == 1:
background_offsets = background_offsets*2
# Load in trace locations
if nextractions > 1:
x1_b = pickle.load(open(args.x1[0],'rb'))
x1_r = pickle.load(open(args.x1[1],'rb'))
x1 = np.hstack((x1_b[0],x1_r[0]))
x2_b = pickle.load(open(args.x2[0],'rb'))
x2_r = pickle.load(open(args.x2[1],'rb'))
x2 = np.hstack((x2_b[0],x2_r[0]))
else:
x1 = pickle.load(open(args.x1[0],'rb'))[0]
x2 = pickle.load(open(args.x2[0],'rb'))[0]
rows = np.arange(args.row_min,args.row_max)
### Plot figure
# fig = plt.figure(figsize=(6,10))
# Make figure aspect ratio match science frame.
nrows,ncols = science[1].data.shape
fig_height = 8
fig_width = (len(rows)/ncols)*fig_height
if nwindows > 1:
fig_width *= 2
plt.figure(figsize=(fig_width,fig_height))
# set up gridspec
if thru_slit is not None:
height_ratio = [100/len(rows),1]
else:
height_ratio = None
gs1 = gridspec.GridSpec(nplot_rows,nplot_cols,height_ratios=height_ratio)
gs1.update(wspace=0.02, hspace=0.02) # set the spacing between axes.
# record running number of plots
# plot_index = 1
plot_index = 0
ax1 = plt.subplot(gs1[0])
# Through slit image
if thru_slit is not None:
ax1.set_ylim(840-50,840+50)
ax1.imshow(thru_slit[1].data,vmin=vmin_thru,vmax=vmax_thru)
# plot vertical line at star location
# ax1.axvline(x1.mean())
if nwindows > 1:
plot_index += 1
ax1a = plt.subplot(gs1[plot_index])
ax1a.imshow(thru_slit[2].data,vmin=vmin_thru,vmax=vmax_thru)
# vertical line at star locations
ax1a.axvline(x2.mean())
ax1a.set_ylim(840-50,840+50)
ax1a.axis("off")
ax1a.set_aspect('auto')
else:
# ax1.axvline(x2.mean())
ax1.axis("off")
ax1.set_aspect('auto')
plot_index += 1
# Science image
if plot_index >= 1:
ax2 = plt.subplot(gs1[plot_index])
else:
ax2 = ax1
ax2.imshow((science[1].data-bias[0])/flat[0],vmin=vmin_sci,vmax=vmax_sci)
ax2.set_aspect('auto')
# target aperture
ax2.plot(np.ceil(x1+aperture_widths[0]//2),rows,color='b',lw=1)
ax2.plot(np.floor(x1-aperture_widths[0]//2),rows,color='b',lw=1)
# check whether buffer pixels have been used
# buffer_pixels = np.where(x1-aperture_widths[0]//2-background_offsets[0]-background_widths[0] <= 20)[0]
# background aperture
ax2.plot(np.ceil(x1+aperture_widths[0]//2)+background_offsets[0],rows,color='b',ls='--',lw=1)
ax2.plot(np.ceil(x1+aperture_widths[0]//2)+background_offsets[0]+background_widths[0],rows,color='b',ls='--',lw=1)
ax2.plot(np.floor(x1-aperture_widths[0]//2)-background_offsets[0],rows,color='b',ls='--',lw=1)
ax2.plot(np.floor(x1-aperture_widths[0]//2)-background_offsets[0]-background_widths[0],rows,color='b',ls='--',lw=1)
ax2.set_ylim(args.row_min,args.row_max)
ax2.set_ylabel('Y pixel')
ax2.set_xlabel('X pixel')
if nwindows > 1:
plot_index += 1
ax2a = plt.subplot(gs1[plot_index])
ax2a.imshow((science[2].data-bias[1])/flat[1],vmin=vmin_sci,vmax=vmax_sci)
# ax2a.plot(x2,np.arange(nrows),color='b',lw=1)
# target aperture
ax2a.plot(np.ceil(x2+aperture_widths[1]//2),rows,color='b',lw=1)
ax2a.plot(np.floor(x2-aperture_widths[1]//2),rows,color='b',lw=1)
# background aperture
ax2a.plot(np.ceil(x2+aperture_widths[1]//2)+background_offsets[1],rows,color='b',ls='--',lw=1)
ax2a.plot(np.ceil(x2+aperture_widths[1]//2)+background_offsets[1]+background_widths[1],rows,color='b',ls='--',lw=1)
ax2a.plot(np.floor(x2-aperture_widths[1]//2)-background_offsets[1],rows,color='b',ls='--',lw=1)
ax2a.plot(np.floor(x2-aperture_widths[1]//2)-background_offsets[1]-background_widths[1],rows,color='b',ls='--',lw=1)
ax2a.set_ylim(args.row_min,args.row_max)
ax2a.set_aspect('auto')
ax2a.set_xlabel('X pixel')
else:
# ax2.plot(x2,rows,color='b',lw=1)
# target aperture
ax2.plot(np.ceil(x2+aperture_widths[1]//2),rows,color='b',lw=1)
ax2.plot(np.floor(x2-aperture_widths[1]//2),rows,color='b',lw=1)
# background aperture
ax2.plot(np.ceil(x2+aperture_widths[1]//2)+background_offsets[1],rows,color='b',ls='--',lw=1)
ax2.plot(np.ceil(x2+aperture_widths[1]//2)+background_offsets[1]+background_widths[1],rows,color='b',ls='--',lw=1)
ax2.plot(np.floor(x2-aperture_widths[1]//2)-background_offsets[1],rows,color='b',ls='--',lw=1)
ax2.plot(np.floor(x2-aperture_widths[1]//2)-background_offsets[1]-background_widths[1],rows,color='b',ls='--',lw=1)
if args.save_figure:
plt.savefig('extraction_frame.pdf',bbox_inches='tight')
plt.show()
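# Example invocation (illustrative only: the FITS files, trace pickles and
# aperture settings below are hypothetical and must be replaced by the outputs
# of your own reduction):
#   python plot_extraction_frame.py -sci science_0001.fits -bias master_bias.fits \
#       -x1 star1_xpositions.pickle -x2 star2_xpositions.pickle \
#       -apw 30 -bkw 50 -bko 10 -rw 100 -rm 1900 -s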
|
JamesKirk11REPO_NAMETiberiusPATH_START.@Tiberius_extracted@Tiberius-main@src@reduction_utils@[email protected]_END.py
|
{
"filename": "_title.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/polar/radialaxis/_title.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Title(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.polar.radialaxis"
_path_str = "layout.polar.radialaxis.title"
_valid_props = {"font", "text"}
# font
# ----
@property
def font(self):
"""
Sets this axis' title font. Note that the title's font used to
be customized by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.polar.radialaxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text,
such as an "under", "over" or "through" as well
as combinations e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind
text. "auto" places minimal shadow and applies
contrast text font color. See
https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional
options.
size
style
Sets whether a font should be styled with a
normal or italic face from its family.
textcase
Sets capitalization of text. It can be used to
make text appear in all-uppercase or all-
lowercase, or with each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
plotly.graph_objs.layout.polar.radialaxis.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of this axis. Note that before the existence of
`title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this axis' title font. Note that the title's font
used to be customized by the now deprecated `titlefont`
attribute.
text
Sets the title of this axis. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.polar.r
adialaxis.Title`
font
Sets this axis' title font. Note that the title's font
used to be customized by the now deprecated `titlefont`
attribute.
text
Sets the title of this axis. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.polar.radialaxis.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.polar.radialaxis.Title`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
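# Minimal usage sketch (not part of the generated module): constructs a Title
# for a polar radial axis and prints its plotly-JSON form. The text and font
# values are illustrative only.
if __name__ == "__main__":
    demo_title = Title(text="radius (km)", font=dict(size=14, color="#444"))
    print(demo_title.to_plotly_json())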
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@polar@radialaxis@[email protected]_END.py
|
{
"filename": "core.py",
"repo_name": "mkelley/mskpy",
"repo_path": "mskpy_extracted/mskpy-main/mskpy/photometry/core.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
core --- Core code for photometry.
==================================
.. autosummary::
:toctree: generated/
Functions
---------
airmass_app
airmass_loc
cal_airmass
cal_color_airmass
"""
from ..util import autodoc
import numpy as np
import astropy.units as u
__all__ = [
'airmass_app',
'airmass_loc',
'cal_airmass',
'cal_color',
'cal_color_airmass',
]
def airmass_app(z_true, h):
"""Apparent airmass.
Hardie's 1962 formula, with a correction for atmospheric
refraction. Used by Farnham and Schleicher 2000.
For OH, use `airmass_loc`.
Parameters
----------
z_true : Angle or Quantity
The object's true zenith angle.
h : Quantity
The observer's elevation.
"""
tan = np.tan(z_true)
exp = np.exp(-h.to(u.km).value / 8.0)
z_app = z_true - (60.4 * tan - 0.0668 * tan**3) / 3600. * exp * u.deg
sec = 1.0 / np.cos(z_app)
X = (sec - 0.0018167 * (sec - 1) - 0.002875 * (sec - 1)**2
- 0.0008083 * (sec - 1)**3)
return X.value
def airmass_loc(z_true):
"""Airmass based on local zenith angle.
Use for OH extinction.
Parameters
----------
    z_true : Angle or Quantity
The object's true zenith angle.
"""
R = 6378.
H = 22.
X = (R + H) / np.sqrt((R + H)**2 - (R * np.sin(z_true))**2)
return X.value
def cal_airmass(m, munc, M, X, guess=(25., -0.1),
covar=False):
"""Calibraton coefficients, based on airmass.
Parameters
----------
m : array
Instrumental (uncalibrated) magnitude.
munc : array
Uncertainties on m.
M : array
Calibrated magnitude.
X : array
Airmass.
guess : array, optional
        An initial guess for the fitting algorithm.
covar : bool, optional
Set to `True` to return the covariance matrix.
    Returns
-------
A : ndarray
The photometric zero point, and airmass correction slope. [mag,
mag/airmass]
unc or cov : ndarray
Uncertainties on each parameter, based on least-squares fitting,
or the covariance matrix, if `covar` is `True`.
"""
from scipy.optimize import leastsq
def chi(A, m, munc, M, X):
model = M - A[0] + A[1] * X
chi = (np.array(m) - model) / np.array(munc)
return chi
output = leastsq(chi, guess, args=(m, munc, M, X),
full_output=True, epsfcn=1e-3)
fit = output[0]
cov = output[1]
err = np.sqrt(np.diag(cov))
if covar:
return fit, cov
else:
return fit, err
def cal_color_airmass(m, munc, M, color, X, guess=(25., -0.1, -0.01),
covar=False):
"""Calibraton coefficients, based on airmass and color index.
Parameters
----------
m : array
Instrumental (uncalibrated) magnitude.
munc : array
Uncertainties on m.
M : array
Calibrated magnitude.
color : array
Calibrated color index, e.g., V - R.
X : array
Airmass.
guess : array, optional
An initial guess for the fitting algorithm.
covar : bool, optional
Set to `True` to return the covariance matrix.
    Returns
-------
A : ndarray
The photometric zero point, airmass correction slope, and color
correction slope. [mag, mag/airmass, mag/color index]
unc or cov : ndarray
Uncertainties on each parameter, based on least-squares fitting,
or the covariance matrix, if `covar` is `True`.
"""
from scipy.optimize import leastsq
def chi(A, m, munc, M, color, X):
model = M - A[0] + A[1] * X + A[2] * color
chi = (np.array(m) - model) / np.array(munc)
return chi
output = leastsq(chi, guess, args=(m, munc, M, color, X),
full_output=True, epsfcn=1e-3)
fit = output[0]
cov = output[1]
err = np.sqrt(np.diag(cov))
if covar:
return fit, cov
else:
return fit, err
def cal_color(m, munc, M, color, guess=(25., -0.01),
covar=False):
"""Calibraton coefficients, based on color index.
Parameters
----------
m : array
Instrumental (uncalibrated) magnitude.
munc : array
Uncertainties on m.
M : array
Calibrated magnitude.
color : array
Calibrated color index, e.g., V - R.
guess : array, optional
An initial guess for the fitting algorithm.
covar : bool, optional
Set to `True` to return the covariance matrix.
    Returns
-------
A : ndarray
The photometric zero point and color correction slope. [mag,
mag/color index]
unc or cov : ndarray
Uncertainties on each parameter, based on least-squares fitting,
or the covariance matrix, if `covar` is `True`.
"""
from scipy.optimize import leastsq
def chi(A, m, munc, M, color):
model = M - A[0] + A[1] * color
chi = (np.array(m) - model) / np.array(munc)
return chi
output = leastsq(chi, guess, args=(m, munc, M, color),
full_output=True, epsfcn=1e-3)
fit = output[0]
cov = output[1]
err = np.sqrt(np.diag(cov))
if covar:
return fit, cov
else:
return fit, err
# update module docstring
autodoc(globals())
del autodoc
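# Minimal self-contained sketch (synthetic, noiseless data; not part of the
# module): recover a zero point of 25.0 mag and an extinction slope of
# -0.15 mag/airmass with cal_airmass.
if __name__ == "__main__":
    M = np.array([12.0, 13.5, 14.2, 15.0, 16.1])  # calibrated magnitudes
    X = np.array([1.0, 1.2, 1.5, 1.8, 2.1])       # airmasses
    m = M - 25.0 - 0.15 * X                        # instrumental magnitudes
    munc = np.full_like(m, 0.01)
    fit, unc = cal_airmass(m, munc, M, X)
    print(fit)  # approximately [25.0, -0.15]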
|
mkelleyREPO_NAMEmskpyPATH_START.@mskpy_extracted@mskpy-main@mskpy@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/choroplethmap/colorbar/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._tickfont import Tickfont
from ._tickformatstop import Tickformatstop
from ._title import Title
from . import title
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[".title"],
["._tickfont.Tickfont", "._tickformatstop.Tickformatstop", "._title.Title"],
)
|
[email protected][email protected]@packages@python@plotly@plotly@graph_objs@choroplethmap@colorbar@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "jrenaud90/TidalPy",
"repo_path": "TidalPy_extracted/TidalPy-main/TidalPy/Extending/burnman/material/__init__.py",
"type": "Python"
}
|
jrenaud90REPO_NAMETidalPyPATH_START.@TidalPy_extracted@TidalPy-main@TidalPy@Extending@burnman@material@[email protected]_END.py
|
|
{
"filename": "warm_starting_util_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/training/warm_starting_util_test.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for warm_starting_util."""
import os
import numpy as np
from tensorflow.python.checkpoint import checkpoint as tracking_util
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import warm_starting_util as ws_util
ones = init_ops.ones_initializer
norms = init_ops.truncated_normal_initializer
rand = init_ops.random_uniform_initializer
zeros = init_ops.zeros_initializer
class WarmStartingUtilTest(test.TestCase):
def _write_vocab(self, string_values, file_name):
vocab_file = os.path.join(self.get_temp_dir(), file_name)
with open(vocab_file, "w") as f:
f.write("\n".join(string_values))
return vocab_file
def _write_checkpoint(self, sess):
self.evaluate(variables.global_variables_initializer())
saver = saver_lib.Saver()
ckpt_prefix = os.path.join(self.get_temp_dir(), "model")
saver.save(sess, ckpt_prefix, global_step=0)
def _create_prev_run_var(self,
var_name,
shape=None,
initializer=None,
partitioner=None):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
var = variable_scope.get_variable(
var_name,
shape=shape,
initializer=initializer,
partitioner=partitioner)
self._write_checkpoint(sess)
if partitioner:
self.assertTrue(isinstance(var, variables.PartitionedVariable))
var = var._get_variable_list()
return var, self.evaluate(var)
def _create_prev_run_vars(self,
var_names,
shapes,
initializers):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
all_vars = []
for var_name, shape, initializer in zip(var_names, shapes,
initializers):
all_vars.append(variable_scope.get_variable(
var_name,
shape=shape,
initializer=initializer))
self._write_checkpoint(sess)
return [self.evaluate(var) for var in all_vars]
def _create_dummy_inputs(self):
return {
"sc_int": array_ops.sparse_placeholder(dtypes.int32),
"sc_hash": array_ops.sparse_placeholder(dtypes.string),
"sc_keys": array_ops.sparse_placeholder(dtypes.string),
"sc_vocab": array_ops.sparse_placeholder(dtypes.string),
"real": array_ops.placeholder(dtypes.float32)
}
def _create_linear_model(self, feature_cols, partitioner):
cols_to_vars = {}
with variable_scope.variable_scope("", partitioner=partitioner):
# Create the variables.
fc.linear_model(
features=self._create_dummy_inputs(),
feature_columns=feature_cols,
units=1,
cols_to_vars=cols_to_vars)
# Return a dictionary mapping each column to its variable.
return cols_to_vars
def _assert_cols_to_vars(self, cols_to_vars, cols_to_expected_values, sess):
for col, expected_values in cols_to_expected_values.items():
for i, var in enumerate(cols_to_vars[col]):
self.assertAllClose(expected_values[i], var.eval(sess))
def testWarmStartVar(self):
_, prev_val = self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
prev_tensor_name, var = ws_util._get_var_info(fruit_weights)
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(prev_val, fruit_weights.eval(sess))
def testWarmStartVarPrevVarPartitioned(self):
_, weights = self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
prev_val = np.concatenate([weights[0], weights[1]], axis=0)
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
prev_tensor_name, var = ws_util._get_var_info(fruit_weights)
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(prev_val, fruit_weights.eval(sess))
def testWarmStartVarCurrentVarPartitioned(self):
_, prev_val = self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[4, 1],
initializer=[[0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
prev_tensor_name, var = ws_util._get_var_info(fruit_weights)
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
fruit_weights = fruit_weights._get_variable_list()
new_val = np.concatenate(
[fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)
self.assertAllClose(prev_val, new_val)
def testWarmStartVarBothVarsPartitioned(self):
_, weights = self._create_prev_run_var(
"old_scope/fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
prev_val = np.concatenate([weights[0], weights[1]], axis=0)
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"new_scope/fruit_weights",
shape=[4, 1],
initializer=[[0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
prev_tensor_name, var = ws_util._get_var_info(
fruit_weights, prev_tensor_name="old_scope/fruit_weights")
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
fruit_weights = fruit_weights._get_variable_list()
new_val = np.concatenate(
[fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)
self.assertAllClose(prev_val, new_val)
def testWarmStartVarWithVocab(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,
self.get_temp_dir(), prev_vocab_path)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithColumnVocab(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],
[2.3, 2., 0.]], fruit_output_layer.eval(sess))
def testWarmStartVarWithVocabConstrainedOldVocabSize(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(
fruit_weights,
new_vocab_path,
5,
self.get_temp_dir(),
prev_vocab_path,
previous_vocab_size=2)
self.evaluate(variables.global_variables_initializer())
# Old vocabulary limited to ['apple', 'banana'].
self.assertAllClose([[0.], [0.], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithVocabPrevVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,
self.get_temp_dir(), prev_vocab_path)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithColumnVocabPrevVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
shape=[4, 2],
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],
[2.3, 2., 0.]], fruit_output_layer.eval(sess))
def testWarmStartVarWithVocabCurrentVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[6, 1],
initializer=[[0.], [0.], [0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(
fruit_weights,
new_vocab_path,
5,
self.get_temp_dir(),
prev_vocab_path,
current_oov_buckets=1)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
fruit_weights_vars = fruit_weights._get_variable_list()
self.assertAllClose([[2.], [1.5], [1.]],
fruit_weights_vars[0].eval(sess))
self.assertAllClose([[0.5], [0.], [0.]],
fruit_weights_vars[1].eval(sess))
def testWarmStartVarWithColumnVocabCurrentVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
shape=[4, 3],
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_output_layer, variables.PartitionedVariable))
fruit_output_layer_vars = fruit_output_layer._get_variable_list()
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],
fruit_output_layer_vars[0].eval(sess))
self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],
fruit_output_layer_vars[1].eval(sess))
def testWarmStartVarWithVocabBothVarsPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and two new elements.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[6, 1],
initializer=[[0.], [0.], [0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 6,
self.get_temp_dir(), prev_vocab_path)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
fruit_weights_vars = fruit_weights._get_variable_list()
self.assertAllClose([[2.], [1.5], [1.]],
fruit_weights_vars[0].eval(sess))
self.assertAllClose([[0.5], [0.], [0.]],
fruit_weights_vars[1].eval(sess))
def testWarmStartVarWithColumnVocabBothVarsPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
shape=[4, 2],
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
shape=[4, 3],
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_output_layer, variables.PartitionedVariable))
fruit_output_layer_vars = fruit_output_layer._get_variable_list()
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],
fruit_output_layer_vars[0].eval(sess))
self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],
fruit_output_layer_vars[1].eval(sess))
def testWarmStart_ListOfVariables(self):
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var("v1", shape=[10, 1],
initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
# Initialize with zeros.
var = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=[var])
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var, prev_int_val)
def testWarmStart_ListOfStrings(self):
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var("v1", shape=[10, 1],
initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
# Initialize with zeros.
var = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=["v1"])
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var, prev_int_val)
def testWarmStart_TwoVarsFromTheSamePrevVar(self):
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var("v1", shape=[10, 1],
initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g):
# Initialize with zeros.
var = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
var2 = variable_scope.get_variable(
"v2",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(),
vars_to_warm_start=["v1", "v2"],
var_name_to_prev_var_name=dict(v2="v1"))
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var, prev_int_val)
self.assertAllEqual(var2, prev_int_val)
def testWarmStart_ListOfRegexes(self):
# Save checkpoint from which to warm-start.
[prev_v1_val, prev_v1_momentum_val,
prev_v2_val, _] = self._create_prev_run_vars(
var_names=["v1", "v1/Momentum", "v2", "v2/Momentum"],
shapes=[[10, 1]] * 4,
initializers=[ones()] * 4)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
# Initialize with zeros.
v1 = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
v1_momentum = variable_scope.get_variable(
"v1/Momentum",
shape=[10, 1],
initializer=zeros())
v2 = variable_scope.get_variable(
"v2",
shape=[10, 1],
initializer=zeros())
v2_momentum = variable_scope.get_variable(
"v2/Momentum",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(),
# This warm-starts both v1 and v1/Momentum, but only
# v2 (and not v2/Momentum).
vars_to_warm_start=["v1", "v2[^/]"])
self.evaluate(variables.global_variables_initializer())
        # Verify the selected weights were correctly warm-started (init
# overridden to ones).
self.assertAllEqual(v1, prev_v1_val)
self.assertAllEqual(v1_momentum, prev_v1_momentum_val)
self.assertAllEqual(v2, prev_v2_val)
self.assertAllEqual(v2_momentum, np.zeros([10, 1]))
def testWarmStart_SparseColumnIntegerized(self):
# Create feature column.
sc_int = fc.categorical_column_with_identity("sc_int", num_buckets=10)
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var(
"linear_model/sc_int/weights", shape=[10, 1], initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_int], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_int: [np.zeros([10, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_int], partitioner)
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=".*sc_int.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_int: [prev_int_val]}, sess)
def testWarmStart_SparseColumnHashed(self):
# Create feature column.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
# Save checkpoint from which to warm-start.
_, prev_hash_val = self._create_prev_run_var(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_hash], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_hash: [np.zeros([15, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_hash], partitioner)
ws_util.warm_start(
self.get_temp_dir(), vars_to_warm_start=".*sc_hash.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_hash: [prev_hash_val]},
sess)
def testWarmStart_SparseColumnVocabulary(self):
# Create vocab for sparse column "sc_vocab".
vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"vocab")
# Create feature column.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
# Save checkpoint from which to warm-start.
_, prev_vocab_val = self._create_prev_run_var(
"linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([4, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
# Since old vocab is not explicitly set in WarmStartSettings, the old
        # vocab is assumed to be the same as the new vocab.
ws_util.warm_start(
self.get_temp_dir(), vars_to_warm_start=".*sc_vocab.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]},
sess)
def testWarmStart_ExplicitCheckpointFile(self):
# Create vocab for sparse column "sc_vocab".
vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"vocab")
# Create feature column.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
# Save checkpoint from which to warm-start.
_, prev_vocab_val = self._create_prev_run_var(
"linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([4, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
# Since old vocab is not explicitly set in WarmStartSettings, the old
        # vocab is assumed to be the same as the new vocab.
ws_util.warm_start(
# Explicitly provide the file prefix instead of just the dir.
os.path.join(self.get_temp_dir(), "model-0"),
vars_to_warm_start=".*sc_vocab.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]},
sess)
def testWarmStart_SparseColumnVocabularyConstrainedVocabSizes(self):
# Create old vocabulary, and use a size smaller than the total number of
# entries.
old_vocab_path = self._write_vocab(["apple", "guava", "banana"],
"old_vocab")
old_vocab_size = 2 # ['apple', 'guava']
# Create new vocab for sparse column "sc_vocab".
current_vocab_path = self._write_vocab(
["apple", "banana", "guava", "orange"], "current_vocab")
# Create feature column. Only use 2 of the actual entries, resulting in
# ['apple', 'banana'] for the new vocabulary.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=current_vocab_path, vocabulary_size=2)
# Save checkpoint from which to warm-start.
self._create_prev_run_var(
"linear_model/sc_vocab/weights", shape=[2, 1], initializer=ones())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([2, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=old_vocab_path,
old_vocab_size=old_vocab_size)
ws_util.warm_start(
ckpt_to_initialize_from=self.get_temp_dir(),
vars_to_warm_start=".*sc_vocab.*",
var_name_to_vocab_info={
"linear_model/sc_vocab/weights": vocab_info
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. 'banana' isn't in the
# first two entries of the old vocabulary, so it's newly initialized.
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [[[1], [0]]]}, sess)
def testWarmStart_BucketizedColumn(self):
# Create feature column.
real = fc.numeric_column("real")
real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.])
# Save checkpoint from which to warm-start.
_, prev_bucket_val = self._create_prev_run_var(
"linear_model/real_bucketized/weights",
shape=[5, 1],
initializer=norms())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([real_bucket], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars,
{real_bucket: [np.zeros([5, 1])]}, sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([real_bucket], partitioner)
ws_util.warm_start(
self.get_temp_dir(), vars_to_warm_start=".*real_bucketized.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars,
{real_bucket: [prev_bucket_val]}, sess)
def testWarmStart_MultipleCols(self):
# Create vocab for sparse column "sc_vocab".
vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"vocab")
# Create feature columns.
sc_int = fc.categorical_column_with_identity("sc_int", num_buckets=10)
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
real = fc.numeric_column("real")
real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.])
cross = fc.crossed_column([sc_keys, sc_vocab], hash_bucket_size=20)
all_linear_cols = [sc_int, sc_hash, sc_keys, sc_vocab, real_bucket, cross]
# Save checkpoint from which to warm-start. Also create a bias variable,
# so we can check that it's also warm-started.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
sc_int_weights = variable_scope.get_variable(
"linear_model/sc_int/weights", shape=[10, 1], initializer=ones())
sc_hash_weights = variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
sc_keys_weights = variable_scope.get_variable(
"linear_model/sc_keys/weights", shape=[4, 1], initializer=rand())
sc_vocab_weights = variable_scope.get_variable(
"linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
real_bucket_weights = variable_scope.get_variable(
"linear_model/real_bucketized/weights",
shape=[5, 1],
initializer=norms())
cross_weights = variable_scope.get_variable(
"linear_model/sc_keys_X_sc_vocab/weights",
shape=[20, 1],
initializer=rand())
bias = variable_scope.get_variable(
"linear_model/bias_weights",
shape=[1],
initializer=rand())
self._write_checkpoint(sess)
(prev_int_val, prev_hash_val, prev_keys_val, prev_vocab_val,
prev_bucket_val, prev_cross_val, prev_bias_val) = sess.run([
sc_int_weights, sc_hash_weights, sc_keys_weights, sc_vocab_weights,
real_bucket_weights, cross_weights, bias
])
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, all weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {
sc_int: [np.zeros([10, 1])],
sc_hash: [np.zeros([15, 1])],
sc_keys: [np.zeros([4, 1])],
sc_vocab: [np.zeros([4, 1])],
real_bucket: [np.zeros([5, 1])],
cross: [np.zeros([20, 1])],
}, sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
var_name_to_vocab_info={
"linear_model/sc_vocab/weights": vocab_info
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {
sc_int: [prev_int_val],
sc_hash: [prev_hash_val],
sc_keys: [prev_keys_val],
sc_vocab: [prev_vocab_val],
real_bucket: [prev_bucket_val],
cross: [prev_cross_val],
"bias": [prev_bias_val],
}, sess)
def testWarmStartMoreSettings(self):
# Create old and new vocabs for sparse column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# Create feature columns.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
all_linear_cols = [sc_hash, sc_keys, sc_vocab]
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
sc_keys_weights = variable_scope.get_variable(
"some_other_name", shape=[4, 1], initializer=rand())
variable_scope.get_variable(
"linear_model/sc_vocab/weights",
initializer=[[0.5], [1.], [2.], [3.]])
self._write_checkpoint(sess)
prev_keys_val = self.evaluate(sc_keys_weights)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape.dims[0].value)
return partitions
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, _partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
vars_to_warm_start=".*(sc_keys|sc_vocab).*",
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
},
var_name_to_prev_var_name={
ws_util._infer_var_name(cols_to_vars[sc_keys]):
"some_other_name"
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# sc_hash should not be warm-started. Var corresponding to sc_vocab
# should be correctly warm-started after vocab remapping.
self._assert_cols_to_vars(cols_to_vars, {
sc_keys:
np.split(prev_keys_val, 2),
sc_hash: [np.zeros([8, 1]), np.zeros([7, 1])],
sc_vocab: [
np.array([[3.], [2.], [1.]]),
np.array([[0.5], [0.], [0.]])
]
}, sess)
def testWarmStartMoreSettingsNoPartitioning(self):
# Create old and new vocabs for sparse column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# Create feature columns.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
all_linear_cols = [sc_hash, sc_keys, sc_vocab]
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
sc_keys_weights = variable_scope.get_variable(
"some_other_name", shape=[4, 1], initializer=rand())
variable_scope.get_variable(
"linear_model/sc_vocab/weights",
initializer=[[0.5], [1.], [2.], [3.]])
self._write_checkpoint(sess)
prev_keys_val = self.evaluate(sc_keys_weights)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols,
partitioner=None)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
vars_to_warm_start=".*(sc_keys|sc_vocab).*",
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
},
var_name_to_prev_var_name={
ws_util._infer_var_name(cols_to_vars[sc_keys]):
"some_other_name"
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# sc_hash should not be warm-started. Var corresponding to sc_vocab
# should be correctly warm-started after vocab remapping.
self._assert_cols_to_vars(cols_to_vars, {
sc_keys: [prev_keys_val],
sc_hash: [np.zeros([15, 1])],
sc_vocab: [np.array([[3.], [2.], [1.], [0.5], [0.], [0.]])]
}, sess)
def testWarmStartVarsToWarmstartIsNone(self):
# Create old and new vocabs for sparse column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# Create feature columns.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
all_linear_cols = [sc_hash, sc_keys, sc_vocab]
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
variable_scope.get_variable(
"some_other_name", shape=[4, 1], initializer=rand())
variable_scope.get_variable(
"linear_model/sc_vocab/weights",
initializer=[[0.5], [1.], [2.], [3.]])
self._write_checkpoint(sess)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape.dims[0].value)
return partitions
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, _partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
# The special value of None here will ensure that only the variable
# specified in var_name_to_vocab_info (sc_vocab embedding) is
# warm-started.
vars_to_warm_start=None,
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
},
# Even though this is provided, the None value for
# vars_to_warm_start overrides the logic, and this will not be
# warm-started.
var_name_to_prev_var_name={
ws_util._infer_var_name(cols_to_vars[sc_keys]):
"some_other_name"
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# sc_vocab should be correctly warm-started after vocab remapping,
        # and neither of the other two should be warm-started.
self._assert_cols_to_vars(cols_to_vars, {
sc_keys: [np.zeros([2, 1]), np.zeros([2, 1])],
sc_hash: [np.zeros([8, 1]), np.zeros([7, 1])],
sc_vocab: [
np.array([[3.], [2.], [1.]]),
np.array([[0.5], [0.], [0.]])
]
}, sess)
def testWarmStartEmbeddingColumn(self):
# Create old and new vocabs for embedding column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry", "blueberry"],
"new_vocab")
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"input_layer/sc_vocab_embedding/embedding_weights",
initializer=[[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])
self._write_checkpoint(sess)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape.dims[0].value)
return partitions
# Create feature columns.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
emb_vocab_column = fc.embedding_column(
categorical_column=sc_vocab,
dimension=2)
all_deep_cols = [emb_vocab_column]
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = {}
with variable_scope.variable_scope("", partitioner=_partitioner):
# Create the variables.
fc.input_layer(
features=self._create_dummy_inputs(),
feature_columns=all_deep_cols,
cols_to_vars=cols_to_vars)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path,
# Can't use constant_initializer with load_and_remap. In practice,
# use a truncated normal initializer.
backup_initializer=init_ops.random_uniform_initializer(
minval=0.42, maxval=0.42))
ws_util.warm_start(
self.get_temp_dir(),
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[emb_vocab_column]):
vocab_info
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# emb_vocab_column should be correctly warm-started after vocab
# remapping. Missing values are filled in with the EmbeddingColumn's
# initializer.
self._assert_cols_to_vars(
cols_to_vars, {
emb_vocab_column: [
np.array([[3., 3.3], [2., 2.2], [1., 1.1]]),
np.array([[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]])
]
}, sess)
def testWarmStartEmbeddingColumnLinearModel(self):
# Create old and new vocabs for embedding column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry", "blueberry"],
"new_vocab")
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_vocab_embedding/embedding_weights",
initializer=[[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])
variable_scope.get_variable(
"linear_model/sc_vocab_embedding/weights",
initializer=[[0.69], [0.71]])
self._write_checkpoint(sess)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape.dims[0].value)
return partitions
# Create feature columns.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
emb_vocab = fc.embedding_column(
categorical_column=sc_vocab,
dimension=2)
all_deep_cols = [emb_vocab]
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = {}
with variable_scope.variable_scope("", partitioner=_partitioner):
# Create the variables.
fc.linear_model(
features=self._create_dummy_inputs(),
feature_columns=all_deep_cols,
cols_to_vars=cols_to_vars)
# Construct the vocab_info for the embedding weight.
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path,
# Can't use constant_initializer with load_and_remap. In practice,
# use a truncated normal initializer.
backup_initializer=init_ops.random_uniform_initializer(
minval=0.42, maxval=0.42))
ws_util.warm_start(
self.get_temp_dir(),
vars_to_warm_start=".*sc_vocab.*",
var_name_to_vocab_info={
"linear_model/sc_vocab_embedding/embedding_weights": vocab_info
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# emb_vocab should be correctly warm-started after vocab remapping.
# Missing values are filled in with the EmbeddingColumn's initializer.
self._assert_cols_to_vars(
cols_to_vars,
{
emb_vocab: [
# linear weights part 0.
np.array([[0.69]]),
# linear weights part 1.
np.array([[0.71]]),
# embedding_weights part 0.
np.array([[3., 3.3], [2., 2.2], [1., 1.1]]),
# embedding_weights part 1.
np.array([[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]])
]
},
sess)
def testErrorConditions(self):
x = variable_scope.get_variable(
"x",
shape=[4, 1],
initializer=ones(),
partitioner=lambda shape, dtype: [2, 1])
    # A list of PartitionedVariable is an invalid type when warm-starting with vocab.
self.assertRaises(TypeError, ws_util._warm_start_var_with_vocab, [x],
"/tmp", 5, "/tmp", "/tmp")
    # Unused variable names raise a ValueError.
with ops.Graph().as_default():
with self.cached_session() as sess:
x = variable_scope.get_variable(
"x",
shape=[4, 1],
initializer=ones(),
partitioner=lambda shape, dtype: [2, 1])
self._write_checkpoint(sess)
self.assertRaises(
ValueError,
ws_util.warm_start,
self.get_temp_dir(),
var_name_to_vocab_info={"y": ws_util.VocabInfo("", 1, 0, "")})
self.assertRaises(
ValueError,
ws_util.warm_start,
self.get_temp_dir(),
var_name_to_prev_var_name={"y": "y2"})
def testWarmStartFromObjectBasedCheckpoint(self):
prev_val = [[0.5], [1.], [1.5], [2.]]
with ops.Graph().as_default() as g:
with self.session(graph=g):
prev_var = variable_scope.get_variable(
"fruit_weights",
initializer=prev_val)
self.evaluate(variables.global_variables_initializer())
# Save object-based checkpoint.
tracking_util.Checkpoint(v=prev_var).save(
os.path.join(self.get_temp_dir(), "checkpoint"))
with ops.Graph().as_default() as g:
with self.session(graph=g):
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
ws_util.warm_start(self.get_temp_dir())
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(prev_val, self.evaluate(fruit_weights))
if __name__ == "__main__":
test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@training@warm_starting_util_test.pyPATH_END.py
|
{
"filename": "_leaf.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/icicle/_leaf.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LeafValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="leaf", parent_name="icicle", **kwargs):
super(LeafValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Leaf"),
data_docs=kwargs.pop(
"data_docs",
"""
opacity
Sets the opacity of the leaves. With colorscale
it is defaulted to 1; otherwise it is defaulted
to 0.7
""",
),
**kwargs,
)
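# A minimal usage sketch (added for illustration; not part of the original
# validator module). The "leaf.opacity" property documented in data_docs above
# is normally set on an icicle trace through plotly.graph_objects; the call
# below reflects the public API as commonly used, so treat it as an assumption
# rather than part of this file:
#
#   import plotly.graph_objects as go
#
#   fig = go.Figure(go.Icicle(
#       labels=["root", "A", "B"],
#       parents=["", "root", "root"],
#       leaf=dict(opacity=0.5),  # overrides the 0.7 / 1.0 defaults noted above
#   ))
#   fig.show()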
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@icicle@_leaf.pyPATH_END.py
|
{
"filename": "test_gridding.ipynb",
"repo_name": "FRBs/zdm",
"repo_path": "zdm_extracted/zdm-main/papers/H0_I/Analysis/CRACO/test_gridding.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import zdm
import os, sys
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from zdm import survey
from zdm import io
from zdm import iteration as it
from zdm.craco import loading
matplotlib.rcParams['image.interpolation'] = None
```
/opt/anaconda3/envs/frbenv/lib/python3.9/site-packages/FRB-0.1.dev0-py3.9.egg/frb/halos/hmf.py:51: UserWarning: hmf_emulator not imported. Hope you are not intending to use the hmf.py module..
warnings.warn("hmf_emulator not imported. Hope you are not intending to use the hmf.py module..")
/var/folders/h1/kbzvqprx5p10xqkgz7w8g0880000gn/T/ipykernel_90171/3625334312.py:12: MatplotlibDeprecationWarning: Support for setting an rcParam that expects a str value to a non-str value is deprecated since 3.5 and support will be removed two minor releases later.
matplotlib.rcParams['image.interpolation'] = None
```python
defaultsize=14
ds=4
font = {'family' : 'normal',
'weight' : 'normal',
'size' : defaultsize}
matplotlib.rc('font', **font)
def main(p, survey, sv, ev, nv, nFRB=1000, iFRB=0, nz=500, ndm=1400, nbeams=5):
""" Args
p (str): parameter to vary
survey (str): MC survey to load
sv (int): start value to iterate from
ev (int): end value to iterate to
nv (int): number of pvalues to iterate over
        nFRB (int, optional): number of FRBs to analyze. defaults to 1000.
iFRB (int, optional): starting index for FRBs in MC survey. defaults to 0
ndm (int, optional): size of grid in DM. defaults to 1400.
nz (int, optional): size of grid in redshift. defaults to 500.
nbeams (int, optional): number of bins for beamshape
Returns
pvalues (array): parameter range iterated over
lls (array): total loglikelihood
        lcontr (array, [pvalues,4]): individual loglikelihood contributions [p(z|DM), p(DM), p(DM|z), p(z)]
"""
input_dict=io.process_jfile('Cubes/craco_full_cube.json')
# deconstruct the input_dict
state_dict, cube_dict, vparam_dict = it.parse_input_dict(input_dict)
survey, grid = loading.survey_and_grid(survey_name=survey, state_dict=state_dict, NFRB=nFRB, iFRB=iFRB, lum_func=2, nz=nz, ndm=ndm, Nbeams=nbeams)
# parameter to do gridding analysis on
vparams = {}
vparams[p] = None
vparams['lC'] = -0.9
lls_list=[]
lcontr_list=[]
# parameter values to iterate over
pvalues=np.linspace(sv, ev, nv)
grids=[grid]
surveys=[survey]
for i in pvalues:
vparams[p]=i
grid.update(vparams)
print(grid.state)
llsum,lllist,NFRB,lcontr = it.calc_likelihoods_2D(grid, survey, norm=True,psnr=True, dolist=5)
lcontr_list.append(lcontr)
lls=lllist[0]+lllist[2]
lls_list.append(lls)
imx = np.nanargmax(lls_list)
print(f"Max LL at {p}={pvalues[imx]}")
return pvalues, lls_list, lcontr_list
```
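As a quick sanity check on any one of the scans below, the `pvalues` and log-likelihood arrays returned by `main` can be plotted to read off the maximum-likelihood value directly. The helper below is a minimal illustrative sketch (it is not part of the original analysis and only assumes the numpy and matplotlib imports made above).
```python
# Illustrative helper: plot a single likelihood scan returned by main()
# and mark the maximum-likelihood parameter value.
def plot_scan(pvalues, lls, label="H0"):
    pvalues = np.asarray(pvalues)
    lls = np.asarray(lls)
    plt.figure(figsize=(6, 4))
    plt.plot(pvalues, lls, marker="o")
    plt.axvline(pvalues[np.nanargmax(lls)], linestyle="--", color="k")
    plt.xlabel(label)
    plt.ylabel("log-likelihood")
    plt.tight_layout()
    plt.show()
```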
```python
pvalues, lls_default, lcontr_default= main(p="H0",survey="CRACO_alpha1_Planck18_Gamma",sv=55.0,ev=85.0,nv=12,nFRB=100,iFRB=100,nz=500,ndm=1400,nbeams=5)
pvalues, lls_low, lcontr_low= main(p="H0",survey="CRACO_alpha1_Planck18_Gamma",sv=55.0,ev=85.0,nv=12,nFRB=100,iFRB=100,nz=250,ndm=700,nbeams=5)
pvalues, lls_nbeam, lcontr_nbeam= main(p="H0",survey="CRACO_alpha1_Planck18_Gamma",sv=55.0,ev=85.0,nv=12,nFRB=100,iFRB=100,nz=500,ndm=1400,nbeams=30)
#pvalues, lls_h, lcontr_h= main(p="H0",survey="CRACO_std_May2022",sv=55.0,ev=85.0,nv=12,nFRB=1000,iFRB=0,nz=5000,ndm=14000,nbeams=5)
lls_high=np.array([-779.3120999489402, -772.9056015228355, -769.489441166127, -768.0507329174199, -767.9288047668712, -768.6856022345145, -770.0264446084614, -771.7505453226654, -773.7196297466046, -775.8377195961393, -778.0378293452067, -780.2730745172248])
# nz=5000 and ndm=14000 take a long time to run - compute separately!
lls=[lls_default,lls_high,lls_low,lls_nbeam]
```
Loading survey: CRACO_alpha1_Planck18_Gamma
FRB survey sucessfully initialised with 100 FRBs starting from 100
Initialised grid
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 55.0,
"Omega_b": 0.07410861756429751,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
/Users/esanmouli/zdm/zdm/iteration.py:555: RuntimeWarning: divide by zero encountered in log10
Pll=np.log10(Pn)
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 57.72727272727273,
"Omega_b": 0.06727163926832909,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 60.45454545454545,
"Omega_b": 0.06133892643783596,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 63.18181818181818,
"Omega_b": 0.05615776977169298,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 65.9090909090909,
"Omega_b": 0.0516063861954283,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 68.63636363636364,
"Omega_b": 0.047586696625537474,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 71.36363636363636,
"Omega_b": 0.04401899751547243,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 74.0909090909091,
"Omega_b": 0.04083797921483232,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 76.81818181818181,
"Omega_b": 0.03798971568778685,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 79.54545454545455,
"Omega_b": 0.03542936391049404,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 82.27272727272727,
"Omega_b": 0.033119387984459576,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 85.0,
"Omega_b": 0.031028175520000003,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
Max LL at H0=65.9090909090909
Loading survey: CRACO_alpha1_Planck18_Gamma
FRB survey sucessfully initialised with 100 FRBs starting from 100
Initialised grid
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 55.0,
"Omega_b": 0.07410861756429751,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 57.72727272727273,
"Omega_b": 0.06727163926832909,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 60.45454545454545,
"Omega_b": 0.06133892643783596,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 63.18181818181818,
"Omega_b": 0.05615776977169298,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 65.9090909090909,
"Omega_b": 0.0516063861954283,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 68.63636363636364,
"Omega_b": 0.047586696625537474,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 71.36363636363636,
"Omega_b": 0.04401899751547243,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 74.0909090909091,
"Omega_b": 0.04083797921483232,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 76.81818181818181,
"Omega_b": 0.03798971568778685,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 79.54545454545455,
"Omega_b": 0.03542936391049404,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 82.27272727272727,
"Omega_b": 0.033119387984459576,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 85.0,
"Omega_b": 0.031028175520000003,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
Max LL at H0=68.63636363636364
Loading survey: CRACO_alpha1_Planck18_Gamma
FRB survey sucessfully initialised with 100 FRBs starting from 100
Initialised grid
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 55.0,
"Omega_b": 0.07410861756429751,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 57.72727272727273,
"Omega_b": 0.06727163926832909,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 60.45454545454545,
"Omega_b": 0.06133892643783596,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 63.18181818181818,
"Omega_b": 0.05615776977169298,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 65.9090909090909,
"Omega_b": 0.0516063861954283,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 68.63636363636364,
"Omega_b": 0.047586696625537474,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 71.36363636363636,
"Omega_b": 0.04401899751547243,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 74.0909090909091,
"Omega_b": 0.04083797921483232,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 76.81818181818181,
"Omega_b": 0.03798971568778685,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 79.54545454545455,
"Omega_b": 0.03542936391049404,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 82.27272727272727,
"Omega_b": 0.033119387984459576,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
{
"FRBdemo": {
"alpha_method": 1,
"lC": -0.9,
"sfr_n": 0.73,
"source_evolution": 0
},
"IGM": {
"F": 0.32
},
"MW": {
"DMhalo": 50,
"ISM": 35.0
},
"analysis": {
"NewGrids": true,
"sprefix": "Std"
},
"beam": {
"Bmethod": 2,
"Bthresh": 0
},
"cosmo": {
"H0": 85.0,
"Omega_b": 0.031028175520000003,
"Omega_b_h2": 0.0224178568132,
"Omega_k": 0.0,
"Omega_lambda": 0.6888463055445441,
"Omega_m": 0.30966,
"fix_Omega_b_h2": true
},
"energy": {
"alpha": 0.65,
"gamma": -1.01,
"lEmax": 41.4,
"lEmin": 30,
"luminosity_function": 2
},
"host": {
"lmean": 2.18,
"lsigma": 0.48
},
"scat": {
"Sfnorm": 600,
"Sfpower": -4.0,
"Slogmean": 0.7,
"Slogsigma": 1.9
},
"width": {
"Wbins": 10,
"Wlogmean": 1.70267,
"Wlogsigma": 0.899148,
"Wmethod": 2,
"Wscale": 2,
"Wthresh": 0.5
}
}
Max LL at H0=65.9090909090909
```python
from scipy import interpolate
import numpy as np
import matplotlib.pyplot as plt

# Interpolate the log-likelihood curves from the grid scans above onto a fine H0 grid.
# `lls` and `pvalues` are produced by the scanning cells earlier in this notebook.
lls_default, lls_high, lls_low, lls_nbeam = lls[0], lls[1], lls[2], lls[3]
H0new = np.arange(55.0, 80.0, 0.1)
lls_ip = []
for i, ll in enumerate(lls):
    tckj = interpolate.splrep(pvalues, ll, s=0)
    lls_ip_i = interpolate.splev(H0new, tckj)
    lls_ip.append(lls_ip_i)
lls_default, lls_high, lls_low, lls_nbeam = lls_ip[0], lls_ip[1], lls_ip[2], lls_ip[3]

print("best fit for lls_default= ", H0new[np.argmax(lls_default)])
print("best fit for lls_high= ", H0new[np.argmax(lls_high)])
print("best fit for lls_low= ", H0new[np.argmax(lls_low)])
print("best fit for lls_nbeam= ", H0new[np.argmax(lls_nbeam)])

# Normalise each curve to its peak and centre the x-axis on the default best fit
lls_default = lls_default - max(lls_default)
lls_low = lls_low - max(lls_low)
lls_nbeam = lls_nbeam - max(lls_nbeam)
lls_high = lls_high - max(lls_high)
H0new_p = H0new - H0new[np.argmax(lls_default)]

plt.plot(H0new_p, lls_default, label=r"$N_z$=500 $N_{\mathrm{DM}}$=1400 $N_b$=5", ls='-')
plt.plot(H0new_p, lls_low, label=r"$N_z$=250 $N_{\mathrm{DM}}$=700 $N_b$=5", ls='--')
plt.plot(H0new_p, lls_high, label=r"$N_z$=5000 $N_{\mathrm{DM}}$=14000 $N_b$=5", ls='-.')
plt.plot(H0new_p, lls_nbeam, label=r"$N_z$=500 $N_{\mathrm{DM}}$=1400 $N_b$=30", ls=':')
plt.legend(prop={'size': 11}, loc=8)
plt.xlim(-5.5, 5.5)
plt.ylim(-1, 0.01)
plt.xlabel(r"$\Delta H_0$ [km/s/Mpc]")
plt.ylabel(r"$\log_{10}$ $p(H_0|s,z,DM)$-$\log_{10}$ $p_{\mathrm{max}}$")
#plt.grid()
plt.show()
```
lls_default= 66.50000000000017
lls_high= 64.80000000000014
lls_low= 67.50000000000017
lls_nbeam= 66.40000000000016

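As a quick sanity check on these curves, the peak and a rough 68% interval can be read directly off the interpolated, peak-normalised log-likelihood. The cell below is a hypothetical addition (not part of the original analysis): it reuses `H0new` and `lls_default` from the cell above and assumes the curve stores a base-10 log-likelihood, as the axis label suggests.

```python
import numpy as np

# Hypothetical follow-up cell: locate the peak and the points where the curve
# drops by Delta lnL = 0.5 (i.e. Delta log10 L = 0.5/ln 10), a rough 1-sigma
# interval for a single parameter.
best = H0new[np.argmax(lls_default)]
inside = H0new[lls_default > -0.5 / np.log(10.0)]
print(f"H0 ~ {best:.2f} +{inside.max() - best:.2f} / -{best - inside.min():.2f} km/s/Mpc")
```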
```python
# Check the individual contributions to the log-likelihood [p(z|DM), p(DM), p(DM|z), p(z)]
# when changing ndm. `main` is the scan driver defined earlier in this notebook.
from scipy import interpolate
import numpy as np
import matplotlib.pyplot as plt

grid_lls = []
pvalues = np.linspace(55, 80, 12)
grid_lls_contr = []
ndm = np.array([700, 1400, 5600, 9800, 14000])
for n in ndm:
    pvalues, lls, lcontr = main(p="H0", survey="CRACO_alpha1_Planck18_Gamma", sv=55.0, ev=80.0,
                                nv=12, nFRB=100, iFRB=100, nz=500, ndm=n, nbeams=5)
    grid_lls.append(lls)
    grid_lls_contr.append(lcontr)

grid_lls_spline = []
grid_lls_contr_spline = []
H0new = np.arange(55.0, 80.0, 0.1)
label_contr = ["p(z|DM)", "p(DM)", "p(DM|z)", "p(z)"]
labels = ["ndm=700", "ndm=1400", "ndm=5600", "ndm=9800", "ndm=14000"]
xlims = [(63, 71), (63, 69), (63, 69), (63, 79)]
ylims = [(-0.9, 0.05), (-0.3, 0.05), (-1, 0.05), (-1, 0.05)]

# One figure per likelihood term, with one curve per ndm resolution
grid_lls_contr_t = np.transpose(grid_lls_contr)
for i in range(0, len(grid_lls_contr_t)):
    contr = grid_lls_contr_t[i]
    #print(np.shape(contr))
    for j in range(len(ndm)):
        contr_j = np.transpose(contr)
        #print(np.shape(contr_j))
        tckj = interpolate.splrep(pvalues, contr_j[j], s=0)
        ynewj = interpolate.splev(H0new, tckj)
        ynewj_p = ynewj - max(ynewj)
        plt.plot(H0new, ynewj_p, label=labels[j])
    plt.xlabel("H0")
    plt.ylabel("log-lik")
    plt.title(label_contr[i])
    plt.xlim(xlims[i])
    plt.ylim(ylims[i])
    plt.legend()
    plt.show()
```




```python
```
|
FRBsREPO_NAMEzdmPATH_START.@zdm_extracted@zdm-main@papers@H0_I@Analysis@CRACO@[email protected]_END.py
|
{
"filename": "localized_names.py",
"repo_name": "mhammond/pywin32",
"repo_path": "pywin32_extracted/pywin32-main/win32/Demos/security/localized_names.py",
"type": "Python"
}
|
# A Python port of the MS knowledge base article Q157234
# "How to deal with localized and renamed user and group names"
# http://support.microsoft.com/default.aspx?kbid=157234
import sys
import pywintypes
from ntsecuritycon import (
DOMAIN_ALIAS_RID_ADMINS,
DOMAIN_USER_RID_ADMIN,
SECURITY_BUILTIN_DOMAIN_RID,
SECURITY_NT_AUTHORITY,
)
from win32net import NetUserModalsGet
from win32security import LookupAccountSid
def LookupAliasFromRid(TargetComputer, Rid):
# Sid is the same regardless of machine, since the well-known
# BUILTIN domain is referenced.
sid = pywintypes.SID()
sid.Initialize(SECURITY_NT_AUTHORITY, 2)
for i, r in enumerate((SECURITY_BUILTIN_DOMAIN_RID, Rid)):
sid.SetSubAuthority(i, r)
name, domain, typ = LookupAccountSid(TargetComputer, sid)
return name
def LookupUserGroupFromRid(TargetComputer, Rid):
# get the account domain Sid on the target machine
# note: if you were looking up multiple sids based on the same
# account domain, only need to call this once.
umi2 = NetUserModalsGet(TargetComputer, 2)
domain_sid = umi2["domain_id"]
SubAuthorityCount = domain_sid.GetSubAuthorityCount()
# create and init new sid with acct domain Sid + acct Rid
sid = pywintypes.SID()
sid.Initialize(domain_sid.GetSidIdentifierAuthority(), SubAuthorityCount + 1)
# copy existing subauthorities from account domain Sid into
# new Sid
for i in range(SubAuthorityCount):
sid.SetSubAuthority(i, domain_sid.GetSubAuthority(i))
# append Rid to new Sid
sid.SetSubAuthority(SubAuthorityCount, Rid)
name, domain, typ = LookupAccountSid(TargetComputer, sid)
return name
def main():
if len(sys.argv) == 2:
targetComputer = sys.argv[1]
else:
targetComputer = None
name = LookupUserGroupFromRid(targetComputer, DOMAIN_USER_RID_ADMIN)
print(f"'Administrator' user name = {name}")
name = LookupAliasFromRid(targetComputer, DOMAIN_ALIAS_RID_ADMINS)
print(f"'Administrators' local group/alias name = {name}")
if __name__ == "__main__":
main()
|
mhammondREPO_NAMEpywin32PATH_START.@pywin32_extracted@pywin32-main@win32@Demos@security@[email protected]_END.py
|
{
"filename": "positions.py",
"repo_name": "kapteyn-astro/kapteyn",
"repo_path": "kapteyn_extracted/kapteyn-master/kapteyn/positions.py",
"type": "Python"
}
|
#!/usr/bin/env python
#----------------------------------------------------------------------
# FILE: positions.py
# PURPOSE: Provides functions for the conversion of positions to grids
# AUTHOR: M.G.R. Vogelaar, University of Groningen, The Netherlands
# DATE: Nov 20, 2009
# UPDATE: Nov 20, 2009
# VERSION: 0.1
#
# (C) University of Groningen
# Kapteyn Astronomical Institute
# Groningen, The Netherlands
# E: [email protected]
#----------------------------------------------------------------------
"""
Module positions
================
In module :mod:`wcs` we provided two methods of the Projection object for
transformations between pixels and world coordinates. These methods are
:meth:`wcs.Projection.topixel` and :meth:`wcs.Projection.toworld` and they
allow (only) numbers as their input parameters. These transformation methods apply to
the coordinate system for which the Projection object is created and it is not
possible to enter world coordinates from other sky systems or with other units.
Often one wants more flexibility. For instance, in interaction with the user, positions
can be used to plot markers on a map or to preset the location of labels and
graticule lines. But what to do if you have positions that need to be marked and the
positions are from a FK5 catalog while your current map is given in
Galactic coordinates? Or what to do if you need to know,
given a radio velocity, what the optical velocity is for a spectral axis
which has frequency as its primary type? For these situations we
wrote function :func:`str2pos`.
This module enables a user/programmer to specify positions in either
pixel- or world coordinates. Its functionality is provided by a parser
which converts strings with position information into pixel coordinates
and world coordinates. Let's list some options with examples how to use
function :func:`str2pos` which is the most important method in this module.
Assume we have a projection object *pr* and you
want to know the world coordinates *w* and the pixels *p* for a given
string. Further, assume *u* are the units of the world coordinates
and *e* is an error message. Both *u* and *e* are output parameters.
Here are some examples how to use
:func:`str2pos`. We will give detailed descriptions of the options
in later sections.
* | Expressions for the input of **numbers**.
| Example: ``w,p,u,e = str2pos('[pi**2::3], [1:3]', pr)``
* | Use of **physical constants**.
| Example: ``w,p,u,e = str2pos('c_/299792458.0, G_/6.67428e-11', pr)``
* | Use of **units** to set world coordinates
| Example: ``w,p,u,e = str2pos('178.7792 deg 53.655 deg', pr)``
* | **Mix** of pixels and world coordinates.
| Example: ``w,p,u,e = str2pos('5.0, 53.655 deg', pr)``
* | Support of **sky definitions**.
| Example: ``w,p,u,e = str2pos('{eq, B1950,fk4, J1983.5} 178.12830409 {} 53.93322241', pr)``
* | Support for **spectral translations**.
| Example: ``w,p,u,e = str2pos('vopt 1050 km/s', pr)``
* | Coordinates from **text file** on disk.
| Example: ``w,p,u,e = str2pos('readcol("test123.txt", col=2)', pr)``
* | Support for maps with only **one spatial** axis (e.g. XV maps).
| Example: ``w,p,u,e = str2pos('{} 53.655 1.415418199417E+03 Mhz', pr, mixpix=6)``
* | Use of **sexagesimal** notation of spatial world coordinates.
| Example: ``w,p,u,e = str2pos('11h55m07.008s 53d39m18.0s', pr)``
* | Read **header** items.
| Example: ``w,p,u,e = str2pos("{} header('crval1') {} header('crval2')", pr)``
* Units, sky definitions and spectral translations are case insensitive and
**minimal matched** to the full names.
Examine next small script that uses the syntax described in this document to
set marker positions:
**Example: mu_markers.py - Demonstrate the use of strings for a position**
.. literalinclude:: EXAMPLES/mu_markers.py
Introduction
------------
Physical quantities, in a data structure which represents a measurement of an astronomical
phenomenon, are usually
measurements at fixed positions in the sky, sometimes at some spectral value such as a
Doppler shift, frequencies or velocity. These positions are examples of so called
**World Coordinates**. To identify a world coordinate in a measured data structure,
we use a coordinate system based on the pixels in that structure. Often the data
structures are FITS files and the coordinate system is subject to a set of rules.
For FITS files the first pixel on an axis is labeled with coordinate
1 and it runs to the value of *NAXISn* which is a FITS header item
that sets the length of the n-th axis in the data structure.
Assume you have a data structure representing an optical image of a part of the sky
and you need to mark a certain feature in the image or need to retrieve the intensity
of a pixel at a certain location. Then it is possible to identify the pixel using
pixel coordinates. But when you have positions from external sources like
catalogs, then these are not related to a FITS file and therefore given in world
coordinates coupled to a certain coordinate system (e.g. a sky system).
Then it would be convenient if you could specify positions exactly in those coordinates.
This module uses two other modules from the Kapteyn Package:
Module :mod:`wcs` provides methods for conversions between
pixel coordinates and world coordinates given a description of the world coordinate
system as defined in a (FITS) header. Module :mod:`celestial` converts world coordinates
between different sky- and reference systems and/or epochs.
In this module we combine the functionality of :mod:`wcs` and :mod:`celestial`
to write a coordinate parser to convert world coordinates to pixel coordinates (and back)
given a header that describes the WCS.
Note that a description of a world coordinate system can be either derived from a FITS header or
a Python dictionary with FITS keywords.
How to use this module
----------------------
This module is used in several modules of the Kapteyn Package, but
it can also be imported in your own scripts so that you are able to convert
positions (given as a string) to pixel- and world coordinates.
It is also possible to use this module as a test application.
If you want to see the test run then
type: ``python positions.py`` on the command line.
The source of the test strings with positions can be found in function :func:`dotest` in this module.
To get the idea, we list a short example starting with the definition of a header::
from kapteyn import wcs, positions
header = { 'NAXIS' : 2,
           'CDELT1' : -1.200000000000E-03, 'CDELT2' : 1.497160000000E-03,
           'CRPIX1' : 5, 'CRPIX2' : 6,
           'CRVAL1' : 1.787792000000E+02, 'CRVAL2' : 5.365500000000E+01,
           'CTYPE1' : 'RA---NCP', 'CTYPE2' : 'DEC--NCP',
           'CUNIT1' : 'DEGREE', 'CUNIT2' : 'DEGREE',
           'NAXIS1' : 10, 'NAXIS2' : 10,
         }
pr = wcs.Projection(header)
w,p,u,e = positions.str2pos('5, 6', pr)
if e == '':
   print "pixels:", p
   print "world coordinates:", w, u
Its output (which is always a NumPy array) is::
pixels: [[ 5. 6.]]
world coordinates: [[ 178.7792 53.655 ]] ('deg', 'deg')
Remember, *p* are the pixel coordinates, *w* the world coordinates and *u*
is a tuple with units.
We have valid coordinates if the string *e* is empty.
If it is not empty then there is an error condition and the string is an error message.
The parser does not raise exceptions but it stores a message after an exception
in the error message. This is to simplify the use of :func:`str2pos`.
If you want to extract just one position
then give the index in the output array, for example ``W0 = w[0]``. The x and y coordinates
are in this case: ``wx = W0[0]; wy = W0[1]``.
**Structure of output**
The function :func:`str2pos` returns a tuple with four items:
* *w*: an array with positions in world coordinates. One position has
*n* coordinates and *n* is the dimension of your data structure
which is 1 for a structure with one axis, 2 for a map, 3 for a cube, etc.
* *p*: an array with positions in pixel coordinates. It has the same structure
as *w*.
* *u*: an array with the units of the world coordinates.
These units are derived from the projection object with
an optional alternative sky system and/or an optional
spectral translation. The number of units in the list is the
number of coordinates in a position.
* *e*: an error message. If the length of this string is not 0, then
it represents an error message and the arrays *w* and *p* are empty.
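As a minimal sketch (re-using the Projection object *pr* and the example header
from the previous section), the four items can be unpacked and the positions
looped over as follows::

   w, p, u, e = positions.str2pos('10 20 10 30', pr)    # two pixel positions
   if e == '':
      for wi, pi in zip(w, p):
         print "world:", wi, "pixel:", pi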
Position syntax
---------------
Number of coordinates
.......................
A position has the same number of coordinates as the number of axes that are
defined by the Projection object. So each position in a 2-dim map has two coordinates.
One can enter 1 position or a sequence of positions as in:
>>> pos="0,1 4,5 2,3"
Numbers are separated either by a space or a comma.
So also:
>>> pos="0 1 4 5 2 3"
>>> pos="0,1,4,5,2,3"
give the same result.
Numbers in expressions
.......................
Numbers can be given as valid (Python) expressions.
A selection of functions and operators known to module NumPy can be used.
The functions are:
* abs, arccos, arccosh, arcsin, arcsinh, arctan, arctan2,
arctanh, cos, cosh, degrees, exp, log2, log10, mean, median, min, max,
pi, radians, sin, sinc, sqrt, sum, tan, tanh,
rand, randn, ranf, randint, sign
* Aliases: acos = arccos, acosh = arccosh, asin = arcsin,
asinh = arcsinh, atan = arctan, atan2 = arctan2, atanh = arctanh,
ln = log10(x)/log10(e), log=log10, deg=degrees, rad=radians
* arange, linspace
The functions allow a NumPy array as argument. Here its definition starts and
ends with a square bracket. Its elements are separated by a comma.
But note, it is not a Python list.
In addition to the selection of mathematical functions we also include
the functions :func:`arange` and :func:`linspace` from NumPy to
be able to generate arrays.
Examples:
* ``arange(4)`` -> [0, 1, 2, 3]
* ``max(arange(4))`` -> 3
* ``linspace(1,2,5)`` -> [1., 1.25, 1.5, 1.75, 2.]
* ``randint(0,10,3)`` -> [6, 4, 3]
* ``sin(ranf(4))`` -> [0.66019925, 0.24063844, 0.28068498, 0.23582177]
* ``median([-1,3,5,-2,5,1])`` -> 2.0
* ``mean(arange(4))`` -> 1.5
* ``log(10**[1,2,3])`` -> [1, 2, 3]
* ``log(100) log10(100)`` -> [2, 2]
* ``log2(e), ln(e)`` -> [1.44269504, 1.]
* ``log2(2**[1,2,3,4])`` -> [1, 2, 3, 4]
Note the difference between the result of ``[pi]*3`` when ``[pi]`` is a
Python list (then a new list is created with elements [pi,pi,pi]), and
the array ``[pi]``.
The array in our context is multiplied (element-wise) by 3.
This is also true for other operators.
So it is also valid to write:
* ``[1,2,3,4]`` -> [1, 2, 3, 4]
* ``pi*[1,2,3]`` -> [3.14159265, 6.28318531, 9.42477796]
* ``[1,2,3]**2`` -> [1., 4., 9.]
* ``[1,2,3]-100`` -> [-99., -98., -97.]
* ``[1,2,3]/0.3`` -> [ 3.33333333, 6.66666667, 10.]
The array syntax also allows for the generation of ranges.
A range follows the
syntax ``start:end:step`` and *start* may be smaller than *end*. Here we deviate
also from Python. That is, we include always the values *start* and *end* in
the result:
Some examples:
* ``[1:4]`` -> [ 1., 2., 3., 4.]
* ``[-1:-5]`` -> [-1., -2., -3., -4., -5.]
* ``[-1:-5:-2]`` -> [-1., -3., -5.]
* ``[5:1:1]`` -> [] # Note that increment is positive
* ``[1:3, 10:12, 100]`` -> [1., 2., 3., 10., 11., 12., 100.]
* ``[1*pi:2*pi]`` -> [3.14159265, 4.14159265, 5.14159265, 6.14159265, 7.14159265]
If one prefers the *non-inclusive* Python style ranges, then function :func:`arange` is
available. Another function is :func:`linspace` which generates a (given) number of
equidistant samples between a start and end value.
* :func:`arange()`. For example ``arange(1,4)**3`` results in an
array with elements 1, 2, 3 and all these elements are taken to the power of 3
* :func:`linspace`. The arguments for 'linspace' are a start value,
an end value and and the number of samples. For example ``linspace(1,3,4)`` results in an
array with elements 1, 1.66666667, 2.33333333, 3
A range with a number of identical elements is created using a syntax with two
subsequent colons:
* ``[1::3]`` -> [1, 1, 1]
* ``[1**2::2, pi::2]`` -> [1, 1, 3.14159265, 3.14159265]
.. note::
* Functions can have scalars, lists and arrays as arguments.
* Mathematical expressions can be applied on all array elements at the same time.
* Note that x to the power of y is written as x**y and not as
x^y (which is a *bitwise or*).
To get information about NumPy functions you have to read the Python documentation
(e.g. on the command line in a terminal, type: ``ipython``. On the ipython command line
type: ``import numpy; help(numpy.linspace)``).
Here are some examples how to use ranges in the input of positions:
>>> pos = "degrees(pi) e" # pixel coordinates: 180, 2.71828183
>>> pos = "degrees(atan2(1,1)) abs(-10)" # pixel coordinates: 45, 10.
>>> pos = "[pi::3]**2, [1:3]**3"
>>> pos = "[1,6/3,3,4]**3, pi*[1,2,3,4]"
>>> pos = "[1:10], [10,1]"
>>> pos = "[sin(pi):-10:-2] range(6)"
>>> pos = "linspace(0,3,200), tan(radians(linspace(0,3,200)))"
Grouping of numbers
....................
Coordinates can also be **grouped**. Elements in a group are processed in one pass
and they represent only one coordinate in a position.
A group of numbers can be prepended by a sky definition or spectral translation
or be appended by a unit.
Then the unit applies to all the elements in the group. We will see examples of this
in one of the next sections.
For the first example we could have grouped the coordinates as follows:
>>> pos="'0,4,2' '1,5,3'"
or, using the more powerful array generator, as:
>>> pos="[0,4,2] [1,5,3]"
Coordinates enclosed by single quotes or square brackets are parsed
by Python's expression evaluator *eval()* as one expression.
The elements in a group can also be expressions.
If square brackets are part of the expression, the expression represents
a Python list and not an array! Examine the next expressions:
>>> pos = "'[pi]+[1,2]' range(3)" # [pi, 1, 2] [0, 1, 2]
>>> pos = "'[pi]*3' range(3)" # [pi, pi, pi] [0, 1, 2]
>>> pos = "'[sin(x) for x in range(4)]' range(4)"
In this context the square brackets define a list. In the examples we demonstrate
the list operator '+' which concatenates lists, '*' which repeats the elements in a list
and list comprehension.
Note that Python's :func:`eval()` function requires that the elements in an expression
are separated by a comma.
It is important to remember that without quotes, the square brackets define an array.
The list operators '+' and '*' have a different meaning for lists and arrays.
For arrays they add or multiply element-wise as shown in:
>>> pos = "[0,4,2]+10 [1,5,3]*2" # is equivalent with "[10,14,12] [2,10,6]"
Other examples of grouping are listed in the section about reading data from
disk with :func:`readcol()` and in the section about the :func:`eval()` function.
Pixel coordinates
.................
All numbers, in a string representing a position, which are not recognized
as world coordinates are returned as pixel coordinates.
The first pixel on an axis has coordinate 1. Header value *CRPIX* sets the
position of the reference pixel. If this is an integer number, the reference is
located at the center of a pixel. This reference sets the location of the
world coordinate given in the (FITS) header in keyword *CRVAL*.
For the examples below you should use function :func:`str2pos` to test the conversions.
However, for this function you need a (FITS) header. In the description at :func:`str2pos`
you will find an example of its use.
Examples of two pixel coordinates in a two dimensional world coordinate system (wcs):
>>> pos = "10 20" # Pixel position 10, 20
>>> pos = "10 20 10 30" # Two pixel positions
>>> pos = "(3*4)-5 1/5*(7-2)"
>>> pos = "abs(-10), sqrt(3)"
>>> pos = "sin(radians(30)), degrees(asin(0.5))"
>>> pos = "cos(radians(60)), degrees(acos(0.5))"
>>> pos = "pi, tan(radians(45))-0.5, 3*4,0" # 2 positions
>>> pos = "atan2(2,3), 192"
>>> pos = "[pi::3], [e**2::3]*3" # [pi, pi, pi], [3*e**2, 3*e**2, 3*e**2]
Special pixel coordinates
..........................
For the reference position in a map we can use symbol 'PC' (Projection center).
The center of your data structure is set with symbol 'AC'.
You can use either one symbol or the same number of symbols as there are
axes in your data structure.
>>> pos = "pc" # Pixel coordinates of the reference pixel
>>> pos = "PC pc" # Same as previous. Note case insensitive parsing
>>> pos = "AC" # Center of the map in pixel coordinates
Constants
..........
A number of global constants are defined and these can be used in the
expressions for positions. The constants are case sensitive.
These constants are::
c_ = 299792458.0 # Speed of light in m/s
h_ = 6.62606896e-34 # Planck constant in J.s
k_ = 1.3806504e-23 # Boltzmann in J.K^-1
G_ = 6.67428e-11 # Gravitation in m^3. kg^-1.s^-2
s_ = 5.6704e-8 # Stefan-Boltzmann in J.s^-1.m^-2.K^-4
M_ = 1.9891e+30 # Mass of Sun in kg
P_ = 3.08567758066631e+16 # Parsec in m
World coordinates
..................
World coordinates can be distinguished from pixel coordinates. A world
coordinate is:
* a coordinate followed by a (compatible) unit. Note that the
units of the world coordinate are given in the (FITS) header in keyword *CUNIT*.
If there is no CUNIT in the header or it is an empty string or you
don't remember the units, then use either:
* The wildcard symbol '?'
* A case insensitive minimal match for the string 'UNITS'
* a coordinate prepended by a definition for a sky system or a spectral system.
* a coordinate entered in sexagesimal notation. (hms/dms)
.. note::
One can mix pixel- and world coordinates in a position.
Units
,,,,,,,
For a two dimensional data structure (e.g. an optical image of part of the sky)
we can enter a position in world coordinates as:
>>> pos = "178.7792 deg 53.655 deg"
But we can also use compatible units:
>>> pos = "178.7792*60 arcmin 53.655 deg" # Use of a compatible unit if CUNIT is "DEGREE"
>>> pos = "10 1.41541820e+09 Hz" # Mix of pixel coordinate and world coordinate
>>> pos = "10 1.41541820 GHz" # Same position as previous using a compatible unit
Units are minimal matched against a list with known units. The parsing of units
is case insensitive. The list with known units is:
* angles: 'DEGREE','ARCMIN', 'ARCSEC', 'MAS', 'RADIAN'
'CIRCLE', 'DMSSEC', 'DMSMIN', 'DMSDEG', 'HMSSEC', 'HMSMIN', 'HMSHOUR'
* distances: 'METER', 'ANGSTROM', 'NM', 'MICRON', 'MM', 'CM',
'INCH', 'FOOT', 'YARD', 'M', 'KM', 'MILE', 'PC', 'KPC', 'MPC', 'AU', 'LYR'
* time: 'TICK', 'SECOND', 'MINUTE', 'HOUR', 'DAY', 'YR'
* frequency: 'HZ', 'KHZ','MHZ', 'GHZ'
* velocity: 'M/S', 'MM/S', 'CM/S', 'KM/S'
* temperature: 'K', 'MK'
* flux (radio astr.): 'W/M2/HZ', 'JY', 'MJY'
* energy: 'J', 'EV', 'ERG', 'RY'
It is also possible to convert between inverse units like the wave number's 1/m
which, for example, can be converted to 1/cm.
For a unit, one can also substitute the wildcard symbol '?'. This is the same as
setting the units from the header (conversion factor is 1.0). The symbol is
handy to set coordinates to world coordinates. But it is essential if there are
no units in the header like the unitless STOKES axis. One can also use the string
*units* which has the same role as '?'.
>>> pos = "[0, 3, 4] ?"
>>> pos = "7 units"
>>> pos = "[5, 6.3] U"
Sky definitions
,,,,,,,,,,,,,,,,,
The detailed information about sky definitions can be found in:
* :ref:`celestial-skysystems`
* :ref:`celestial-refsystems`
* :ref:`celestial-epochs`
If a coordinate is associated with a sky definition it is parsed as a world coordinate.
A sky definition is either a case insensitive minimal match from the list::
'EQUATORIAL','ECLIPTIC','GALACTIC','SUPERGALACTIC'
or it is a definition between curly brackets which can contain one or
more items from the following list:
*sky system, reference system, equinox* and *epoch of observation*.
An empty string between curly brackets e.g. {}, followed by a number,
implies a world coordinate in the native sky system.
Examples:
>>> pos = "{eq} 178.7792 {} 53.655"
# As a sky definition between curly brackets
>>> pos = "{} 178.7792 {} 53.655"
# A world coordinate in the native sky system
>>> pos = "{eq,B1950,fk4} 178.12830409 {} 53.93322241"
# With sky system, reference system and equinox
>>> pos = "{fk4} 178.12830409 {} 53.93322241"
# With reference system only.
>>> pos = "{eq, B1950,fk4, J1983.5} 178.1283 {} 53.933"
# With epoch of observation (FK4 only)
>>> pos = "{eq B1950 fk4 J1983.5} 178.1283 {} 53.933"
# With space as separator
>>> pos = "ga 140.52382927 ga 61.50745891"
# Galactic coordinates
>>> pos = "ga 140.52382927 {} 61.50745891"
# Second definition copies from first
>>> pos = "su 61.4767412, su 4.0520188"
# Supergalactic
>>> pos = "ec 150.73844942 ec 47.22071243"
# Ecliptic
>>> pos = "{} 178.7792 6.0"
# Mix world- and pixel coordinate
>>> pos = "5.0, {} 53.655"
# Mix with world coordinate in native system
.. note::
* Mixing sky definitions for one position is not allowed i.e. one cannot
enter *pos = "ga 140.52382927 eq 53.655"*
* If you mix a pixel- and a world coordinate in a spatial system
then this world coordinate must be defined in the native system, i.e. *{}*
We can also specify positions in data structures with only one spatial axis
and a non-spatial axis (e.g. position velocity diagrams). The conversion function
:func:`str2pos` needs a pixel coordinate for the missing spatial axis.
If one of the axes is a spectral axis, then one can enter world coordinates
in a compatible spectral system:
>>> pos = "{} 53.655 1.415418199417E+09 hz"
# Spatial and spectral world coordinate
>>> pos = "{} 53.655 1.415418199417E+03 Mhz"
# Change Hz to MHz
>>> pos = "53.655 deg 1.415418199417 Ghz"
# to GHz
>>> pos = "{} 53.655 vopt 1.05000000e+06"
# Use spectral translation to enter optical velocity
>>> pos = "{} 53.655 , vopt 1050 km/s"
# Change units
>>> pos = "10.0 , vopt 1050000 m/s"
# Combine with a pixel position
>>> pos = "{} 53.655 vrad 1.05000000e+06"
# Radio velocity
>>> pos = "{} 53.655 vrad 1.05000000e+03 km/s"
# Radio velocity with different unit
>>> pos = "{} 53.655 FREQ 1.41541820e+09"
# A Frequency
>>> pos = "{} 53.655 wave 21.2 cm"
# A wave length with alternative unit
>>> pos = "{} 53.655 vopt c_/285.51662"
# Use speed of light constant to get number in m/s
.. note::
For positions in a data structure with one spatial axis, the other
(missing) spatial axis is identified by a pixel coordinate. Usually it's
a slice.
This restricts the spatial world coordinates to their native wcs.
We define a world coordinate in its native sky system
with *{}*
.. note::
A sky definition need not be repeated. Only one definition is allowed
in a position. The second definition therefore can be empty as in *{}*.
.. note::
World coordinates followed by a unit, are supposed to be compatible
with the Projection object. So if you have a header with spectral type FREQ but
with a spectral translation set to VOPT, then ``"{} 53.655 1.415418199417E+09 hz"``
is invalid, ``"10.0 , vopt 1050000 m/s"`` is ok and
also ``"{} 53.655 FREQ 1.415418199417e+09"`` is correct.
Sexagesimal notation
,,,,,,,,,,,,,,,,,,,,,,,
Read the documentation at :func:`parsehmsdms` for the details.
Here are some examples:
>>> pos = "11h55m07.008s 53d39m18.0s"
>>> pos = "{B1983.5} 11h55m07.008s {} -53d39m18.0s"
>>> pos = "-33d 0d"
Reading from file with function *readcol()*, *readhms()* and *readdms()*
..........................................................................
Often one wants to plot markers at positions that are stored in a text
file (Ascii) on disk.
In practice one can encounter many formats for coordinates in text files.
Usually these coordinates are written in columns. For example one can expect
longitudes in degrees in the first column and latitudes in degrees in the second.
But what do these coordinates represent? Are they galactic or ecliptic positions?
If your current plot represents an equatorial system can we still plot the markers
from the file if these are given in the galactic sky system? And there are more
questions:
* Assume you have a file with three columns with hours, minutes and seconds as longitude
and three columns with degrees, minutes and seconds as latitude. Is it possible
to read these columns and combine them into longitudes and latitudes?
Assume you have a file and the Right Ascensions are given in decimal hours,
is it possible to convert those to degrees?
* Assume your file has numbers that are in a unit that is not the same unit
as the axis unit in your plot. Is it possible to change the units of the
data of the column in the text file?
* Assume you have several (hundreds of) thousands marker positions.
Is reading the marker positions fast?
* If a file has comment lines that start with another symbol than '!' or '#',
can one still skip the comment lines?
* If a file has columns separated by something else than whitespace,
is it still possible then to read a column?
All these questions can be answered with *yes* if you use this module.
We provided three functions: :func:`readcol()`, :func:`readhms()` and :func:`readdms()`.
These functions are based on module :mod:`tabarray`. The routines in this
module are written in C and as a result of that, reading data from file is very fast.
The arguments of these functions are derived from those in
:func:`tabarray.readColumns` with the exception that
argument *cols=* is replaced by *col=* for function *readcol()* because
we want to read only one column per coordinate to keep the syntax
easy and flexible.
In the functions :func:`readhms()` and :func:`readdms()`, which are
also based on :func:`tabarray.readColumns`, the *cols=* argument is replaced by
arguments *col1=, col2=, col3=*. These functions read three columns at once and
combine the columns into one.
Tabarray routines count with 0 as the first column, first row etc. The routines
that we describe here count with 1 as the first column or row etc.
**syntax**
>>> readcol(filename, col=1, fromline=None, toline=None, rows=None, comment="!#",
sepchar=', \t', bad=999.999, fromrow=None, torow=None, rowstep=None)
>>> readhms(filename, col1=1, col2=2, col3=3,
fromline=None, toline=None, rows=None, comment="!#",
sepchar=', \t', bad=999.999,
fromrow=None, torow=None, rowstep=None)
Function :func:`readdms()` has the same syntax as :func:`readhms()`
The parameters are:
* filename - a string with the name of a text file containing the table.
The string must be entered with double quotes. Single quotes
have a different function in this parser (grouping).
* col - a scalar that sets the column number.
* fromline - Start line to be read from file (first is 1).
* toline - Last line to be read from file. If not specified, the end of the file is assumed.
* comment - a string with characters which are used to designate comments in the input file. The occurrence of any of these characters on a line causes the rest of the line to be ignored. Empty lines and lines containing only a comment are also ignored.
* sepchar - a string containing the column separation characters to be used. Columns are separated by any combination of these characters.
* rows - a tuple or list containing the row numbers to be extracted.
* bad - a number to be substituted for any field which cannot be decoded as a number.
The default value is 999.999
* fromrow - number of row from the set of lines with real data to start reading
* torow - number of row from the set of lines with real data to end reading. The *torow* line
is included.
* rowstep - Step size in rows. Works also if no values are given for *fromrow* and *torow*.
There is a difference between the *rows=* and the *fromline=* , *toline=*
keywords. The first reads the specified rows from the *parsed* contents
of the file (*parsed* contents are lines that are not comment lines), while the line keywords specify which lines you want to read from the file.
Usually comment characters '#' and '!' are used. If you expect another comment
character then change this keyword.
Keyword *sepchar=* sets the separation character. The default is a comma,
a space and a tab. *bad=* is the value
that is substituted for values that could not be parsed so that they can be
easily identified.
.. note::
* Numbering of columns start with 1
* Numbering of rows start with 1
* Numbering of lines start with 1
* The result is an array so it can be used in an expression
Some examples:
Assume a text file on disk with a number of rows with 2 dimensional marker positions
in pixel coordinates. The text file is called *pixmarks.txt*.
Then the simplest line to read this data is:
>>> pos = 'readcol("pixmarks.txt") readcol("pixmarks.txt",2)'
>>> annim.Marker(pos=pos, marker='o', markersize=10, color='r')
All parameters have defaults except the filename parameter.
The default column is 1, i.e. the first column.
For readability we prefer to write the positions as:
>>> pos = 'readcol("pixmarks.txt", col=1) readcol("pixmarks.txt",col=2)'
If you want all the data up to line 30 (and line 30 including) you should write:
>>> pos = 'readcol("pixmarks.txt", col=1, toline=30) readcol("pixmarks.txt",col=2, toline=30)'
If your file has relevant data from line 30 to the end of the file, one should write:
>>> pos = 'readcol("pixmarks.txt", col=1, fromline=30) readcol("pixmarks.txt",col=2, fromline=30)'
As stated earlier, we distinguish *lines* and *rows* in a file.
Lines are also those which are empty or which start with a comment.
Rows are only those lines with data. So if you want to read only the first
5 rows of data, then use:
>>> pos = 'readcol("pixmarks.txt", col=1, torow=5) readcol("pixmarks.txt",col=2, torow=5)'
Note that the parameters *toline* and *torow* include the given value. You can specify
a range of rows including a step size with:
>>> pos = 'readcol("pixmarks.txt", col=1, fromrow=10, torow=44, rowstep=2), .....'
to get row number 10, 12, ..., 44. Note that it is not possible to set a
step size if you use the *fromline* or *toline* parameter.
In some special circumstances you want to be able to read only
preselected rows from the data lines. Assume a user needs rows 1,3,7,12,44.
Then the position string should be:
>>> pos = 'readcol("pixmarks.txt", col=1, rows=[1,3,7,12,44]), .....'
Perhaps you wonder why you need to repeat the :func:`readcol` function for
each coordinate. It is easier to use it once and specify two columns instead
of one. We did not implement this feature because usually one will read world coordinates
from file and often we want to add units or a sky- or spectral conversion.
Then you must be able to read the data for each column separately.
Assume we have a file on disk called 'lasfootprint' which stores two sets
of 2 dimensional positions (i.e. 4 coordinates) separated by an empty line.
::
# RA J2000 Dec l b eta lambda
8.330 -1.874 225.624 19.107 -36.250 300.000
8.663 -2.150 228.598 23.268 -36.250 305.000
8.996 -2.409 231.763 27.369 -36.250 310.000
9.329 -2.651 235.170 31.394 -36.250 315.000
9.663 -2.872 238.878 35.320 -36.250 320.000
..... ......
.....
It has a blank line at line 63. The first column represents Right Ascensions in
decimal hours.
If we want to read the positions given by column 1 and 2 of the second
segment (starting with line 66)
and column 1 is given in decimal hours, then you need the command:
>>> pos= 'readcol("lasfootprint", col=1,fromline=64)
HMShour readcol("lasfootprint", col=2,fromline=64) deg'
The first coordinate is followed by a unit, so it is a world coordinate.
We have a special unit that converts from decimal hours to degrees (*HMSHOUR*).
The last coordinate is followed by a unit (deg) so it is a world coordinate.
It was also possible to prepend the second coordinate with {} and omit the unit as in:
Between the brackets there is nothing specified. This means that we assume
the coordinates in the file (J2000) match the sky system of the world
coordinate system of your map.
>>> pos= 'readcol("lasfootprint", 1,64) HMShour {} readcol("lasfootprint", 2,64)'
Note that the third parameter is the *fromline* parameter.
If columns 3 and 4 in the file are galactic longitudes and latitudes, but
our basemap is equatorial, then we could have read the positions
with an alternative sky system as in (now we read the first data segment):
>>> pos= '{ga} readcol("lasfootprint", 3, toline=63) {} readcol("lasfootprint", 4, toline=63)'
The second sky definition is empty which implies a copy of the first
definition (i.e. {ga}).
.. note::
The sky definition must describe the world coordinate system of the
data on disk. It will be automatically converted to a position in
the sky system of the Projection object which is associated with
your map or axis.
Some files have separate columns for hour, degrees, minutes and seconds.
Assume you have an ASCII file on disk with 6 columns representing
sexagesimal coordinates. For example:
::
! Test file for Ascii data and the READHMS/READDMS command
11 57 .008 53 39 18.0
11 58 .008 53 39 17.0
11 59 .008 53 39 16.0
....
Assume that this file is called *hmsdms.txt* and it contains equatorial
coordinates in *'hours minutes seconds degrees minutes seconds'* format,
then read this data with:
>>> pos= '{} readhms("hmsdms.txt",1,2,3) {} readdms("hmsdms.txt",4,5,6)'
Or with explicit choice of which lines to read:
>>> pos= '{} readhms("hmsdms.txt",1,2,3,toline=63) {} readdms("hmsdms.txt",4,5,6,toline=63)'
The data is automatically converted to degrees.
What if the format is **'d m s d m s'** and the coordinates are galactic?
Then we should enter:
>>> pos= 'ga readdms("hmsdms.txt",1,2,3) ga readdms("hmsdms.txt",4,5,6)'
If your current sky system is galactic, then it is also possible to enter:
>>> pos= 'readdms("hmsdms.txt",1,2,3) deg readdms("hmsdms.txt",4,5,6) deg'
If the columns are not in the required order, use the keyword names:
>>> pos= 'readdms("hmsdms.txt", col1=3, col2=2, col3=1) deg readdms("hmsdms.txt", 4, 5, 6) deg'
The result of one of the functions described in this section is an array and therefore
suitable to use in combination with functions and operators:
>>> pos='1.1*readhms("hmsdms.txt",1,2,3)-5 sin(readdms("hmsdms.txt",4,5,6)-10.1)'
Reading header items with function *header()*
..............................................
Command *header* reads an item from the header that was used to create the Projection
object. The header item must represent a number.
>>> pos= 'header("crpix1") header("crpix2")'
.. note::
* Header keys are case insensitive
* A key must be given with double quotes
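Because the result is an ordinary number, it can also be used in expressions.
An illustrative example (two spatial axes assumed):
>>> pos = '3*header("crpix1") sin(header("crpix2"))'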
Parser error messages
.......................
The position parser is flexible but there are some rules. If the input
cannot be transformed into coordinates, then an appropriate message is
returned. In some cases the error message may seem unrelated to the actual
problem, as is often the case with parsers. If a number is malformed, the parser
next tries to parse it as a sky system or a unit; if that also fails, it will
complain about the sky system or the unit and not about the number.
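A minimal sketch of how such a message can be inspected (illustrative; it assumes
a Projection object *proj* for two spatial axes, and 'heg' is neither a valid unit
nor a sky system):
>>> world, pixels, units, errmes = str2pos('10, 53 heg', proj)
>>> if errmes != '':
...    print(errmes)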
Testing the parser
...................
You can run the module's 'main' (i.e. execute the module) to test a number of predefined
expressions and to experiment with your own positions entered at a prompt.
Please copy the module *positions.py* to your working directory first!
The program displays
a couple of examples before it prompts for user input. Then you are prompted
to enter a string (no need to enclose it with quotes because it is read as a string).
Enter positions for a two dimensional data structure with axes R.A. and Dec.
Start the test with:
>>> python positions.py
GIPSY's grid mode
......................
FITS pixel coordinates start with number one and the last pixel
for axis n is the value of header item *NAXISn*. Pixel value
*CRPIXn* is the pixel that corresponds to *CRVALn*. The value
of *CRPIXn* can be non-integer.
There are also systems that implement a different numbering.
For example, the Groningen Image Processing SYstem (GIPSY) uses an offset:
the pixel at *CRPIXn* is called grid 0, so
grid 0 corresponds to *CRVALn*. This has the advantage that grid coordinates
remain valid after cropping the input data. For FITS data we need to change
the value of *CRPIXn* after slicing the data and writing it to a new FITS file,
and then the original pixel coordinates of the same positions shift as well.
The Projection object can be set into GIPSY's grid mode using attribute
:attr:`gridmode` (True or False).
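A minimal sketch (illustrative; it assumes *proj* is a :class:`wcs.Projection` object
created from a FITS header; the *gridmode* keyword of :func:`str2pos` serves the
same purpose):
>>> proj.gridmode = True          # grid 0 now corresponds to CRPIXn
>>> world, pixels, units, errmes = str2pos("0 0", proj, gridmode=proj.gridmode)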
Functions
---------
.. autofunction:: str2pos
.. autofunction:: parsehmsdms
.. autofunction:: unitfactor
"""
# Imports
from re import split as re_split
from re import findall as re_findall
from string import whitespace, ascii_uppercase
import six
from numpy import nan as unknown
from numpy import asarray, zeros, floor, array2string
from numpy import array, ndarray
from kapteyn import wcs # The WCSLIB binding
from kapteyn.celestial import skyparser
# Next functions are imported for eval()
from kapteyn.tabarray import readColumns
from numpy import arange, linspace
from numpy import abs, arccos, arccosh, arcsin, arcsinh, arctan, arctan2
from numpy import arctanh, cos, cosh, degrees, exp, log2, log10, mean, median, min, max
from numpy import pi, radians, sin, sinc, sqrt, sum, tan, tanh, sign
from numpy.random import rand, randn, ranf, randint
# py2/3 comp:
#from operator import isSequenceType
def isSequenceType(obj):
    # Return True if 'obj' behaves like a sequence (list, tuple, etc.)
    try:
        from collections.abc import Sequence        # Python 3.3 and later
    except ImportError:
        try:
            from collections import Sequence        # older Python 3 / Python 2.6+
        except ImportError:
            from operator import isSequenceType as _isseq   # very old Python 2
            return _isseq(obj)
    return isinstance(obj, Sequence)
# Euler's number
e = 2.7182818284590452353602874713527
# Function aliases
acos = arccos
acosh = arccosh
asin = arcsin
asinh = arcsinh
atan = arctan
atan2 = arctan2
atanh = arctanh
log = log10
deg = degrees
rad = radians
badval = 99999.999 # Set bad number in tabarray routines to this value
sepchar=', \t' # Default separation characters for readcol function
def ln(x):
return log10(x)/log10(e)
class __a(object):
#-------------------------------------------------------
# Create array objects with square bracket syntax
# Allow for lists in a list.
#-------------------------------------------------------
def __init__(self,inclusive=False):
self.incl = int(inclusive)
def __getitem__(self, key):
if not isSequenceType(key):
key = (key,)
result = []
for element in key:
if isinstance(element, slice):
startval = float(element.start)
if element.stop is None: # v::n
for value in [element.start]*element.step:
result.append(value)
else:
endval = float(element.stop)
if element.step is not None: # va:vb:incr
incr = float(element.step)
elif startval>endval: # va:vb
incr = -1.0
else:
incr = +1.0
endval = endval+0.5*self.incl*incr
for value in arange(startval, endval, incr):
result.append(value)
elif isSequenceType(element):
for value in element:
result.append(float(value))
else:
result.append(float(element))
return array(result)
#ar = __a(inclusive=False) # We don't need this at the moment
a = __a(inclusive=True)
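# Illustrative examples of this array generator (note that the upper limit is included):
#   a[1:4]      -> array([ 1.,  2.,  3.,  4.])
#   a[10:0:-2]  -> array([ 10.,  8.,  6.,  4.,  2.,  0.])
#   a[pi::3]    -> the value of pi repeated 3 times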
# Define constants for use in eval()
c_ = 299792458.0 # Speed of light in m/s
h_ = 6.62606896e-34 # Planck constant in J.s
k_ = 1.3806504e-23 # Boltzmann in J.K^-1
G_ = 6.67428e-11 # Gravitation in m^3. kg^-1.s^-2
s_ = 5.6704e-8 # Stefan- Boltzmann in J.s^-1.m^-2.K^-4
M_ = 1.9891e+30 # Mass of Sun in kg
P_ = 3.08567758066631e+16 # Parsec in m
def issequence(obj):
return isinstance(obj, (list, tuple, ndarray))
def usermessage(token, errmes):
return "Error in '%s': %s" % (token, errmes)
def readcol(filename, col=1, fromline=None, toline=None, rows=None, comment="!#",
sepchar=sepchar, bad=badval, fromrow=None, torow=None, rowstep=None):
#-------------------------------------------------------------
"""
Utility to prepare a call to tabarray's readColumns() function
We created a default for the 'comment' argument and changed the
column argument to accept only one column.
"""
#-------------------------------------------------------------
if issequence(col):
column = col[0]
else:
column = col
column = [column-1] # First column is 1 but for readColumns it must be 0
if rows != None:
if not issequence(rows):
rows = [rows]
rows = [i-1 for i in rows]
lines = None
if not fromline is None or not toline is None:
if fromline is None:
fromline = 0
if toline is None:
toline = 0
lines = (fromline, toline)
rowslice = (None, )
if not fromrow is None or not torow is None or not rowstep is None:
if not fromrow is None:
fromrow -= 1
rowslice = (fromrow, torow, rowstep)
colslice = (None, )
c = readColumns(filename=filename, comment=comment, cols=column, sepchar=sepchar,
rows=rows, lines=lines, bad=bad, rowslice=rowslice, colslice=colslice)
return c.flatten()
def readhmsdms(filename, col1=1, col2=2, col3=3,
fromline=None, toline=None, rows=None, comment="!#",
sepchar=sepchar, bad=badval, fromrow=None, torow=None, rowstep=None, mode='hms'):
#-------------------------------------------------------------
"""
Helper function for readhms() and readdms()
"""
#-------------------------------------------------------------
column = [col1-1, col2-1, col3-1] # Make it zero based for readColumns()
if rows != None:
if not issequence(rows):
rows = [rows]
rows = [i-1 for i in rows]
lines = None
if not fromline is None or not toline is None:
if fromline is None:
fromline = 0
if toline is None:
toline = 0
lines = (fromline, toline)
rowslice = (None, )
if not fromrow is None or not torow is None or not rowstep is None:
if not fromrow is None:
fromrow -= 1
rowslice = (fromrow, torow, rowstep)
colslice = (None, )
c = readColumns(filename=filename, comment=comment, cols=column, sepchar=sepchar,
rows=rows, lines=lines, bad=bad, rowslice=rowslice, colslice=colslice)
if mode == 'hms':
h = c[0]; m = c[1]; s = c[2]
vals = (h+m/60.0+s/3600.0)*15.0
else:
d = c[0]; m = c[1]; s = c[2]
# Take care of negative declinations
vals = sign(d)*(abs(d)+abs(m)/60.0+abs(s)/3600.0)
return asarray(vals)
def readhms(filename, col1=1, col2=2, col3=3,
fromline=None, toline=None, rows=None, comment="!#",
sepchar=sepchar, bad=badval, fromrow=None, torow=None, rowstep=None):
#-------------------------------------------------------------
"""
Utility to prepare a call to tabarray's readColumns() function
We created a default for the 'comment' argument and changed the
column argument to accept only one column.
"""
#-------------------------------------------------------------
return readhmsdms(filename=filename, col1=col1, col2=col2, col3=col3,
fromline=fromline, toline=toline, rows=rows, comment=comment,
sepchar=sepchar, bad=bad, fromrow=fromrow, torow=torow, rowstep=rowstep,
mode='hms')
def readdms(filename, col1=1, col2=2, col3=3,
fromline=None, toline=None, rows=None, comment="!#",
sepchar=sepchar, bad=badval, fromrow=None, torow=None, rowstep=None):
#-------------------------------------------------------------
"""
Utility to prepare a call to tabarray's readColumns() function
We created a default for the 'comment' argument and changed the
column argument to accept only one column.
"""
#-------------------------------------------------------------
return readhmsdms(filename=filename, col1=col1, col2=col2, col3=col3,
fromline=fromline, toline=toline, rows=rows, comment=comment,
sepchar=sepchar, bad=bad, fromrow=fromrow, torow=torow, rowstep=rowstep,
mode='dms')
source = {}
def header(key):
#-------------------------------------------------------------
"""
This function should be used as method of the Coordparser
routine.
However we need it here to be able to use it in the
restricted version of eval(). It must read its items from
a header, so we made this header global. It is set in the
Coordparser method.
"""
#-------------------------------------------------------------
return float(source[key.upper()])
# Restrict available functions etc. to eval()
eval_restrictlist = ['arange', 'linspace',
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2',
'arctanh', 'cos', 'cosh', 'degrees', 'exp', 'log2', 'log10',
'mean', 'median', 'min', 'max',
'pi', 'radians', 'sin', 'sinc', 'sqrt', 'sum', 'tan', 'tanh',
'rand', 'randn', 'ranf', 'randint', 'acos', 'acosh', 'asin', 'asinh',
'atan', 'atan2', 'atanh', 'e', 'a', 'ln', 'log', 'deg', 'rad', 'sign',
'readcol', 'readhms', 'readdms', 'header',
'c_', 'h_', 'k_', 'G_', 's_', 'M_', 'P_']
# Filter the local namespace
eval_dict = dict([(k, locals().get(k, None)) for k in eval_restrictlist])
# We need some builtins
eval_dict['abs'] = abs
eval_dict['range'] = range
def eval_restrict(arg):
return eval(arg, {"__builtins__":None}, eval_dict)
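# Illustrative sketch of the restricted evaluator:
#   eval_restrict("sin(radians(30))")   -> approximately 0.5
#   eval_restrict("__import__('os')")   -> fails, because builtins are not available
# Only the names in 'eval_restrictlist' (plus abs and range) can be used in expressions.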
def minmatch(userstr, mylist, case=0):
#--------------------------------------------------------------
"""
Purpose: Given a list 'mylist' with strings and a search string
'userstr', find a -minimal- match of this string in
the list.
Inputs:
userstr- The string that we want to find in the list of strings
mylist- A list of strings
case- Case insensitive search for case=0 else search is
case sensitive.
Returns: 1) None if nothing could be matched
2) -1 if more than one elements match
3) >= 0 the index of the matched list element
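    Examples (illustrative):
               minmatch("DEG", ["DEGREE", "DMSDEG"]) returns 0  (unique minimal match)
               minmatch("M", ["MM", "METER"])        returns -1 (ambiguous)
               minmatch("XYZ", ["KM", "M"])          returns None (no match)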
"""
#--------------------------------------------------------------
indx = None
if case == 0:
ustr = userstr.upper()
else:
ustr = userstr
for j, tr in enumerate(mylist):
if case == 0:
liststr = tr.upper()
else:
liststr = tr
if ustr == liststr:
indx = j
break
i = liststr.find(ustr, 0, len(tr))
if i == 0:
if indx == None:
indx = j
else:
indx = -1
return indx
def unitfactor(unitfrom, unitto):
#-----------------------------------------------------------------------------------
"""
Return the conversion factor between two units.
:param unitfrom:
Units to convert from. Strings with '1/unit' or '/unit' are
also allowed. If this parameter is '?' then the incoming
unit is a wildcard character and the conversion factor 1.0
is returned. The same holds for a case insensitive minimum match
of the string 'UNITS'. This option is necessary for the option
to use world coordinates when there are no units given in the header
of the data (i.e. there is no CUNITn keyword or its contents is empty).
:type unitfrom: String
:param unitto:
Units to convert to. Strings with '1/unit' or '/unit' are
also allowed.
    :type unitto: String
:Returns:
The conversion factor to convert a number in 'unitsfrom'
to a number in 'unitsto'.
:Notes:
:Examples:
    >>> print(unitfactor('1/m', '1/km'))
    (1000.0, '')
    >>> print(positions.unitfactor('1/mile', '1/km'))
    (0.62137119223733395, '')
    >>> print(positions.unitfactor('mile', 'km'))
    (1.6093440000000001, '')
"""
#-----------------------------------------------------------------------------------
errmes = ''
# Process the wildcard options
if unitfrom == '?':
# Then the wildcard was used to set the unit
return 1.0, errmes
i = minmatch(unitfrom, ['UNITS'])
if i != None and i >= 0:
# Then user entered a string that sets the conversion factor to 1
return 1.0, errmes
units = {'DEGREE' : (1, 1.0),
'ARCMIN' : (1, 1.0/60.0),
'ARCSEC' : (1, 1.0/3600.0),
'MAS' : (1, 1.0 / 3600000.0),
'RADIAN' : (1, 57.2957795130823208767),
'CIRCLE' : (1, 360.0),
'DMSSEC' : (1, 0.0002777777777777778),
'DMSMIN' : (1, 0.0166666666666666667),
'DMSDEG' : (1, 1.0000000000000000000),
'HMSSEC' : (1, 15.0*0.0002777777777777778),
'HMSMIN' : (1, 15.0*0.0166666666666666667),
'HMSHOUR': (1, 15.0000000000000000000),
'METER' : (2, 1.0000000000000000000),
'ANGSTROM' : (2, 0.0000000001000000000),
'NM' : (2, 0.0000000010000000000),
'MICRON' : (2, 0.0000010000000000000),
'MM' : (2, 0.0010000000000000000),
'CM' : (2, 0.0100000000000000000),
'INCH' : (2, 0.0254000000000000000),
'FOOT' : (2, 0.3048000000000000000),
'YARD' : (2, 0.9144000000000000000),
'M' : (2, 1.0000000000000000000),
'KM' : (2, 1000.0000000000000000000),
'MILE' : (2, 1609.3440000000000000000),
'PC' : (2, 30800000000000000.0000000000000000000),
'KPC' : (2, 30800000000000000000.0000000000000000000),
'MPC' : (2, 30800000000000000000000.0000000000000000000),
'AU' : (2, 1.49598e11),
'LYR' : (2, 9.460730e15),
'TICK' : (3, 1.0000500000000000000),
'SECOND' : (3, 1.0000000000000000000),
'MINUTE' : (3, 60.0000000000000000000),
'HOUR' : (3, 3600.0000000000000000000),
'DAY' : (3, 86400.0000000000000000000),
'YR' : (3, 31557600.0000000000000000000),
'HZ' : (4, 1.0000000000000000000),
'KHZ' : (4, 1000.0000000000000000000),
'MHZ' : (4, 1000000.0000000000000000000),
'GHZ' : (4, 1000000000.0000000000000000000),
'M/S' : (5, 1.0000000000000000000),
'MM/S' : (5, 0.0010000000000000000),
'CM/S' : (5, 0.0100000000000000000),
'KM/S' : (5, 1000.0000000000000000000),
'K' : (6, 1.0000000000000000000),
'MK' : (6, 0.0010000000000000000),
'W/M2/HZ': (7, 1.0),
'JY' : (7, 1.0e-26 ), # Watts / m^2 / Hz
'MJY' : (7, 1.0e-29 ),
'TAU' : (9, 1.000000000000000000),
'J' : (10, 1.0),
'EV': (10, 1.60217733e-19),
'ERG': (10, 1.0e-7),
'RY' : (10, 2.179872e-18),
'UNITS': (11, 1.0)
}
# There is a special case for units like 1/m or /m
# Then the factor needs to be inverted.
inverse = inversefrom = inverseto = False
if unitfrom.startswith('/') or unitfrom.startswith('1/'):
inversefrom = True
if unitto.startswith('/') or unitto.startswith('1/'):
inverseto = True
if (inversefrom and not inverseto) or (inverseto and not inversefrom):
errmes = "[%s] cannot be converted to [%s]" % (unitfrom, unitto)
return None, errmes
inverse = inversefrom and inverseto
if inverse:
unitfrom = unitfrom.split('/')[1]
unitto = unitto.split('/')[1]
mylist = list(units.keys())
i = minmatch(unitfrom, mylist)
if i != None:
if i >= 0:
key = list(units.keys())[i]
typ1 = units[key][0]
fac1 = units[key][1]
else:
errmes = "Ambiguous unit [%s]" % unitto
return None, errmes
else:
errmes = "[%s] should be a unit but is unknown!" % unitfrom
return None, errmes
i = minmatch(unitto, mylist)
if i != None:
if i >= 0:
key = list(units.keys())[i]
typ2 = units[key][0]
fac2 = units[key][1]
else:
errmes = "Ambiguous unit [%s]" % unitto
return None, errmes
else:
errmes = "[%s] should be a unit but is unknown!" % unitto
return None, errmes
if typ1 == typ2:
unitfactor = fac1 / fac2
else:
errmes = "Cannot convert between [%s] and [%s]" % (unitfrom, unitto)
return None, errmes
if inverse:
unitfactor = 1.0/unitfactor
return unitfactor, errmes
def nint(x):
"""--------------------------------------------------------------
    Purpose:   Calculate a nearest integer compatible with the
definition used in GIPSY's coordinate routines.
Inputs:
x- A floating point number to be rounded to the nearest
integer
Returns: The nearest integer for 'x'.
Notes: This definition adds a rule for half-integers. This
rule is implemented with function floor() which implies
that the left side of a pixel, in a sequence of
horizontal pixels, belongs to the pixel while the
right side belongs to the next pixel. This definition
of a nearest integer differs from the Fortran
definition used in pre-April 2009 versions of GIPSY.
-----------------------------------------------------------------"""
return floor(x+0.5)
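# Illustrative values of the half-integer rule: nint(0.5) -> 1.0, nint(1.5) -> 2.0,
# nint(-0.5) -> 0.0 and nint(-1.5) -> -1.0 (the result is a float, as with floor()).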
def parseskysystem(skydef):
#--------------------------------------------------------------------
"""
Helper function for skyparser()
"""
#--------------------------------------------------------------------
try:
sky = skyparser(skydef)
return sky, ""
except ValueError as message:
errmes = str(message)
return None, errmes
def parsehmsdms(hmsdms, axtyp=None):
#--------------------------------------------------------------------
"""
Given a string, this routine tries to parse its contents
as if it was a spatial world coordinate either in
hours/minutes/seconds format or degrees/minutes/seconds
format.
:param hmsdms:
A string containing at least a number followed by
the character 'h' or 'd' (case insensitive) followed by
a number and character 'm'. This check must be performed
in the calling environment.
The number can be a negative value. The string cannot
contain any white space.
:type hmsdms: String
    :param axtyp:
        Distinguish formatted coordinates for longitude and latitude.
    :type axtyp: String
:Returns:
The parsed world coordinate in degrees and an empty error message
**or**
*None* and an error message that the parsing failed.
:Notes:
A distinction has been made between longitude axes and
latitude axes. The hms format can only be used on longitude
axes. However there is no check on the sky system (it should
be equatorial).
The input is flexible (see examples), even expressions are allowed.
:Examples:
>>> hmsdms = '20h34m52.2997s'
>>> hmsdms = '60d9m13.996s'
>>> hmsdms = '20h34m52.2997' # Omit 's' for seconds
>>> hmsdms = '60d9m13.996'
>>> hmsdms = '20h34m60-7.7003' # Expression NOT allowed
>>> hmsdms = '-51.28208458d0m' # Negative value for latitude
* The 's' for seconds is optional
* Expressions in numbers are not allowed because we cannot use Python's
eval() function, because this function interprets expressions like '08'
differently (octal).
* dms format always allowed, hms only for longitude axes.
Both minutes and seconds are optional. The numbers
need not to be integer.
"""
#-----------------------------------------------------------------------
if ('h' in hmsdms or 'H' in hmsdms) and axtyp != None and axtyp != 'longitude':
return None, "'H' not allowed for this axis"
parts = re_split('([hdmsHDMS])', hmsdms.strip()) # All these characters can split the string
number = 0.0
total = 0.0
sign = +1 # Keep track of the sign
lastdelim = ' '
prevnumber = True
converthour2deg = False
for p in parts:
try:
# Use float and not eval because eval cannot convert '08' like numbers
number = float(p) # Everything that Python can parse in a number
prevnumber = True
adelimiter = False
except:
f = None
if not p in whitespace:
delim = p.upper()
if delim == 'H':
converthour2deg = True
f = 3600.0
elif delim == 'D':
f = 3600.0
elif delim == 'M':
f = 60.0
elif delim == 'S':
f = 1.0
else:
return None, "Invalid syntax for hms/dms"
# Use the fact that H/D M and S are in alphabetical order
if prevnumber and delim > lastdelim and not (lastdelim == 'D' and delim == 'H'):
if number < 0.0:
if delim in ['H', 'D']: # Process negative numbers
number *= -1.0
sign = -1
else:
return None, "Invalid: No negative numbers allowed in m and s"
if delim in ['M', 'S']:
if number >= 60.0:
return None, "Invalid: No number >= 60 allowed in m and s"
total += number * f
lastdelim = delim
else:
return None, "Invalid syntax for sexagesimal numbers"
prevnumber = False
adelimiter = True
if prevnumber and not adelimiter:
total += number # Leftover. Must be seconds because 's' is assumed if nothing follows
if converthour2deg:
total *= 15.0 # From hours to degrees
return [sign*total/3600.0], '' # Return as a list because it will be transformed to a NumPy array
def mysplit(tstring):
"""--------------------------------------------------------------------
Purpose: This function splits a string into tokens. Whitespace
is a separator. Characters between parentheses or
curly brackets and quotes/double quotes are parsed
unaltered.
Inputs:
tstring- A string with expression tokens
Returns: A list with tokens
Notes: Parenthesis are used for functions e.g. atan().
Curly brackets are used to identify sky definitions.
Square brackets allow the use of lists e.g. [1,2,3,4].
Quotes group characters into one token.
The square bracket used within quotes is not parsed.
Without quotes, '[' is replaced by 'a[' which
uses the array generator from class __a.
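    Example (illustrative):
               mysplit('{eq} 178.7 {} 53.6 "test.txt"') returns
               ['{eq}', '178.7', '{}', '53.6', '"test.txt"']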
-----------------------------------------------------------------------"""
pardepth = 0
brackdepth = 0
sqbdepth = 0
quote = False
tokens = ['']
ws = whitespace + ',' # Extend separators with comma
for ch in tstring :
if ch == '(':
pardepth += 1
elif ch == ')':
pardepth -= 1
elif ch == '{':
brackdepth +=1
elif ch == '}':
brackdepth -=1
elif ch == '[':
sqbdepth += 1
elif ch == ']':
sqbdepth -= 1
elif ch in ('"', "'") :
quote = not quote
if ch != '"':
ch = '' # Copy quotes or not
if ch in ws and not (sqbdepth or brackdepth or pardepth or quote):
if tokens[-1] != '' : tokens.append('')
else:
if ch == '[' and not quote: # Start syntax for array generator
tokens[-1] += 'a'
tokens[-1] += ch
return tokens
class Coordparser(object):
"""--------------------------------------------------------------------
Purpose: Parse a string which represents position(s). Return an object
with the sorted input grids and world coordinates.
First a pre parser finds the tokens in the string. Tokens are
separated by a comma or whitespace.
A group of characters enclosed by single or double quotes form one token.
This enables a user to group numbers (with a sky system, a spectral
translation and/or a unit)
Characters enclosed by (), {} or [] are transferred unparsed. This allows
a user to enter:
1) parameters for functions, e.g. pos="atan2(x,y)"
2) group parameters of a sky system, e.g. pos="{eq, J1983.5}"
3) lists and arrays, e.g. POS="[0,1,2,3]"
4) expressions for Python's eval() 'restricted' parser
    Strings between single quotes are parsed unaltered, except that the quotes
    themselves are removed.
    Strings between double quotes are parsed unaltered, including the double quotes;
    this is necessary to pass file names.
Description of the token parser:
token END: #
token FILE A file on disk
token READCOL A file on disk
token NUM is a plain number
token SNUM is a sexagesimal number
token UNIT is a unit
token WORLD NUM followed by UNIT
token SKY One of EQ, EC, GA, SG or [SKY,parameters]
token SPECTR A compatible spectral translation
goal: positions END
positions: N (coordinates)*3 or datafromfile
coordinate: a grid or a world coordinate or sequence from file
grid: NUM: valid result of evalexpr() or result of Pythons eval() function
or result of READCOL
unit: UNIT: valid result of unitfactor
world: SNUM or NUM UNIT or sky or spectral
sky: SKY world or SKY NUM
spectral: SPECTR world or SPECTR NUM
---------------------------------------------------------------------"""
def __init__(self, tokenstr, ncoords, siunits, types, crpix,
naxis, translations, source,
gipsygrids=False):
"""--------------------------------------------------------------------
Purpose: Initialize the coordinate parser.
Inputs:
tokenstr- String with coordinate information, to be parsed by this
routine.
ncoords- The number of axes in the data structure for which we want
to parse positions. One position is 'ncoords' coordinates
siunits- A list with si units for each axis in the data structure
types- A list with axis types (e.g. 'longitude', 'latitude'). With
this list the parser can decide whether a sky system or a
spectral translation could be applied.
crpix- A list with reference pixels for each axis in the data
structure. This is needed to parse symbol 'PC'.
naxis- A list with lengths of each axes. This is needed to parse
symbol 'AC'.
translations- A list with all the spectral translations that are
possible for the selected data set.
gipsygrids- A Boolean that sets the GIPSY flag for using the grid
system instead of pixel coordinates. Grid 0 corresponds to
the value of CRPIX in the header.
Returns: This constructor instantiates an object from class 'Coordparser'. The
most important attributes are:
errmes- which contains an error message if the parsing was
not successful.
positions- zero, one or more positions (each position is 'ncoords'
numbers)
One position for ncoords=2 could be something like this:
[([308.71791541666664], 'w', '', ''),
([60.153887777777783], 'w', '', '')]
It contains two tuples for the coordinates.
One coordinate is a tuple with:
1) A list with zero, one or more numbers
2) A character 'g' to indicate that these numbers are grids
or a character 'w' to indicate that these numbers are
world coordinates.
3) A number or a tuple that sets the sky system
4) A spectral translation
-----------------------------------------------------------------------"""
# Start pre-parsing the token string
# This means that data between single quotes and curly brackets
# are stored as one token, so that the evaluation can be postponed
# and processed by special evaluators
tokstr = tokenstr.strip() + ' #'
tokens = mysplit(tokstr)
self.tokens = []
# This is a pre parsing step to replace one instance of the symbols 'PC' or 'AC'
# by 'ncoords' instances. Each symbol then is parsed for the corresponding axis
for i, t in enumerate(tokens):
if t.upper() == 'PC':
for j in range(ncoords):
self.tokens.append(t)
elif t.upper() == 'AC':
for j in range(ncoords):
self.tokens.append(t)
else:
self.tokens.append(t)
self.tokens.append('#')
self.ncoords = ncoords # i.e. the subset dimension
self.END = '#'
self.tokens.append(self.END) # Append a symbol to indicate end of token list
self.positions = []
self.errmes = ""
self.siunits = siunits
self.types = types
self.crpix = crpix
self.naxis = naxis
self.prevsky = None # A previous sky definition. Symbols {} will copy this
self.source = source
self.gipsygrids = gipsygrids
if translations:
self.strans, self.sunits = list(zip(*translations))
else:
self.strans = []
self.sunits = []
self.goal()
def goal(self):
#-------------------------------------------------------------------
# The final goal is to find a number of positions which each
# consist of 'ncoords' coordinates. The variable 'tpos' keeps
# track of where we are in the token list.
#-------------------------------------------------------------------
tpos = 0
while self.tokens[tpos] != self.END:
position, tpos = self.getposition(tpos)
if position == None:
return
self.positions.append(position)
self.errmes = ''
            if tpos >= len(self.tokens):     # Just to be safe
break
return
def getposition(self, tpos):
#-------------------------------------------------------------------
# We need ncoords coordinates to get one position.
# In the return value, the type is included. The type is
# either 'g' for a pixel, 'w' for a world coordinate
# and 'x' for a real error that should stop parsing.
#-------------------------------------------------------------------
numcoords = 0
p = []
numval = None
while numcoords < self.ncoords and self.tokens[tpos] != self.END:
val, typ, sky, spec, tposdelta = self.getcoordinate(tpos, numcoords)
if val == None:
return None, tpos
lval = len(val)
if numval == None:
numval = lval
elif lval != numval:
self.errmes = "Error: Different number elements in first=%d, second=%d" % (numval, lval)
return None , tpos
tpos += tposdelta
numcoords += 1
p.append((val, typ, sky, spec))
if numcoords != self.ncoords:
self.errmes = "Error: Not enough coordinates for a position"
return None, tpos
return p, tpos
def getcoordinate(self, tpos, coordindx):
#-------------------------------------------------------------------
# What is a coordinate? It can be a plain number (a grid) or a sequence
# of plain numbers. It could also be a world coordinate associated
# with a sky system or a world coordinate followed by a unit
#-------------------------------------------------------------------
number, typ, tposdelta = self.getnumber(tpos, coordindx)
if number != None:
if typ == 'g' and self.gipsygrids:
offgrid2pix = nint(self.crpix[coordindx])
number = [w+offgrid2pix for w in number]
return number, typ, '', '', tposdelta
else:
if typ != 'x':
if self.types[coordindx] in ['longitude', 'latitude']:
# Another possibility: it could be a coordinate with sky
world, sky, tposdelta = self.getsky(tpos, coordindx)
if world != None:
return world, 'w', sky, '', tposdelta
elif self.types[coordindx] == 'spectral':
world, spectral, tposdelta = self.getspectral(tpos, coordindx)
if spectral != None:
return world, 'w', '', spectral, tposdelta
else:
self.errmes = "Error: Not a grid nor world coord. sky or spectral parameter"
return None, '', '', '', 0
def getnumber(self, tpos, coordindx, unit=None):
#-------------------------------------------------------------------
# Allow a different unit if the unit is changed by a spectral translation
#
# POS='0 1 4' '242 243 244' km/s # Grouping of 3 grids and 3 world coordinates with unit
# POS= 0 -243 km/s 0 -244 km/s
#-------------------------------------------------------------------
global source
tryother = False
currenttoken = self.tokens[tpos]
number = None
if currenttoken.startswith('{'): # Fast way out. Cannot be a number
return None, '', 0
source = self.source
        # Try it as argument for Python's eval() with restrictions
try:
x = eval_restrict(currenttoken)
if isinstance(x, (tuple, ndarray)):
x = list(x)
            if not isinstance(x, list):   # A scalar (e.g. float) cannot be converted with list(); wrap it in a list
x = [x]
number = x
except Exception as message:
self.errmes = usermessage(currenttoken, message)
tryother = True
# Not a number or numbers from a file. Perhaps a sexagesimal number
# candidate = re_findall('[hmsHMSdD]', currenttoken)
if tryother:
tokupper = currenttoken.upper()
h_ind = tokupper.find('H')
d_ind = tokupper.find('D')
candidate = (h_ind >= 0 or d_ind >= 0) and not (h_ind >= 0 and d_ind >= 0)
if candidate:
m_ind = tokupper.find('M')
if m_ind >= 0:
candidate = (m_ind > h_ind and m_ind > d_ind)
if candidate:
world, errmes = parsehmsdms(currenttoken, self.types[coordindx])
if errmes == '':
return world, 'w', 1
else:
self.errmes = usermessage(currenttoken, errmes)
return None, '', 0
elif currenttoken.upper() == 'PC':
# 'PC' represents the projection center for spatial axes
# but more general, it is the position of the reference pixel.
# In GIPSY grids, this position is located somewhere in grid 0.
# Note that this routine does not know whether pixels or grids
# are entered. In the context of GIPSY we have to force the
# pixel that represents 'PC' to a grid, because the calling
# environment (GIPSY) expects the input was a grid. The conversion
# is done elsewhere in this class (getcoordinate()).
pc = self.crpix[coordindx]
if self.gipsygrids:
# Go from FITS pixel to grid
pc -= nint(self.crpix[coordindx])
return [pc], 'g', 1
elif currenttoken.upper() == 'AC':
                # The next code is compatible with the code in cotrans.c; we only made the
                # expression simpler by rewriting the formula so that cotrans' offset is not needed.
n = self.naxis[coordindx]
ac = 0.5 * (n+1)
if self.gipsygrids:
# Go from FITS pixel to grid. See also comment at 'PC'
# We have to do this because elsewhere pixels are
# converted to grids if gipsygrids=True. So compensate
# that correction here.
ac -= nint(self.crpix[coordindx])
return [ac], 'g', 1
else:
# No number nor a sexagesimal number
return None, '', 0
if number == None: # Just to be sure
return None, '', 0
# One or more numbers are parsed. The numbers could be modified if a unit follows
nexttoken = self.tokens[tpos+1]
if nexttoken != self.END:
if unit != None:
siunit = unit
else:
siunit = str(self.siunits[coordindx])
unitfact = None
unitfact, message = unitfactor(nexttoken, siunit)
if unitfact is None:
self.errmes = usermessage(nexttoken, message)
else:
world = [w*unitfact for w in number]
return world, 'w', 2 # two tokens scanned
return number, 'g', 1
def getsky(self, tpos, coordindx):
#-------------------------------------------------------------------
# Process sky systems.
# A sky system is always associated with a spatial axis.
# It is either one of the list 'eq', 'ec', 'ga' 'sg'
# or it is a list enclosed in curly brackets '{', '}'
# Examples:
# Assume an equatorial system and a subset with two spatial axes:
#
# POS=EQ 50.3 23 ; World coordinate in the equatorial system and a grid
# POS=Eq 50.3 23 ; Same input. Sky definition is case insensitive
# POS=eq 50.3 eq 10.0 ; Both coordinates are world coords
# POS=eq 50.3 ga 10.0 ; Mixed sky systems not allowed. No warning
# POS=ga 210 ga -30.3 ; Two world coordinates in the galactic II system
# POS=g 210 g -30.3 ; These are two positions in grids because g is a number
# POS=ga 140d30m ga 62d10m ; Use sexagesimal numbers
        # POS=eq 50.3 {} 10.0       ; Repeat the sky system for the last coordinate
        # POS=eq 50.3 10.0 deg      ; Same input as previous. Units of spatial axis is degrees
        # POS=eq 50.3 10*60 arcmin  ; Same input. Note use of expression and compatible units
        # POS={eq} 50.3 {} 10.0     ; Group the sky system with curly brackets
#
# POS={eq,J1983.5,fk5} 20.2 {} -10.0 ; A world coordinate defined in an equatorial systems
# at equinox J1983.5 in the reference system fk5.
# The second coordinate is a world coordinate
# in the same sky system.
# POS={eq,J1983.5,fk5} 20.2 -10 deg ; Second coordinate is a world coordinate in the
# same sky system.
# POS={eq,J1983.5,fk5} 20.2 -10 ; Not allowed: A world coordinate defined in an
# equatorial systems at equinox J1983.5 in the reference
# system fk5. Followed by a grid. This cannot be evaluated
# because a solution of the missing coordinate can
# only be found in the native sky system.
#-------------------------------------------------------------------
        self.errmes = ""
currenttoken = self.tokens[tpos]
try:
sk, errmes = parseskysystem(currenttoken)
if sk[0] == None: # Empty skydef {}
skydef = ''
if self.prevsky != None:
skydef = self.prevsky
else:
skydef = sk # Copy the PARSED sky definition!
except Exception as message:
skydef = None
self.errmes = usermessage(currenttoken, message)
if skydef != None:
nexttoken = self.tokens[tpos+1]
if nexttoken != self.END:
number, typ, tposdelta = self.getnumber(tpos+1, coordindx)
if number != None:
return number, skydef, tposdelta+1
else:
# No number no world coordinate
self.errmes = "Error: '%s' is a sky system but not followed by grid or world coord." % currenttoken
return None, '', 0
else:
# A sky but nothing to parse after this token
self.errmes = "Error: '%s' is a sky system but not followed by grid or world coord." % currenttoken
return None, '', 0
return None, '', 0
def getspectral(self, tpos, coordindx):
#-------------------------------------------------------------------
# This routine deals with spectral axes. A spectral translation must
        # be one of the allowed translations for the data for which a position
        # is required. The translation option must be given before a number.
# It can be followed by a unit. We expect the user has (FITS) knowledge
# about the meaning of the translations.
# Examples:
# POS= 0 243 km/s
# POS= 0 vopt 243 km/s
# POS= 0 beta -243000/c ; beta = v/c
#-------------------------------------------------------------------
currenttoken = self.tokens[tpos] # One of VOPT, VRAD etc.
indx = minmatch(currenttoken, self.strans, 0)
        if indx is not None and indx >= 0:      # minmatch() returns None if nothing matched
spectral = self.strans[indx]
unit = self.sunits[indx]
nexttoken = self.tokens[tpos+1]
if nexttoken != self.END:
number, typ, tposdelta = self.getnumber(tpos+1, coordindx, unit)
if number != None:
return number, spectral, tposdelta+1
else:
# No number and no world coordinate
self.errmes = "Error: '%s' is a spectral trans. but without grid or world c." % currenttoken
return None, '', 0
else:
# A spectral translation but nothing to parse after this token
self.errmes = "Error: '%s' is a spectral trans. but without grid or world c." % currenttoken
return None, '', 0
else:
# Not a spectral translation:
return None, '', 0
def dotrans(parsedpositions, subproj, subdim, mixpix=None):
#-------------------------------------------------------------------
"""
This routine expects pixels in gcoord and will also return pixels
"""
#-------------------------------------------------------------------
skyout_orig = subproj.skyout # Store and restore before return
errmes = '' # Init error message to no error
r_world = []
r_pixels = []
subsetunits = None
#if gipsygrids:
# # First we determine the -integer- offsets to transform grids
# # into 1-based FITS pixels
# offset = [0.0]*subdim
# for i in range(subdim):
# offset[i] = nint(subproj.crpix[i])
for p in parsedpositions:
wcoord = [unknown]*subdim # A list with tuples with a number and a conversion factor
gcoord = [unknown]*subdim
empty = [unknown]*subdim
# Reset sky system to original.
subproj.skyout = None
# p[i][0]: A list with one or more numbers
# p[i][1]: the mode ('g'rid or 'w'orld)
# p[i][2]: the sky definition
# p[i][3]: the spectral definition
skyout = None # Each position can have its own sky system
for i in range(subdim): # A position has 'subdim' coordinates
try:
numbers = asarray(p[i][0]) # Contents of coordinate number 'i' (can be a list with numbers)
except:
errmes = "Sequence not ok. Perhaps array is not flat"
return [], [], [], errmes
# Numbers here is always a LIST with 1 or more numbers. Make a NumPy
# array of this list to facilitate grid to pixel conversions
if numbers.shape == ():
N = 1
else:
N = numbers.shape[0]
if p[i][1] == 'g':
# Convert from grid to pixel
gcoord[i] = numbers
#if gipsygrids:
# gcoord[i] += offset[i]
wcoord[i] = asarray([unknown]*N)
else:
gcoord[i] = asarray([unknown]*N)
wcoord[i] = numbers
empty[i] = asarray([unknown]*N)
nsky = p[i][2]
# We parsed the skyout to the tuple format so we can compare 2 systems
# i.e. compare two tuples
if nsky != '':
if skyout == None: # Not initialized: start with this sky
skyout = nsky
else:
if nsky != skyout:
errmes = "Mixed sky systems not supported"
return [], [], [], errmes
if mixpix != None:
gcoord.append(asarray([mixpix]*N))
wcoord.append(asarray([unknown]*N))
empty.append(asarray([unknown]*N))
spectrans = None # Input spectral translation e.g. POS=vopt 105000
for i in range(subdim):
# The spectral axis could be any coordinate in a position, so
# check them all. WCSLIB allows for only one spectral
# axis in a dataset (which in practice makes sense).
# So break if we find the first spectral translation
spectrans = p[i][3]
if spectrans:
break
if spectrans:
newproj = subproj.spectra(spectrans)
else:
newproj = subproj
if skyout != None and skyout != "":
newproj.skyout = skyout
else:
newproj.skyout = None # Reset sky system
# The mixed method needs two tuples with coordinates. Each coordinate
# can be a list or a numpy array. The mixed routine recognizes
# pixel only input and world coordinate only input and is optimized
# to deal with these situations.
try:
wor, pix = newproj.mixed(tuple(wcoord), tuple(gcoord))
except wcs.WCSerror as message:
errmes = str(message.args[1]) # element 0 is error number
# Restore to the original projection object
# Note that 'newproj' could be pointer to 'subproj' which shares the same skyout
# and the skyout could have been changed.
subproj.skyout = skyout_orig
return [], [], [], errmes
# Now we have the pixels and want the world coordinates in the original
# system. Then first reset the skyout attribute.
subproj.skyout = skyout_orig
# Get world coordinates in system of input projection system
wor = subproj.toworld(tuple(pix))
subsetunits = subproj.cunit # Set units to final units
# pix is a tuple with 'subdim' coordinates. But note: each coordinate
# can be an array with one or more numbers.
# Make a NumPy array of this tuple and transpose the array
# to get one position (i.e. subdim coordinates) in one row.
wt = asarray(wor).T
pt = asarray(pix).T
# Append to the results list. Note that list appending is more flexible than
# NumPy array concatenation.
for w, p in zip(wt, pt):
r_world.append(w)
r_pixels.append(p)
return asarray(r_world), asarray(r_pixels), subsetunits, errmes
def str2pos(postxt, subproj, mixpix=None, gridmode=False):
#-------------------------------------------------------------------
"""
This function accepts a string that represents a position in the
world coordinate system defined by *subproj*. If the string
contains a valid position, it returns a tuple with numbers that
are the corresponding pixel coordinates and a tuple with
world coordinates in the system of *subproj*. One can also
enter a number of positions. If a position could not be
converted then an error message is returned.
:param postxt: The position(s) which must be parsed.
:type postxt: String
:param subproj: A projection object (see :mod:`wcs`).
Often this projection object will describe
a subset of the data structure (e.g. a
channel map in a radio data cube).
:type subproj: :class:`wcs.Projection` object
:param mixpix: For a world coordinate system with one spatial
axis we need a pixel coordinate for the missing
spatial axis to be able to convert between
world- and pixel coordinates.
:type mixpix: Float
:param gridmode: If True, correct pixel position for CRPIX to
get grid coordinates where the pixel at CRPIX is 0
:type gridmode: Boolean
:Returns:
This method returns a tuple with four elements:
* a NumPy array with the parsed positions in world coordinates
* a NumPy array with the parsed positions in pixel coordinates
* A tuple with the units that correspond to the axes
in your world coordinate system.
* An error message when a position could not be parsed
Each position in the input string is returned in the output as an
element of a numpy array with parsed positions. A position has the same
    number of coordinates as there are axes in the data defined by
the projection object.
:Examples:
::
from kapteyn import wcs, positions
header = { 'NAXIS' : 2,
'BUNIT' :'w.u.',
'CDELT1' : -1.200000000000E-03,
'CDELT2' : 1.497160000000E-03,
'CRPIX1' : 5,
'CRPIX2' : 6,
'CRVAL1' : 1.787792000000E+02,
'CRVAL2' : 5.365500000000E+01,
'CTYPE1' :'RA---NCP',
'CTYPE2' :'DEC--NCP',
'CUNIT1' :'DEGREE',
'CUNIT2' :'DEGREE'}
proj = wcs.Projection(header)
position = []
position.append("0 0")
position.append("eq 178.7792 eq 53.655")
position.append("{eq} 178.7792 {} 53.655")
position.append("{} 178.7792 {} 53.655")
position.append("178.7792 deg 53.655 deg")
position.append("11h55m07.008s 53d39m18.0s")
position.append("{eq, B1950,fk4} 178.7792 {} 53.655")
position.append("{eq, B1950,fk4} 178.12830409 {} 53.93322241")
position.append("{fk4} 178.12830409 {} 53.93322241")
position.append("{B1983.5} 11h55m07.008s {} 53d39m18.0s")
position.append("{eq, B1950,fk4, J1983.5} 178.12830409 {} 53.93322241")
position.append("ga 140.52382927 ga 61.50745891")
position.append("su 61.4767412, su 4.0520188")
position.append("ec 150.73844942 ec 47.22071243")
position.append("eq 178.7792 0.0")
position.append("0.0, eq 53.655")
for pos in position:
poswp = positions.str2pos(pos, proj)
if poswp[3] != "":
            raise Exception(poswp[3])
world = poswp[0][0]
pixel = poswp[1][0]
units = poswp[2]
        print(pos, "=", pixel, '->', world, units)
"""
#-------------------------------------------------------------------
if not isinstance(postxt, six.string_types):
raise TypeError("str2pos(): parameter postxt must be a String")
subdim = len(subproj.types)
if mixpix != None:
subdim -= 1
r_world = []
r_pixels = []
parsedpositions = Coordparser(postxt, # Text containing positions as entered by user
subdim, # The number of coordinates in 1 position
subproj.units, # Units (for conversions) in order of subset axes
subproj.types, # Axis types to distinguish spatial and spectral coords.
subproj.crpix, # Crpix values for 'PC' (projection center)
subproj.naxis, # Axis lengths for center 'AC'
subproj.altspec,# List with allowed spectral translations
subproj.source, # Get access to header
gipsygrids=gridmode)
if parsedpositions.errmes:
if postxt != '':
return [], [], [], parsedpositions.errmes
else:
# Note that the array with parsed positions cannot contain grids,
# because the routine that converts them expects pixel
# coordinates (because mixpix is a pixelcoordinate)
wor, pix, subsetunits, errmes = dotrans(parsedpositions.positions,
subproj,
subdim,
mixpix)
if errmes != '':
return [], [], [], errmes
return wor, pix, subsetunits, ''
def dotest():
def printpos(postxt, pos):
# Print the position information
world, pixels, units, errmes = pos
print(("Expression : %s"%postxt))
if errmes == '':
print(("World coordinates :", world, units))
print(("Pixel coordinates :", pixels))
else:
print(errmes)
print("")
header = { 'NAXIS' : 3,
'BUNIT' : 'w.u.',
'CDELT1' : -1.200000000000E-03,
'CDELT2' : 1.497160000000E-03,
'CDELT3' : -7.812500000000E+04,
'CRPIX1' : 5,
'CRPIX2' : 6,
'CRPIX3' : 7,
'CRVAL1' : 1.787792000000E+02,
'CRVAL2' : 5.365500000000E+01,
'CRVAL3' : 1.4154482500E+09, # Tuned to fit Vopt
'CTYPE1' : 'RA---NCP',
'CTYPE2' : 'DEC--NCP',
'CTYPE3' : 'FREQ-OHEL',
'CUNIT1' : 'DEGREE',
'CUNIT2' : 'DEGREE',
'CUNIT3' : 'HZ',
'DRVAL3' : 1.050000000000E+03,
'DUNIT3' : 'KM/S',
'FREQ0' : 1.420405752e+9,
'INSTRUME' : 'WSRT',
'NAXIS1' : 10,
'NAXIS2' : 10,
'NAXIS3' : 10
}
#wcs.debug=True
origproj = wcs.Projection(header)
print("-------------- Examples of numbers and constants, missing spatial--------------\n")
proj = origproj.sub((1,3,2))
mixpix = 6
userpos = ["(3*4)-5 1/5*(7-2)",
"abs(-10), sqrt(3)",
"sin(radians(30)), degrees(asin(0.5))",
"cos(radians(60)), degrees(acos(0.5))",
"pi, tan(radians(45))-0.5, 3*4,0",
"sin(arange(10)), range(10)",
"atan2(2,3), 192",
"atan2(2,3) 192",
"[pi,2]*3, [e**2,tan(pi)]*3",
"[1,2, atan2(2,0.9)] [pi**2::3]",
"c_/299792458.0, G_/6.67428e-11",
"sin([1,2,3]*pi),cos([1,2,3]*pi)",
"[1,2,3,4], sin(radians([0,30,60,90]))",
"10**[1,2,3], log2([1,2,3])",
"[1,2,3,4], sin(radians([0,30,60,90]))",
"deg([1,2,3,4]), rad([0,30,60,90])",
"[pi::3], [1,2,3]",
"[pi::3]*3, [1:3]**3",
"[1,6/3,3,4]**3, pi*[1,2,3,4]",
"[10:1], [1:10]",
"[10:0:-2], [0:10:2]",
"linspace(0,3,4), tan(radians(linspace(3,0,4)))",
"'1/2 ,sin(pi), 4' range(3)",
"[3:5,10]/2 range(4)",
"'[pi]+[1,2]' [1::3]",
"'[pi]*3' range(3)",
"'[sin(x) for x in range(4)]' range(4)"
]
for postxt in userpos:
wp = str2pos(postxt, proj, mixpix=mixpix)
printpos(postxt, wp)
print('')
print("-------------- Examples of 1 spatial axis and a missing spatial--------------\n")
proj = origproj.sub((1,2))
mixpix = 6
userpos = ["(3*4)",
"10",
"178.7792*60 arcmin",
"{} 178.7792",
"{B2000} 178.7792", # Not allowed
"'178.7792, 178.7794, 178.7796' deg",
"[178.7792, 178.7794, 178.7796] deg",
"[178.7792:178.7796:0.0002] deg",
"arange(178.7792, 178.7796, 0.0002) deg",
"linspace(178.7792, 178.7796, 4) deg",
"linspace(178.7792, 178.7796, 4) ?",
"linspace(178.7792, 178.7796, 4) Units",
"linspace(178.7792, 178.7796, 4) Un",
"3*arange(178.7792/3, 178.7796/3, 0.0002) deg",
"eq 178.7792", # Not allowed
"11h55m07.008s",
"178d40m",
"178d",
"178d10m 178d20m30.5s"
]
for postxt in userpos:
wp = str2pos(postxt, proj, mixpix=mixpix)
printpos(postxt, wp)
print('')
print("-------------- Examples of units, spectral translations and grouping -----------\n")
proj = origproj.sub((3,))
userpos = ["7 0",
"1.4154482500E+09 hz",
"1.4154482500E+03 Mhz",
"1.4154482500 Ghz",
"vopt 1.05000000e+06",
"vopt 1050 km/s",
"vopt 0",
"vrad 1.05000000e+06",
# f/c is lambda. For this f (=CRVAL3) this gives lambda:
# 299792458.0/1.4154482500E+09 = 0.21180036642102598
# The wave number is 1/lambda. If we use this as world coordinate
# then it should convert to crpix (=7)
"wavn [100/21.180036642102598/100, 4.76/100, 4.7/100] 1/cm",
"FREQ 1.4154482500E+09", # Note FREQ is not FREQ-OHEL
"0 7 10 20",
"'1.41, 1.4154482500, 1.42, 1.43' Ghz",
"[1.41, 1.4154482500, 1.42, 1.43] Ghz"
]
for postxt in userpos:
wp = str2pos(postxt, proj)
printpos(postxt, wp)
print('')
print("--------- Output of previous coordinates in terms of VOPT:----------\n")
proj2 = proj.spectra('VOPT-???')
userpos = ["7",
"freq 1.4154482500E+09 hz",
"fr 1.4154482500E+03 Mhz",
"fr 1.4154482500 Ghz",
"vopt 1.05000000e+06",
"vopt 1050 km/s",
"vopt 0",
"vrad 1.05000000e+06",
"FREQ 1.4154482500E+09",
"0 7 10 20 70.233164383215",
"FREQ '1.41, 1.4154482500, 1.42, 1.43' Ghz",
"FR [1.41, 1.4154482500, 1.42, 1.43] Ghz"
]
for postxt in userpos:
wp = str2pos(postxt, proj2)
printpos(postxt, wp)
print('')
print("--------- Sky systems and AC&PC ----------\n")
proj = origproj.sub((1,2))
userpos = ["0 0",
"5,6 0 0 3,1",
"eq 178.7792 eq 53.655", # e 10 will not work because e is a symbol and an ambiguous sky system`
"eq [178.7792:178.7796:0.0002] eq [53.655::3]",
"{eq} 178.7792 {} 53.655",
"178.7792 deg 53.655 deg",
"11h55m07.008s 53d39m18.0s",
"{eq, B1950,fk4} 178.7792 {} 53.655",
"{eq, B1950,fk4} 178.12830409 {} 53.93322241",
"{fk4} 178.12830409 {} 53.93322241",
"{B1983.5} 11h55m07.008s {} 53d39m18.0s",
"{eq, B1950,fk4, J1983.5} 178.12830409 {} 53.93322241",
"ga 140.52382927 ga 61.50745891",
"ga 140.52382927 {} 61.50745891",
"su 61.4767412, su 4.0520188",
"su [61.47674:61.47680:0.00002], {} [4.0520188::4]",
"ec 150.73844942 ec 47.22071243",
"{} 178.7792 6.0",
"5.0, {} 53.655",
"{eq} '178.779200, 178.778200, 178.777200' {} '53.655000, 53.656000, 53.657000'",
"PC",
"ac"]
for postxt in userpos:
wp = str2pos(postxt, proj)
printpos(postxt, wp)
print('')
print("--------- Same as previous but in terms of Galactic coordinates ----------\n")
sky_old = proj.skyout
proj.skyout = "ga"
for postxt in userpos:
wp = str2pos(postxt, proj)
printpos(postxt, wp)
print('')
proj.skyout = sky_old
print("--------- XV map: one spatial and one spectral axis ----------\n")
proj = origproj.sub((2,3,1))
mixpix = 5
print(("Spectral translations: ", proj.altspec))
userpos = ["{} 53.655 1.4154482500E+09 hz",
"{} 53.655 1.4154482500E+03 Mhz",
"53.655 deg 1.4154482500 Ghz",
"{} 53.655 vopt 1.05000000e+06",
"{} 53.655 , vopt 1050000 m/s",
"0.0 , vopt 1050000 m/s",
"10.0 , vopt 1050000 m/s",
"{} 53.655 vrad 1.05000000e+06",
"{} 53.655 FREQ 1.4154482500e+09",
"{} 53.655 wave 21.2 cm",
"{} [53.655, 53.6555] wave [21.2, 21.205] cm",
"{} '53.655, 53.6555' wave '21.2, 21.205' cm",
"{} [53.655::5] wave linspace(21.2,21.205,5) cm",
"{} 53.655 vopt c_/300 m/s"]
for postxt in userpos:
wp = str2pos(postxt, proj, mixpix=mixpix)
printpos(postxt, wp)
print('')
# Create an Ascii file with dummy data for testing the READCOL command
# The data in the Ascii file is composed of a fixed sequence of grids
# that are transformed to their corresponding galactic coordinates.
asciifile = "test123.txt"
f = open(asciifile, 'w')
s = "! Test file for Ascii data and the FILE command\n"
f.write(s)
s = "! Extra comment to distinguish between lines and rows\n"
f.write(s)
for i in range(10):
f1 = 1.0* i; f2 = f1 * 2.0; f3 = f2 * 1.5; f4 = f3 * 2.5
s = "%f %.12f %f %.12f\n" % (f1, f2, f3, f4)
f.write(s)
f.write("\n")
for i in range(10,15):
f1 = 1.0* i; f2 = f1 * 2.0; f3 = f2 * 1.5; f4 = f3 * 2.5
s = "%f %.12f %f %.12f\n" % (f1, f2, f3, f4)
f.write(s)
f.close()
asciifile = "hmsdms.txt"
f = open(asciifile, 'w')
s = "! Test file for Ascii data and the READHMS/READDMS command\n"
f.write(s)
s = "11 57 .008 53 39 18.0\n"; f.write(s)
s = "11 58 .008 53 39 17.0\n"; f.write(s)
s = "11 59 .008 53 39 16.0\n"; f.write(s)
f.close()
print("--------- Reading from file ----------\n")
proj = origproj.sub((1,2))
userpos = [ 'readcol("test123.txt") readcol("test123.txt",3)',
'10*readcol("test123.txt") sin(readcol("test123.txt",3))',
'readcol("test123.txt", col=1) readcol("test123.txt", col=3)',
'readcol("test123.txt", col=1) readcol("test123.txt", col=3)',
'readcol("test123.txt", col=1, toline=5) readcol("test123.txt", col=3, toline=5)',
# There is an empty line at line 13
'readcol("test123.txt", col=1, toline=14) readcol("test123.txt", col=3, toline=14)',
'readcol("test123.txt", col=1, fromline=5) readcol("test123.txt", col=3, fromline=5)',
'readcol("test123.txt", col=1, fromrow=5) readcol("test123.txt", col=3, fromrow=5)',
'readcol("test123.txt", col=1, torow=5) readcol("test123.txt", col=3, torow=5)',
'readcol("test123.txt", col=1, torow=12) readcol("test123.txt", col=3, torow=12)',
'readcol("test123.txt", col=1, rowstep=2) readcol("test123.txt", col=3, rowstep=2)',
'readcol("test123.txt", col=1, rows=[2,4,14]) readcol("test123.txt", col=3, rows=[2,4,14])',
'readcol("test123.txt", col=1, fromrow=4, torow=14, rowstep=2) linspace(0,1,6)',
'readcol("test123.txt", col=1, fromrow=4, torow=14, rowstep=2) [4:14:2]',
'{} readcol("test123.txt", col=1) {} readcol("test123.txt", col=3)',
'ga readcol("test123.txt", col=1) ga readcol("test123.txt", col=3)',
'readcol("test123.txt", col=1) deg readcol("test123.txt", col=3) deg',
'{} readhms("hmsdms.txt",1,2,3) {} readdms("hmsdms.txt",4,5,6)',
'1.1*readhms("hmsdms.txt",1,2,3)-5 sin(readdms("hmsdms.txt",4,5,6)-10.1)',
'{} readhms("hmsdms.txt",col1=1, col3=2, col2=3) {} readdms("hmsdms.txt",4,5,6)',
]
for postxt in userpos:
wp = str2pos(postxt, proj)
printpos(postxt, wp)
print('')
print("--------- Reading from header ----------\n")
proj = origproj.sub((1,2))
userpos = [ '{} header("crval1") {} header("crval2")',
'3*header("crpix1") sin(header("crpix2"))' ]
for postxt in userpos:
wp = str2pos(postxt, proj)
printpos(postxt, wp)
print('')
print("--------- Problem strings and error messages ----------\n")
proj = origproj.sub((1,2))
userpos = ["33",
"1 2 3",
"eq 178, ga 40",
"22 {}",
"10, 53 heg",
"readcol() readcol()", # No file name
'readcol("test123.txt, 1) readcol("test123.txt", 3)', # missing "
'readcol("test123.txt", 1, range(1:4)) 3:5', # 3:5 unknown syntax
'readcol("test123.txt", 3, rows=[1,2,3,4])',
'readcol("test123.txt", 1, rowsslice(5,None)) readcol("test123.txt", 2, rowslice=(5,None))',
'readcol("test123.txt", 1, row=3) readcol("test123.txt", 2, row=3)',
'{ga} readcol("test123.txt", 2) {} readcol("test123wcsRADECFREQ".txt, 4)', # mixed
'{ga} readcol("test123.txt", col=1) {} readcol("test123.txt", col=3)',
"'1, 2, a[1,2,3]' range(5)", # Array in list is not allowed
'readcol(exec saveeval.py) readcol(test123.txt,3)',
'readcol("test123.txt", issequence(3)+1) readcol(test123.txt,3)',
'readcol("test123.txt", eval("pi=2")) readcol(test123.txt,3)',
"readcol('test123.txt') readcol('test123.txt',3)", # Use double quotes for keys
"'[1:3], [4:7]', range(2)" # List in list not allowed
]
for postxt in userpos:
wp = str2pos(postxt, proj)
printpos(postxt, wp)
print('')
    import readline
    upos = 'xxx'
    proj = origproj.sub((1,2)); mixpix = None
    while upos != '':
        # input() already returns a string; do not wrap it in eval(), because the
        # entered text must be parsed by str2pos(), not evaluated as Python code.
        upos = input("Enter position(s) ..... [quit loop]: ")
        if upos != '':
            readline.add_history(upos)
            wp = str2pos(upos, proj, mixpix=mixpix)
            printpos(upos, wp)
if __name__ == "__main__":
dotest()
|
kapteyn-astroREPO_NAMEkapteynPATH_START.@kapteyn_extracted@kapteyn-master@[email protected]@.PATH_END.py
|
{
"filename": "morphology_1.py",
"repo_name": "itseez/opencv",
"repo_path": "opencv_extracted/opencv-master/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py",
"type": "Python"
}
|
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
src = None
erosion_size = 0
max_elem = 2
max_kernel_size = 21
title_trackbar_element_shape = 'Element:\n 0: Rect \n 1: Cross \n 2: Ellipse'
title_trackbar_kernel_size = 'Kernel size:\n 2n +1'
title_erosion_window = 'Erosion Demo'
title_dilation_window = 'Dilation Demo'
## [main]
def main(image):
global src
src = cv.imread(cv.samples.findFile(image))
if src is None:
print('Could not open or find the image: ', image)
exit(0)
cv.namedWindow(title_erosion_window)
cv.createTrackbar(title_trackbar_element_shape, title_erosion_window, 0, max_elem, erosion)
cv.createTrackbar(title_trackbar_kernel_size, title_erosion_window, 0, max_kernel_size, erosion)
cv.namedWindow(title_dilation_window)
cv.createTrackbar(title_trackbar_element_shape, title_dilation_window, 0, max_elem, dilatation)
cv.createTrackbar(title_trackbar_kernel_size, title_dilation_window, 0, max_kernel_size, dilatation)
erosion(0)
dilatation(0)
cv.waitKey()
## [main]
# optional mapping of values with morphological shapes
def morph_shape(val):
if val == 0:
return cv.MORPH_RECT
elif val == 1:
return cv.MORPH_CROSS
elif val == 2:
return cv.MORPH_ELLIPSE
## [erosion]
def erosion(val):
erosion_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_erosion_window)
erosion_shape = morph_shape(cv.getTrackbarPos(title_trackbar_element_shape, title_erosion_window))
## [kernel]
element = cv.getStructuringElement(erosion_shape, (2 * erosion_size + 1, 2 * erosion_size + 1),
(erosion_size, erosion_size))
## [kernel]
erosion_dst = cv.erode(src, element)
cv.imshow(title_erosion_window, erosion_dst)
## [erosion]
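# Added note (not part of the original tutorial): with trackbar value 1 the
# rectangular element built above is simply a 3x3 array of ones anchored at
# its centre, i.e. the result of
#   cv.getStructuringElement(cv.MORPH_RECT, (3, 3), (1, 1))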
## [dilation]
def dilatation(val):
dilatation_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_dilation_window)
dilation_shape = morph_shape(cv.getTrackbarPos(title_trackbar_element_shape, title_dilation_window))
element = cv.getStructuringElement(dilation_shape, (2 * dilatation_size + 1, 2 * dilatation_size + 1),
(dilatation_size, dilatation_size))
dilatation_dst = cv.dilate(src, element)
cv.imshow(title_dilation_window, dilatation_dst)
## [dilation]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Code for Eroding and Dilating tutorial.')
parser.add_argument('--input', help='Path to input image.', default='LinuxLogo.jpg')
args = parser.parse_args()
main(args.input)
|
itseezREPO_NAMEopencvPATH_START.@opencv_extracted@opencv-master@samples@python@tutorial_code@imgProc@erosion_dilatation@[email protected]_END.py
|
{
"filename": "_familysrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterternary/textfont/_familysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="familysrc", parent_name="scatterternary.textfont", **kwargs
):
super(FamilysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterternary@textfont@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "sdss/mangadap",
"repo_path": "mangadap_extracted/mangadap-main/mangadap/spectra/__init__.py",
"type": "Python"
}
|
from .rowstackedspectra import RowStackedSpectra
from .manga import MaNGARSS
|
sdssREPO_NAMEmangadapPATH_START.@mangadap_extracted@mangadap-main@mangadap@spectra@[email protected]_END.py
|
{
"filename": "write_snana_parquet.py",
"repo_name": "LSSTDESC/elasticc",
"repo_path": "elasticc_extracted/elasticc-main/lib_elasticc2/write_snana_parquet.py",
"type": "Python"
}
|
import sys
import logging
import pathlib
import polars
from read_snana import elasticc2_snana_reader
def main():
esr = elasticc2_snana_reader()
esr.logger.setLevel( logging.DEBUG )
outdir = pathlib.Path( "/data/raknop/ELASTICC2_parquet" )
for objclass in [ 'AGN', 'CART', 'Cepheid', 'EB', 'ILOT', 'KN_B19', 'KN_K17',
'Mdwarf-flare', 'PISN-MOSFIT', 'PISN-STELLA_HECORE',
'PISN-STELLA_HYDROGENIC', 'RRL', 'SL-SN1a', 'SL-SNII', 'SL-SNIb',
'SL-SNIc', 'SLSN-I+host', 'SLSN-I_no_host', 'SNII+HostXT_V19',
'SNII-NMF', 'SNII-Templates', 'SNIIb+HostXT_V19', 'SNIIn+HostXT_V19',
'SNIIn-MOSFIT', 'SNIa-91bg', 'SNIa-SALT3', 'SNIax', 'SNIb+HostXT_V19',
'SNIb-Templates', 'SNIc+HostXT_V19', 'SNIc-Templates',
'SNIcBL+HostXT_V19', 'TDE', 'd-Sct', 'dwarf-nova', 'uLens-Binary',
'uLens-Single-GenLens', 'uLens-Single_PyLIMA' ]:
# for objclass in [ 'KN_K17' ]:
ltcvs = esr.get_all_ltcvs( objclass, agg=True, include_header=True )
ltcvs.write_parquet( outdir / f'{objclass}.parquet' )
sys.stderr.write( f"Did {objclass}\n" )
# ======================================================================
if __name__ == "__main__":
main()
|
LSSTDESCREPO_NAMEelasticcPATH_START.@elasticc_extracted@elasticc-main@lib_elasticc2@[email protected]_END.py
|
{
"filename": "constants.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/numpy/doc/constants.py",
"type": "Python"
}
|
"""
=========
Constants
=========
NumPy includes several constants:
%(constant_list)s
"""
#
# Note: the docstring is autogenerated.
#
from __future__ import division, absolute_import, print_function
import textwrap, re
# Maintain same format as in numpy.add_newdocs
constants = []
def add_newdoc(module, name, doc):
constants.append((name, doc))
add_newdoc('numpy', 'Inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Infinity',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'NAN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NAN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NINF',
"""
IEEE 754 floating point representation of negative infinity.
Returns
-------
y : float
A floating point representation of negative infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Examples
--------
>>> np.NINF
-inf
>>> np.log(0)
-inf
""")
add_newdoc('numpy', 'NZERO',
"""
IEEE 754 floating point representation of negative zero.
Returns
-------
y : float
A floating point representation of negative zero.
See Also
--------
PZERO : Defines positive zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Negative zero is considered to be a finite number.
Examples
--------
>>> np.NZERO
-0.0
>>> np.PZERO
0.0
>>> np.isfinite([np.NZERO])
array([ True])
>>> np.isnan([np.NZERO])
array([False])
>>> np.isinf([np.NZERO])
array([False])
""")
add_newdoc('numpy', 'NaN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NaN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'PINF',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'PZERO',
"""
IEEE 754 floating point representation of positive zero.
Returns
-------
y : float
A floating point representation of positive zero.
See Also
--------
NZERO : Defines negative zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Positive zero is considered to be a finite number.
Examples
--------
>>> np.PZERO
0.0
>>> np.NZERO
-0.0
>>> np.isfinite([np.PZERO])
array([ True])
>>> np.isnan([np.PZERO])
array([False])
>>> np.isinf([np.PZERO])
array([False])
""")
add_newdoc('numpy', 'e',
"""
Euler's constant, base of natural logarithms, Napier's constant.
``e = 2.71828182845904523536028747135266249775724709369995...``
See Also
--------
exp : Exponential function
log : Natural logarithm
References
----------
.. [1] http://en.wikipedia.org/wiki/Napier_constant
""")
add_newdoc('numpy', 'inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Returns
-------
y : float
A floating point representation of positive infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
`Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
Examples
--------
>>> np.inf
inf
>>> np.array([1]) / 0.
array([ Inf])
""")
add_newdoc('numpy', 'infty',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'nan',
"""
IEEE 754 floating point representation of Not a Number (NaN).
Returns
-------
y : A floating point representation of Not a Number.
See Also
--------
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite (not one of
Not a Number, positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
`NaN` and `NAN` are aliases of `nan`.
Examples
--------
>>> np.nan
nan
>>> np.log(-1)
nan
>>> np.log([-1, 1, 2])
array([ NaN, 0. , 0.69314718])
""")
add_newdoc('numpy', 'newaxis',
"""
A convenient alias for None, useful for indexing arrays.
See Also
--------
`numpy.doc.indexing`
Examples
--------
>>> newaxis is None
True
>>> x = np.arange(3)
>>> x
array([0, 1, 2])
>>> x[:, newaxis]
array([[0],
[1],
[2]])
>>> x[:, newaxis, newaxis]
array([[[0]],
[[1]],
[[2]]])
>>> x[:, newaxis] * x
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
Outer product, same as ``outer(x, y)``:
>>> y = np.arange(3, 6)
>>> x[:, newaxis] * y
array([[ 0, 0, 0],
[ 3, 4, 5],
[ 6, 8, 10]])
``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
>>> x[newaxis, :].shape
(1, 3)
>>> x[newaxis].shape
(1, 3)
>>> x[None].shape
(1, 3)
>>> x[:, newaxis].shape
(3, 1)
""")
if __doc__:
constants_str = []
constants.sort()
for name, doc in constants:
s = textwrap.dedent(doc).replace("\n", "\n ")
# Replace sections by rubrics
lines = s.split("\n")
new_lines = []
for line in lines:
m = re.match(r'^(\s+)[-=]+\s*$', line)
if m and new_lines:
prev = textwrap.dedent(new_lines.pop())
new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
new_lines.append('')
else:
new_lines.append(line)
s = "\n".join(new_lines)
# Done.
constants_str.append(""".. const:: %s\n %s""" % (name, s))
constants_str = "\n".join(constants_str)
__doc__ = __doc__ % dict(constant_list=constants_str)
del constants_str, name, doc
del line, lines, new_lines, m, s, prev
del constants, add_newdoc
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@[email protected]@site-packages@numpy@[email protected]@.PATH_END.py
|
{
"filename": "chemistry.py",
"repo_name": "geodynamics/burnman",
"repo_path": "burnman_extracted/burnman-main/burnman/utils/chemistry.py",
"type": "Python"
}
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit
# for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2021 by the BurnMan team, released under the GNU
# GPL v2 or later.
# This module provides the functions required to process the
# standard burnman formula formats.
# tools.chemistry returns the number of atoms and molar mass of a compound
# given its unit formula as an argument.
# process_solution_chemistry returns information required to calculate
# solution properties from a set of endmember formulae
from __future__ import absolute_import
import re
import numpy as np
from fractions import Fraction
from collections import Counter
import pkgutil
from string import ascii_uppercase as ucase
from sympy import nsimplify
def read_masses():
"""
A simple function to read a file with a two column list of
elements and their masses into a dictionary
"""
datastream = pkgutil.get_data("burnman", "data/input_masses/atomic_masses.dat")
datalines = [
line.strip() for line in datastream.decode("ascii").split("\n") if line.strip()
]
lookup = dict()
for line in datalines:
data = "%".join(line.split("%")[:1]).split()
if data != []:
lookup[data[0]] = float(data[1])
return lookup
"""
atomic_masses is a dictionary of atomic masses
"""
atomic_masses = read_masses()
"""
IUPAC_element_order provides a list of all the elements.
Element order is based loosely on electronegativity,
following the scheme suggested by IUPAC, except that H
comes after the Group 16 elements, not before them.
"""
IUPAC_element_order = [
"v",
"Og",
"Rn",
"Xe",
"Kr",
"Ar",
"Ne",
"He", # Group 18
"Fr",
"Cs",
"Rb",
"K",
"Na",
"Li", # Group 1 (not H)
"Ra",
"Ba",
"Sr",
"Ca",
"Mg",
"Be", # Group 2
"Lr",
"No",
"Md",
"Fm",
"Es",
"Cf",
"Bk",
"Cm",
"Am",
"Pu",
"Np",
"U",
"Pa",
"Th",
"Ac", # Actinides
"Lu",
"Yb",
"Tm",
"Er",
"Ho",
"Dy",
"Tb",
"Gd",
"Eu",
"Sm",
"Pm",
"Nd",
"Pr",
"Ce",
"La", # Lanthanides
"Y",
"Sc", # Group 3
"Rf",
"Hf",
"Zr",
"Ti", # Group 4
"Db",
"Ta",
"Nb",
"V", # Group 5
"Sg",
"W",
"Mo",
"Cr", # Group 6
"Bh",
"Re",
"Tc",
"Mn", # Group 7
"Hs",
"Os",
"Ru",
"Fe", # Group 8
"Mt",
"Ir",
"Rh",
"Co", # Group 9
"Ds",
"Pt",
"Pd",
"Ni", # Group 10
"Rg",
"Au",
"Ag",
"Cu", # Group 11
"Cn",
"Hg",
"Cd",
"Zn", # Group 12
"Nh",
"Tl",
"In",
"Ga",
"Al",
"B", # Group 13
"Fl",
"Pb",
"Sn",
"Ge",
"Si",
"C", # Group 14
"Mc",
"Bi",
"Sb",
"As",
"P",
"N", # Group 15
"Lv",
"Po",
"Te",
"Se",
"S",
"O", # Group 16
"H", # hydrogen
"Ts",
"At",
"I",
"Br",
"Cl",
"F",
] # Group 17
def dictionarize_formula(formula):
"""
A function to read a chemical formula string and
convert it into a dictionary
:param formula: Chemical formula, written in the XnYm format, where
the formula has n atoms of element X and m atoms of element Y
:type formula: str
:returns: The same chemical formula, but expressed as a dictionary.
:rtype: dict
"""
f = dict()
elements = re.findall("[A-Z][^A-Z]*", formula)
for element in elements:
element_name = re.split("[0-9][^A-Z]*", element)[0]
element_atoms = re.findall("[0-9][^A-Z]*", element)
if len(element_atoms) == 0:
element_atoms = Fraction(1.0)
else:
element_atoms = Fraction(element_atoms[0])
f[element_name] = f.get(element_name, 0.0) + element_atoms
return f
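# Illustrative usage (added comment, not from the original source):
#   dictionarize_formula('Mg2SiO4')
#   # -> {'Mg': Fraction(2, 1), 'Si': Fraction(1, 1), 'O': Fraction(4, 1)}
# Fractional amounts such as 'Fe1/2' are parsed with fractions.Fraction as well.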
def sum_formulae(formulae, amounts=None):
"""
Adds together a set of formulae.
:param formulae: List of chemical formulae.
:type formulae: list of dictionary or counter objects
:param amounts: List of amounts of each formula.
:type amounts: list of floats
:returns: The sum of the user-provided formulae
:rtype: Counter object
"""
if amounts is None:
amounts = [1.0 for formula in formulae]
else:
assert len(formulae) == len(amounts)
summed_formula = Counter()
for i, formula in enumerate(formulae):
summed_formula.update(
Counter(
{
element: amounts[i] * n_atoms
for (element, n_atoms) in formula.items()
}
)
)
return summed_formula
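# Example sketch (added comment only): combining two oxide formulae,
#   sum_formulae([{'Mg': 1, 'O': 1}, {'Si': 1, 'O': 2}], amounts=[2.0, 1.0])
#   # -> a Counter holding {'Mg': 2.0, 'Si': 1.0, 'O': 4.0}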
def formula_mass(formula):
"""
A function to take a chemical formula and compute the formula mass.
:param formula: A chemical formula
:type formula: dict or Counter object
:returns: The mass per mole of formula [kg]
:rtype: float
"""
mass = sum(formula[element] * atomic_masses[element] for element in formula)
return mass
def convert_formula(formula, to_type="mass", normalize=False):
"""
Converts a chemical formula from one type (mass or molar)
into the other. Renormalises amounts if normalize=True.
:param formula: A chemical formula.
:type formula: dict or Counter object
:param to_type: Conversion type, one of 'mass' or 'molar'.
:type to_type: str
:param normalize: Whether or not to normalize the converted formula to 1.
:type normalize: bool
:returns: The converted formula.
:rtype: dict
"""
if to_type == "mass":
f = {
element: n_atoms * atomic_masses[element]
for (element, n_atoms) in formula.items()
}
elif to_type == "molar":
f = {
element: n_atoms / atomic_masses[element]
for (element, n_atoms) in formula.items()
}
else:
raise Exception(
"Value of parameter to_type not recognised. "
'Should be either "mass" or "molar".'
)
if normalize:
s = np.sum([n for (element, n) in f.items()])
f = {element: n / s for (element, n) in f.items()}
return f
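# Illustrative sketch (added comment, not from the original file): converting
# a molar formula to normalised mass fractions. The exact numbers depend on
# the atomic_masses table read above; for MgO they come out near 0.60/0.40:
#   convert_formula({'Mg': 1, 'O': 1}, to_type='mass', normalize=True)
#   # -> approximately {'Mg': 0.603, 'O': 0.397}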
def process_solution_chemistry(solution_model):
"""
This function parses a class instance with a "formulas"
attribute containing site information, e.g.
[ '[Mg]3[Al]2Si3O12', '[Mg]3[Mg1/2Si1/2]2Si3O12' ]
It outputs the bulk composition of each endmember
(removing the site information), and also a set of
variables and arrays which contain the site information.
These are output in a format that can easily be used to
calculate activities and gibbs free energies, given
molar fractions of the phases and pressure
and temperature where necessary.
:param solution_model: Class must have a "formulas" attribute,
containing a list of chemical formulae with site information
:type solution_model: instance of class
:rtype: None
.. note:: Nothing is returned from this function, but the solution_model
object gains the following attributes:
* solution_formulae [list of dictionaries]
List of endmember formulae in dictionary form.
* empty_formula [string]
Abbreviated chemical formula with sites denoted by empty
square brackets.
* general_formula [string]
General chemical formula with sites denoted by
square brackets filled with a comma-separated list of species
* n_sites [integer]
Number of sites in the solution.
Should be the same for all endmembers.
* sites [list of lists of strings]
A list of species for each site in the solution.
* site_names [list of strings]
A list of species_site pairs in the solution, where
each distinct site is given by a unique uppercase letter
e.g. ['Mg_A', 'Fe_A', 'Al_A', 'Al_B', 'Si_B'].
* n_occupancies [integer]
Sum of the number of possible species on each of the sites
in the solution.
Example: A binary solution [[A][B],[B][C1/2D1/2]] would have
n_occupancies = 5, with two possible species on
Site 1 and three on Site 2.
* site_multiplicities [2D array of floats]
A 1D array for each endmember in the solution,
containing the multiplicities of each site per formula unit.
To simplify computations later, the multiplicities
are repeated for each species on each site, so the shape of
this attribute is (n_endmembers, n_site_species).
* endmember_occupancies [2d array of floats]
A 1D array for each endmember in the solution,
containing the fraction of atoms of each species on each site.
* endmember_noccupancies [2d array of floats]
A 1D array for each endmember in the solution,
containing the number of atoms of each species on each site
per mole of endmember.
"""
formulae = solution_model.formulas
n_sites = formulae[0].count("[")
n_endmembers = len(formulae)
# Check the number of sites is the same for all endmembers
if not np.all(np.array([f.count("[") for f in formulae]) == n_sites):
raise Exception("All formulae must have the same " "number of distinct sites.")
solution_formulae = [{} for i in range(n_endmembers)]
sites = [[] for i in range(n_sites)]
list_occupancies = []
list_multiplicities = np.empty(shape=(n_endmembers, n_sites))
n_occupancies = 0
# Number of unique site occupancies (e.g. Mg on X, etc.)
for i_mbr in range(n_endmembers):
list_occupancies.append([[0] * len(sites[site]) for site in range(n_sites)])
s = re.split(r"\[", formulae[i_mbr])[1:]
for i_site, site_string in enumerate(s):
site_split = re.split(r"\]", site_string)
site_occupancy = site_split[0]
mult = re.split("[A-Z][^A-Z]*", site_split[1])[0]
if mult == "":
list_multiplicities[i_mbr][i_site] = Fraction(1.0)
else:
list_multiplicities[i_mbr][i_site] = Fraction(mult)
# Loop over species on a site
species = re.findall("[A-Z][^A-Z]*", site_occupancy)
for sp in species:
# Find the species and its proportion on the site
species_split = re.split("([0-9][^A-Z]*)", sp)
name_of_species = species_split[0]
if len(species_split) == 1:
proportion_species_on_site = Fraction(1.0)
else:
proportion_species_on_site = Fraction(species_split[1])
solution_formulae[i_mbr][name_of_species] = solution_formulae[
i_mbr
].get(name_of_species, 0.0) + (
list_multiplicities[i_mbr][i_site] * proportion_species_on_site
)
if name_of_species not in sites[i_site]:
n_occupancies += 1
sites[i_site].append(name_of_species)
i_el = sites[i_site].index(name_of_species)
for parsed_mbr in range(len(list_occupancies)):
list_occupancies[parsed_mbr][i_site].append(0)
else:
i_el = sites[i_site].index(name_of_species)
list_occupancies[i_mbr][i_site][i_el] = proportion_species_on_site
# Loop over species after site
if len(site_split) != 1:
not_in_site = str(filter(None, site_split[1]))
not_in_site = not_in_site.replace(mult, "", 1)
for enamenumber in re.findall("[A-Z][^A-Z]*", not_in_site):
sp = list(filter(None, re.split(r"(\d+)", enamenumber)))
# Look up number of atoms of element
if len(sp) == 1:
nel = 1.0
else:
nel = float(sp[1])
solution_formulae[i_mbr][sp[0]] = (
solution_formulae[i_mbr].get(sp[0], 0.0) + nel
)
# Site occupancies and multiplicities
endmember_occupancies = np.empty(shape=(n_endmembers, n_occupancies))
site_multiplicities = np.empty(shape=(n_endmembers, n_occupancies))
for i_mbr in range(n_endmembers):
n_species = 0
for i_site in range(n_sites):
for i_el in range(len(list_occupancies[i_mbr][i_site])):
endmember_occupancies[i_mbr][n_species] = list_occupancies[i_mbr][
i_site
][i_el]
site_multiplicities[i_mbr][n_species] = list_multiplicities[i_mbr][
i_site
]
n_species += 1
# Site names
solution_model.site_names = []
for i, species in enumerate(sites):
for sp in species:
solution_model.site_names.append("{0}_{1}".format(sp, ucase[i]))
# Finally, make attributes for solution model instance:
solution_model.solution_formulae = solution_formulae
solution_model.n_sites = n_sites
solution_model.sites = sites
solution_model.site_multiplicities = site_multiplicities
solution_model.n_occupancies = n_occupancies
solution_model.endmember_occupancies = endmember_occupancies
solution_model.endmember_noccupancies = np.einsum(
"ij, ij->ij", endmember_occupancies, site_multiplicities
)
solution_model.empty_formula = re.sub(
"([\\[]).*?([\\]])", "\\g<1>\\g<2>", solution_model.formulas[0]
)
split_empty = solution_model.empty_formula.split("[")
solution_model.general_formula = split_empty[0]
for i in range(n_sites):
solution_model.general_formula += f"[{','.join(sites[i])}{split_empty[i+1]}"
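# Usage sketch (added comment for clarity, not in the original source; the
# class and variable names below are hypothetical):
#   class _PyropeAlmandine: pass
#   model = _PyropeAlmandine()
#   model.formulas = ['[Mg]3[Al]2Si3O12', '[Fe]3[Al]2Si3O12']
#   process_solution_chemistry(model)
#   model.n_sites          # -> 2
#   model.sites            # -> [['Mg', 'Fe'], ['Al']]
#   model.site_names       # -> ['Mg_A', 'Fe_A', 'Al_B']
#   model.general_formula  # -> '[Mg,Fe]3[Al]2Si3O12'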
def site_occupancies_to_strings(
site_species_names, site_multiplicities, endmember_occupancies
):
"""
Converts a list of endmember site occupancies into a list
of string representations of those occupancies.
:param site_species_names: A list of list of strings,
giving the names of the species which reside on each site.
List of sites, each of which contains a list of the species
occupying each site.
:type site_species_names: 2D list of strings
:param site_multiplicities: List of floats giving the multiplicity
of each site. If 2D, must have the same shape as endmember_occupancies.
If 1D, must be either the same length as the number of sites, or
the same length as site_species_names
(with an implied repetition of the same
number for each species on a given site).
:type site_multiplicities: 1D or 2D numpy array of floats
:param endmember_occupancies: A list of site-species occupancies
for each endmember. The first dimension loops over the endmembers, and the
second dimension loops over the site-species occupancies for that endmember.
The total number and order of occupancies must
be the same as the strings in site_species_names.
:type endmember_occupancies: 2D numpy array of floats
:returns: A list of strings in standard burnman format.
For example, [Mg]3[Al]2 would correspond to the
classic two-site pyrope garnet.
:rtype: list of strings
"""
site_multiplicities = np.array(site_multiplicities)
endmember_occupancies = np.array(endmember_occupancies)
n_endmembers = endmember_occupancies.shape[0]
if len(site_multiplicities.shape) == 1:
# Site multiplicities should either be given on a per-site basis,
# or a per-species basis
if len(site_species_names) == len(site_multiplicities):
site_mults = []
for i, site in enumerate(site_species_names):
for species in site:
site_mults.append(site_multiplicities[i])
site_multiplicities = np.array(site_mults)
elif len(endmember_occupancies[0]) != len(site_multiplicities):
raise Exception(
"Site multiplicities should either be given "
"on a per-site basis or a per-species basis"
)
site_multiplicities = np.einsum(
"i, j->ij", np.ones(n_endmembers), site_multiplicities
)
elif len(site_multiplicities.shape) == 2:
if site_multiplicities.shape != endmember_occupancies.shape:
raise Exception(
"If site_multiplicities is 2D, it should have "
"the same shape as endmember_occupancies. "
"They currently have shapes "
f"{site_multiplicities.shape} and "
f"{endmember_occupancies.shape}."
)
else:
raise Exception("Site multiplicities should either be 1D or 2D.")
site_formulae = []
for i_mbr, mbr_occupancies in enumerate(endmember_occupancies):
i = 0
site_formulae.append("")
for site in site_species_names:
amounts = mbr_occupancies[i : i + len(site)]
mult = site_multiplicities[i_mbr, i]
if np.abs(mult - 1.0) < 1.0e-12:
mult = ""
else:
mult = str(nsimplify(mult))
amounts /= sum(amounts)
site_occupancy = formula_to_string(dict(zip(site, amounts)))
site_formulae[-1] += "[{0}]{1}".format(site_occupancy, mult)
i += len(site)
return site_formulae
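# Sketch of the inverse direction (added comment only), matching the
# docstring's two-site garnet example:
#   site_occupancies_to_strings([['Mg', 'Fe'], ['Al']],
#                               [3.0, 2.0],
#                               [[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
#   # -> ['[Mg]3[Al]2', '[Fe]3[Al]2']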
def compositional_array(formulae):
"""
:param formulae: List of chemical formulae
:type formulae: list of dicts
:returns: Array of endmember formulae and a list of elements.
:rtype: 2D numpy.array of floats and a list of strs
"""
elements = []
for formula in formulae:
for element in formula:
if element not in elements:
elements.append(element)
formula_array = ordered_compositional_array(formulae, elements)
return formula_array, elements
def ordered_compositional_array(formulae, elements):
"""
:param formulae: List of chemical formulae
:type formulae: list of dicts
:param elements: List of elements
:type elements: list of strings
:returns: Array of endmember formulae
:rtype: 2D array of floats
"""
formula_array = np.zeros(shape=(len(formulae), len(elements)))
for idx, formula in enumerate(formulae):
for element in formula:
assert element in elements
formula_array[idx][elements.index(element)] = formula[element]
return formula_array
def formula_to_string(formula):
"""
:param formula: Chemical formula
:type formula: dict or Counter
:returns: A formula string, with element order as given in the list
IUPAC_element_order.
If one or more keys in the dictionary are not one of the elements
in the periodic table, then they are added at the end of the string.
:rtype: str
"""
formula_string = ""
for e in IUPAC_element_order:
if e in formula and np.abs(formula[e]) > 1.0e-12:
if np.abs(formula[e] - 1.0) < 1.0e-12:
formula_string += e
else:
formula_string += e + str(nsimplify(formula[e]))
for e in formula:
if e not in IUPAC_element_order:
if e in formula and np.abs(formula[e]) > 1.0e-12:
if np.abs(formula[e] - 1.0) < 1.0e-12:
formula_string += e
else:
formula_string += e + str(nsimplify(formula[e]))
return formula_string
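# Example (added comment only): elements are emitted in IUPAC_element_order,
# regardless of the dictionary order passed in:
#   formula_to_string({'Si': 1, 'O': 4, 'Mg': 2})   # -> 'Mg2SiO4'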
def sort_element_list_to_IUPAC_order(element_list):
"""
:param element_list: List of elements.
:type element_list: list
:returns: List of elements sorted into IUPAC order
:rtype: list
"""
sorted_list = [e for e in IUPAC_element_order if e in element_list]
assert len(sorted_list) == len(element_list)
return sorted_list
def convert_fractions(composite, phase_fractions, input_type, output_type):
"""
Takes a composite with a set of user defined molar, volume
or mass fractions (which do not have to be the fractions
currently associated with the composite) and
converts the fractions to molar, mass or volume.
Conversions to and from mass require a molar mass to be
defined for all phases. Conversions to and from volume
require set_state to have been called for the composite.
:param composite: Composite for which fractions are to be defined.
:type composite: :class:`~burnman.Composite`
:param phase_fractions: List of input phase fractions
(of type input_type).
:type phase_fractions: list of floats
:param input_type: Input fraction type. One of 'molar', 'mass' or 'volume'.
:type input_type: str
:param output_type: Output fraction type. One of 'molar', 'mass' or 'volume'.
:type output_type: str
:returns: List of output phase fractions (of type output_type)
:rtype: list of floats
"""
if input_type == "volume" or output_type == "volume":
if composite.temperature is None:
raise Exception(
composite.to_string()
+ ".set_state(P, T) has not been called, so volume fractions are currently undefined. Exiting."
)
if input_type == "molar":
molar_fractions = phase_fractions
if input_type == "volume":
total_moles = sum(
volume_fraction / phase.molar_volume
for volume_fraction, phase in zip(phase_fractions, composite.phases)
)
molar_fractions = [
volume_fraction / (phase.molar_volume * total_moles)
for volume_fraction, phase in zip(phase_fractions, composite.phases)
]
if input_type == "mass":
total_moles = sum(
mass_fraction / phase.molar_mass
for mass_fraction, phase in zip(phase_fractions, composite.phases)
)
molar_fractions = [
mass_fraction / (phase.molar_mass * total_moles)
for mass_fraction, phase in zip(phase_fractions, composite.phases)
]
if output_type == "volume":
total_volume = sum(
molar_fraction * phase.molar_volume
for molar_fraction, phase in zip(molar_fractions, composite.phases)
)
output_fractions = [
molar_fraction * phase.molar_volume / total_volume
for molar_fraction, phase in zip(molar_fractions, composite.phases)
]
elif output_type == "mass":
total_mass = sum(
molar_fraction * phase.molar_mass
for molar_fraction, phase in zip(molar_fractions, composite.phases)
)
output_fractions = [
molar_fraction * phase.molar_mass / total_mass
for molar_fraction, phase in zip(molar_fractions, composite.phases)
]
elif output_type == "molar":
output_fractions = molar_fractions
return output_fractions
def reaction_matrix_as_strings(reaction_matrix, compound_names):
"""
Returns a list of string representations of all the reactions in
reaction_matrix.
:param reaction_matrix: Matrix of stoichiometric amounts
of each compound j in reaction i.
:type reaction_matrix: 2D numpy array
:param compound_names: List of compound names.
:type compound_names: list of strings
:returns: List of strings corresponding to each reaction.
:rtype: list of strings
"""
reaction_strings = []
for reaction in reaction_matrix:
lhs, rhs = ("", "")
for i, coefficient in enumerate(reaction):
if coefficient < -1.0e-10:
if len(lhs) > 0:
lhs += " + "
lhs += f"{-coefficient} {compound_names[i]}"
if coefficient > 1.0e-10:
if len(rhs) > 0:
rhs += " + "
rhs += f"{coefficient} {compound_names[i]}"
reaction_strings.append(f"{lhs} = {rhs}")
return reaction_strings
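# Example sketch (added comment only): a single reaction MgO + SiO2 = MgSiO3,
#   reaction_matrix_as_strings(np.array([[-1.0, -1.0, 1.0]]),
#                              ['MgO', 'SiO2', 'MgSiO3'])
#   # -> ['1.0 MgO + 1.0 SiO2 = 1.0 MgSiO3']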
|
geodynamicsREPO_NAMEburnmanPATH_START.@burnman_extracted@burnman-main@burnman@[email protected]@.PATH_END.py
|
{
"filename": "gaussian.py",
"repo_name": "minzastro/unidam",
"repo_path": "unidam_extracted/unidam-master/unidam/fitters/gaussian.py",
"type": "Python"
}
|
import numpy as np
from scipy.optimize import curve_fit
from unidam.utils.mathematics import kl_divergence, wstatistics
from unidam.fitters import basic
from unidam.utils.extra_functions import unidam_extra_functions as uef
from scipy.stats import norm
def tgauss(dummy, x, mu, sigma, lower, upper):
"""
Proxy for truncated Gaussian.
"""
result = np.zeros_like(x)
mask = (x >= lower) * (x <= upper)
result[mask] = uef.trunc_normal(x[mask], mu, sigma, lower, upper)
return result
class TGaussianFit(basic.PdfFitter):
USE_TRF = True
LETTER = 'T'
FUNC = tgauss
def _get_residual_sum_sq(self, solution):
return np.sum(self._get_residual_values(solution) ** 2)
def _get_residual_values(self, solution):
return self.y - self.FUNC(self.x,
solution[0][0],
solution[0][1],
self.x[solution[1]],
self.x[solution[2]])
def _get_function(self, solution):
def fit_fun(x, mu, sigma):
return uef.trunc_normal(x, mu, sigma,
self.x[solution[1]],
self.x[solution[2]])
return fit_fun
def _local_fit(self, solution):
try:
popt, pcov = curve_fit(self._get_function(solution),
self.x, self.y,
solution[0], method='trf',
ftol=1e-4, bounds=self.bounds)
if self.is_solution_ok(popt, pcov):
return popt
else:
return self.init_params
except ValueError:
return self.init_params
except RuntimeError:
return self.init_params
def _move_lower(self, solution):
y_pred = norm.pdf(x=self.x, loc=solution[0][0], scale=solution[0][1])
lower = solution[1]
while lower > 0:
lower -= 1
if np.abs(y_pred[lower]) < np.abs(self.y[lower] - y_pred[lower]):
lower += 1
break
new_solution = [solution[0], lower - 1, solution[2], 0, False]
#print(solution, new_solution)
test = self._local_fit(new_solution)
if test is not None:
new_solution[0] = test
residual = self._get_residual_sum_sq(new_solution)
new_solution[3] = residual
if residual < solution[3]:
new_solution[4] = True
else:
new_solution[1] += 1
if new_solution[1] == 0:
new_solution[4] = False
else:
new_solution[1] += 1
return new_solution
def _move_upper(self, solution):
new_solution = [solution[0], solution[1], solution[2] + 1, 0, False]
test = self._local_fit(new_solution)
if test is not None:
new_solution[0] = test
residual = self._get_residual_sum_sq(new_solution)
new_solution[3] = residual
if residual < solution[3]:
new_solution[4] = True
else:
new_solution[2] -= 1
if new_solution[2] >= len(self.x) - 1:
new_solution[4] = False
else:
new_solution[2] -= 1
return new_solution
def _fit(self):
modepos = np.argmax(self.y)
w = np.where(self.y > self.y.max() * 0.2)[0]
self.lower = w[0]
self.upper = w[-1]
if self.lower == self.upper:
self.lower = max(self.lower - 1, 0)
self.upper = min(self.upper + 1, len(self.x) - 1)
solution = [wstatistics(self.x, self.y, 2),
self.lower, self.upper,
0, True]
solution[0] = self._local_fit(solution)
solution[3] = self._get_residual_sum_sq(solution)
if modepos > 0:
while solution[-1] and self.lower > 0:
# Extend the lower bound downward gradually,
# re-fitting at each step, while the residuals decrease.
solution = self._move_lower(solution)
else:
solution[1] = 0
solution[-1] = True
if self.upper > modepos and self.upper < len(self.x) - 1:
while solution[-1]:
# Increase the upper bound gradually,
# re-fitting at each step, while the residuals decrease.
solution = self._move_upper(solution)
best = solution[0]
result = [best[0], best[1], self.x[solution[1]], self.x[solution[2]]]
return [result, kl_divergence(self.x, self.FUNC, result, self.y)]
def is_applicable(self):
return len(self.x) > 2
|
minzastroREPO_NAMEunidamPATH_START.@unidam_extracted@unidam-master@unidam@[email protected]@.PATH_END.py
|
{
"filename": "required_by_vounit.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/units/required_by_vounit.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines SI prefixed units that are required by the VOUnit standard
but that are rarely used in practice and liable to lead to confusion (such as
``msolMass`` for milli-solar mass). They are in a separate module from
`astropy.units.deprecated` because they need to be enabled by default for
`astropy.units` to parse compliant VOUnit strings. As a result, e.g.,
``Unit('msolMass')`` will just work, but to access the unit directly, use
``astropy.units.required_by_vounit.msolMass`` instead of the more typical idiom
possible for the non-prefixed unit, ``astropy.units.solMass``.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
_ns = globals()
def _initialize_module():
# Local imports to avoid polluting top-level namespace
from . import cgs
from . import astrophys
from .core import def_unit, _add_prefixes
_add_prefixes(astrophys.solMass, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.solRad, namespace=_ns, prefixes=True)
_add_prefixes(astrophys.solLum, namespace=_ns, prefixes=True)
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import (generate_unit_summary as _generate_unit_summary,
generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary)
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
__doc__ += _generate_prefixonly_unit_summary(globals())
def _enable():
"""
Enable the VOUnit-required extra units so they appear in results of
`~astropy.units.UnitBase.find_equivalent_units` and
`~astropy.units.UnitBase.compose`, and are recognized in the ``Unit('...')``
idiom.
"""
# Local import to avoid cyclical import
from .core import add_enabled_units
# Local import to avoid polluting namespace
import inspect
return add_enabled_units(inspect.getmodule(_enable))
# Because these are VOUnit mandated units, they start enabled (which is why the
# function is hidden).
_enable()
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@[email protected]@site-packages@astropy@units@[email protected]_END.py
|
{
"filename": "h5py_wrap.py",
"repo_name": "galtay/urchin",
"repo_path": "urchin_extracted/urchin-main/src/example_config/eagle/select_halo/h5py_wrap.py",
"type": "Python"
}
|
""" Simple convenience functions to access the h5py library. """
import h5py
import numpy as np
import os.path
__all__ = ['ra', 'raa', 'rd', 'wd', 'wa', 'cg']
class _Error(Exception):
"""Base class for exceptions in this module."""
pass
class _OverwriteError(_Error):
"""Exception raised for attempting to overwrite data. """
def __init__(self, message ):
Exception.__init__( self, message )
# Reading methods
#=========================================================================
def ra( fname, path, name ):
""" Read Attribute. Return a single attribute called <name> associated
with a group or dataset located at <path> in file <fname>.
e.g. ra( fname, '/PartType0/Coordinates', 'h-scale-exponent' ). """
attr = None
with h5py.File( fname, 'r' ) as h5f:
attr = h5f[path].attrs[name]
return attr
def raa( fname, path ):
""" Read All Attributes. Return a dictionary of all attributes associated
with a group or dataset located at <path> in file <fname>.
e.g. ra( fname, '/PartType0/Coordinates' ). """
attrs = {}
with h5py.File( fname, 'r' ) as h5f:
attr_names = h5f[path].attrs.keys()
for name in attr_names:
attrs[name] = h5f[path].attrs[name]
return attrs
def rd( fname, path, dtype=None ):
""" Read Data. Return a dataset located at <path> in file <fname> as
a numpy array.
e.g. rd( fname, '/PartType0/Coordinates' ). """
data = None
with h5py.File( fname, 'r' ) as h5f:
ds = h5f[path]
if dtype is None:
dtype = ds.dtype
# read the dataset and honour the requested dtype (previously a zeros
# array was allocated and then immediately overwritten, so `dtype` was ignored)
data = np.asarray( ds.value, dtype=dtype )
return data
# Writing methods
#=========================================================================
def wd( fname, path, name, buf, attrs=None, overwrite=False ):
""" Write Data. Write a dataset stored in <buf> to hdf5 file <fname>
at location <path>/<name>. Optionally a dictionary of attributes
can be provided which will be written with the dataset.
e.g. wd( fname, '/PartType0', 'Coordinates', pos [,attrs] ). """
if os.path.exists( fname ):
access = 'a'
else:
access = 'w'
with h5py.File( fname, access ) as h5f:
ds_path = path + '/' + name
if access == 'a':
path_exists = ds_path in h5f
elif access == 'w':
path_exists = False
# if we are trying to overwrite a dataset w/o setting overwrite=True
#--------------------------------------------------------------------
if path_exists and not overwrite:
msg = '\n attempting to overwrite a dataset without setting ' + \
'overwrite=True \n ' + \
'file name: ' + fname + ' \n ' + \
'path: ' + path + ' \n ' + \
'name: ' + name
raise _OverwriteError( msg )
return
# otherwise delete ds if needed and write new one
#--------------------------------------------------------------------
else:
if path_exists:
del h5f[ds_path]
h5f.create_dataset( ds_path, data=buf )
if attrs:
for k,v in attrs.items():
h5f[ds_path].attrs[k] = v
def wa( fname, path, name, buf ):
""" Write Attribute. Write a single attribute stored in <buf> called
<name> associated with a group or dataset located at <path> in file <fname>.
e.g. wa( fname, '/PartType0/Coordinates', 'h-scale-exponent', -1.0 ). """
if os.path.exists( fname ):
access = 'a'
else:
access = 'w'
with h5py.File( fname, access ) as h5f:
h5f[path].attrs[name] = buf
def cg( fname, path ):
""" Create Group. Creates a group at <path> in file <fname>.
e.g. cg( fname, '/PartType0' ). """
if os.path.exists( fname ):
access = 'a'
else:
access = 'w'
with h5py.File( fname, access ) as h5f:
h5f.require_group( path )
|
galtayREPO_NAMEurchinPATH_START.@urchin_extracted@urchin-main@src@example_config@eagle@select_halo@[email protected]_END.py
|
{
"filename": "mirrored_strategy.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/distribute/experimental/mirrored_strategy.py",
"type": "Python"
}
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement a MirroredStrategy based on the DTensor low level API.
This is an experiment to validate the viability of the DTensor API, and expose
any potential feature gaps between the current API and the need.
"""
from tensorflow.dtensor.python import config as d_config
from tensorflow.dtensor.python import mesh_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute.experimental import dtensor_strategy_extended
from tensorflow.python.distribute.experimental import dtensor_util
from tensorflow.python.framework import device as tf_device
class MirroredStrategy(distribute_lib.Strategy):
"""Synchronous training across multiple replicas on one machine.
This strategy is typically used for training on one machine with multiple
accelerators (GPUs/TPUs).
For example, a variable created under a `MirroredStrategy` is a distributed
variable with layout replicated on each dimension. The variables will be
placed on the `mesh` that is specified in the __init__.
"""
def __init__(self, devices=None, cross_device_ops=None, *, mesh=None):
"""Synchronous training across multiple replicas on one machine.
Args:
devices: a list of device strings, such as ['/gpu:0', '/gpu:1']. If both
`mesh` and `devices` are None, all the available GPU/TPU will be used.
If no accelerators are found, CPU is used.
cross_device_ops: optional, a descendant of `CrossDeviceOps`. The value is
ignored at the moment, and support will be added later.
mesh: optional DTensor mesh for the computation. Note that either `mesh`
or `devices` should be provided, and not both. The mesh should be 1D,
and will be used to split the input data among that dimension.
"""
self._validate_init_args(mesh, devices)
if not mesh:
mesh = self._build_mesh_from_device_list(devices)
extended = dtensor_strategy_extended.DTensorStrategyExtended(
container_strategy=self, mesh=mesh)
super().__init__(extended)
self._mesh = mesh
self._devices = devices
@classmethod
def _validate_init_args(cls, mesh, devices):
if mesh and devices:
raise ValueError('Mesh and devices can not be provided at the same time. '
f'received mesh = {mesh}, devices = {devices}')
# For mirrored strategy, the mesh should be 1D, and only contains a batch
# dimension, we will use that dimension to shard the inputs.
if mesh and len(mesh.shape()) != 1:
raise ValueError('The mesh for MirroredStrategy must be 1D, received: '
f'{len(mesh.shape())}D')
@classmethod
def _build_mesh_from_device_list(cls, devices):
if devices:
device_type = tf_device.DeviceSpec.from_string(devices[0]).device_type
dtensor_util.initialize_accelerator_system_once(device_type)
mesh = mesh_util.create_mesh(
mesh_dims=[(dtensor_util.DEFAULT_BATCH_MESH_DIM_NAME, len(devices))],
devices=devices)
else:
# Trying to detect if there is any GPU/TPUs attached.
device_type = d_config.preferred_device_type()
devices = d_config.local_devices(device_type)
dtensor_util.initialize_accelerator_system_once(device_type)
mesh = mesh_util.create_mesh(
mesh_dims=[(dtensor_util.DEFAULT_BATCH_MESH_DIM_NAME, len(devices))],
device_type=device_type)
return mesh
def reduce(self, reduce_op, value, axis):
return dtensor_util.dtensor_reduce(self, reduce_op, value, axis)
@property
def mesh(self):
"""Returns the mesh used by the strategy."""
return self._mesh
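# Usage sketch (added comment, not from the original file; the device names
# are illustrative):
#   strategy = MirroredStrategy(devices=['/gpu:0', '/gpu:1'])
#   strategy.mesh          # 1D DTensor mesh with a batch dimension of size 2
#   with strategy.scope():
#       ...                # create replicated variables / models here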
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@distribute@experimental@[email protected]_END.py
|
{
"filename": "dif_dm_halos.py",
"repo_name": "dynamics-of-stellar-systems/dynamite",
"repo_path": "dynamite_extracted/dynamite-master/dev_tests/dif_dm_halos.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import logging
import time
import numpy as np
# Set matplotlib backend to 'Agg' (compatible when X11 is not running
# e.g., on a cluster). Note that the backend can only be set BEFORE
# matplotlib is used or even submodules are imported!
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from astropy import table
import dynamite as dyn
def run_user_test(make_comp=False):
logger = logging.getLogger()
logger.info(f'Using DYNAMITE version: {dyn.__version__}')
logger.info(f'Located at: {dyn.__path__}')
# print to console anyway...
print('Using DYNAMITE version:', dyn.__version__)
print('Located at:', dyn.__path__)
# read configuration
if '__file__' in globals():
file_dir = os.path.dirname(__file__)
if file_dir:
os.chdir(file_dir)
fname = 'dif_dm_halos_config.yaml'
c = dyn.config_reader.Configuration(fname, reset_logging=True)
# delete previous output if available
c.remove_all_existing_output(wipe_all=True, create_tree=True)
# "run" the models
t = time.perf_counter()
smi = dyn.model_iterator.ModelIterator(c)
delt = time.perf_counter()-t
logger.info(f'Computation time: {delt} seconds = {delt/60} minutes')
# print to console regardless of logging level
print(f'Computation time: {delt} seconds = {delt/60} minutes')
# print all model results
c.all_models.table.pprint(max_lines=-1, max_width=-1)
return
if __name__ == '__main__':
run_user_test()
# end
|
dynamics-of-stellar-systemsREPO_NAMEdynamitePATH_START.@dynamite_extracted@dynamite-master@dev_tests@[email protected]_END.py
|
{
"filename": "demo_ROS_sensor.py",
"repo_name": "projectchrono/chrono",
"repo_path": "chrono_extracted/chrono-main/src/demos/python/ros/demo_ROS_sensor.py",
"type": "Python"
}
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2021 projectchrono.org
# All right reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Aaron Young
# =============================================================================
#
# Demo to show the use of Chrono::Sensor with ROS in python
#
# =============================================================================
import pychrono as ch
import pychrono.sensor as sens
import pychrono.ros as chros
def main():
# Create the system
sys = ch.ChSystemNSC()
# Add a mesh object to make the scene interesting
mmesh = ch.ChTriangleMeshConnected()
mmesh.LoadWavefrontMesh(ch.GetChronoDataFile(
"vehicle/hmmwv/hmmwv_chassis.obj"), False, True)
mmesh.Transform(ch.ChVector3d(0, 0, 0), ch.ChMatrix33d(1))
trimesh_shape = ch.ChVisualShapeTriangleMesh()
trimesh_shape.SetMesh(mmesh)
trimesh_shape.SetName("HMMWV Chassis Mesh")
trimesh_shape.SetMutable(False)
mesh_body = ch.ChBody()
mesh_body.SetPos(ch.ChVector3d(0, 0, 0))
mesh_body.AddVisualShape(trimesh_shape)
mesh_body.SetFixed(False)
mesh_body.SetMass(0)
sys.Add(mesh_body)
# This is the body we'll attach the sensors to
ground_body = ch.ChBodyEasyBox(1, 1, 1, 1000, False, False)
ground_body.SetPos(ch.ChVector3d(0, 0, 0))
ground_body.SetFixed(False)
ground_body.SetMass(0)
sys.Add(ground_body)
# Create the sensor manager
sens_manager = sens.ChSensorManager(sys)
intensity = 1.0
sens_manager.scene.AddPointLight(ch.ChVector3f(2, 2.5, 100), ch.ChColor(
intensity, intensity, intensity), 500.0)
sens_manager.scene.AddPointLight(ch.ChVector3f(9, 2.5, 100), ch.ChColor(
intensity, intensity, intensity), 500.0)
sens_manager.scene.AddPointLight(ch.ChVector3f(16, 2.5, 100), ch.ChColor(
intensity, intensity, intensity), 500.0)
sens_manager.scene.AddPointLight(ch.ChVector3f(23, 2.5, 100), ch.ChColor(
intensity, intensity, intensity), 500.0)
offset_pose = ch.ChFramed(ch.ChVector3d(-8, 0, 2),
ch.QuatFromAngleAxis(.2, ch.ChVector3d(0, 1, 0)))
cam = sens.ChCameraSensor(ground_body, 30, offset_pose, 1280, 720, 1.408)
cam.PushFilter(sens.ChFilterVisualize(1280, 720))
cam.PushFilter(sens.ChFilterRGBA8Access())
cam.SetName("camera")
sens_manager.AddSensor(cam)
lidar = sens.ChLidarSensor(ground_body, 5., offset_pose, 90, 300,
2*ch.CH_PI, ch.CH_PI / 12, -ch.CH_PI / 6, 100., 0)
lidar.PushFilter(sens.ChFilterDIAccess())
lidar.PushFilter(sens.ChFilterPCfromDepth())
lidar.PushFilter(sens.ChFilterXYZIAccess())
lidar.PushFilter(sens.ChFilterVisualizePointCloud(1280, 720, 1))
lidar.SetName("lidar")
sens_manager.AddSensor(lidar)
noise_model_none = sens.ChNoiseNone()
gps_reference = ch.ChVector3d(-89.4, 433.07, 260.)
gps = sens.ChGPSSensor(ground_body, 10, offset_pose,
gps_reference, noise_model_none)
gps.PushFilter(sens.ChFilterGPSAccess())
gps.SetName("gps")
sens_manager.AddSensor(gps)
acc = sens.ChAccelerometerSensor(
ground_body, 100, offset_pose, noise_model_none)
acc.PushFilter(sens.ChFilterAccelAccess())
acc.SetName("accelerometer")
sens_manager.AddSensor(acc)
gyro = sens.ChGyroscopeSensor(
ground_body, 100, offset_pose, noise_model_none)
gyro.PushFilter(sens.ChFilterGyroAccess())
gyro.SetName("gyroscope")
sens_manager.AddSensor(gyro)
mag = sens.ChMagnetometerSensor(
ground_body, 100, offset_pose, noise_model_none, gps_reference)
mag.PushFilter(sens.ChFilterMagnetAccess())
mag.SetName("magnetometer")
sens_manager.AddSensor(mag)
sens_manager.Update()
# Create ROS manager
ros_manager = chros.ChROSPythonManager()
ros_manager.RegisterHandler(chros.ChROSClockHandler())
ros_manager.RegisterHandler(chros.ChROSCameraHandler(
cam.GetUpdateRate() / 4, cam, "~/output/camera/data/image"))
ros_manager.RegisterHandler(chros.ChROSLidarHandler(
lidar, "~/output/lidar/data/pointcloud"))
ros_manager.RegisterHandler(
chros.ChROSGPSHandler(gps, "~/output/gps/data"))
acc_handler = chros.ChROSAccelerometerHandler(
acc, "~/output/accelerometer/data")
ros_manager.RegisterHandler(acc_handler)
gyro_handler = chros.ChROSGyroscopeHandler(
gyro, "~/output/gyroscope/data")
ros_manager.RegisterHandler(gyro_handler)
mag_handler = chros.ChROSMagnetometerHandler(
mag, "~/output/magnetometer/data")
ros_manager.RegisterHandler(mag_handler)
imu_handler = chros.ChROSIMUHandler(100, "~/output/imu/data")
imu_handler.SetAccelerometerHandler(acc_handler)
imu_handler.SetGyroscopeHandler(gyro_handler)
imu_handler.SetMagnetometerHandler(mag_handler)
ros_manager.RegisterHandler(imu_handler)
ros_manager.Initialize()
# Simulation loop
time = 0
time_step = 1e-3
time_end = 100
# Give the ground body some rotational velocity so that the sensors attached to it appear to be moving
# Note how the gyroscopes angular velocity in ROS will read 0.1 on the z-axis
ground_body.SetAngVelParent(ch.ChVector3d(0, 0, 0.1))
while time < time_end:
time = sys.GetChTime()
# Updates
sens_manager.Update()
if not ros_manager.Update(time, time_step):
break
sys.DoStepDynamics(time_step)
if __name__ == "__main__":
main()
|
projectchronoREPO_NAMEchronoPATH_START.@chrono_extracted@chrono-main@src@demos@python@ros@[email protected]_END.py
|
{
"filename": "testCOutsideComplexity.py",
"repo_name": "terryyin/lizard",
"repo_path": "lizard_extracted/lizard-master/test/testCOutsideComplexity.py",
"type": "Python"
}
|
import unittest
from .testHelpers import get_cpp_function_list_with_extension
from lizard_ext.lizardoutside import LizardExtension as CountOutsideComplexity
def analyze_with_outside_extension(code):
return get_cpp_function_list_with_extension(code, CountOutsideComplexity())
class Test_complexity_in_c_marco(unittest.TestCase):
def test_no_complexity_outside_function_global_cc_should_be_one(self):
result = analyze_with_outside_extension("")
self.assertEqual(1, len(result))
self.assertEqual("*global*", result[0].name)
self.assertEqual(1, result[0].cyclomatic_complexity)
def test_complexity_outside_should_be_counted(self):
result = analyze_with_outside_extension("#if a==b")
self.assertEqual(2, result[0].cyclomatic_complexity)
def test_complexity_outside_should_be_counted_when_there_is_function(self):
result = analyze_with_outside_extension("#if a==b\n void fun() {if(1);}\n #if 1")
self.assertEqual("*global*", result[1].name)
self.assertEqual(3, result[1].cyclomatic_complexity)
|
terryyinREPO_NAMElizardPATH_START.@lizard_extracted@lizard-master@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/table/tests/__init__.py",
"type": "Python"
}
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@table@tests@[email protected]_END.py
|
|
{
"filename": "_visible.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermapbox/_visible.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="visible", parent_name="scattermapbox", **kwargs):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", [True, False, "legendonly"]),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@scattermapbox@[email protected]_END.py
|
{
"filename": "fenced_doctest_lib.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/tools/docs/fenced_doctest_lib.py",
"type": "Python"
}
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run doctests for tensorflow."""
import ast
import doctest
import os
import re
import textwrap
from typing import Any, Callable, Dict, Iterable, Optional
import astor
from tensorflow.tools.docs import tf_doctest_lib
def load_from_files(
files,
globs: Optional[Dict[str, Any]] = None,
set_up: Optional[Callable[[Any], None]] = None,
tear_down: Optional[Callable[[Any], None]] = None) -> doctest.DocFileSuite:
"""Creates a doctest suite from the files list.
Args:
files: A list of file paths to test.
globs: The global namespace the tests are run in.
set_up: Run before each test, receives the test as argument.
tear_down: Run after each test, receives the test as argument.
Returns:
A DocFileSuite containing the tests.
"""
if globs is None:
globs = {}
# __fspath__ isn't respected everywhere in doctest so convert paths to
# strings.
files = [os.fspath(f) for f in files]
globs['_print_if_not_none'] = _print_if_not_none
# Ref: https://docs.python.org/3/library/doctest.html#doctest.DocFileSuite
return doctest.DocFileSuite(
*files,
module_relative=False,
parser=FencedCellParser(fence_label='python'),
globs=globs,
setUp=set_up,
tearDown=tear_down,
checker=FencedCellOutputChecker(),
optionflags=(doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
| doctest.IGNORE_EXCEPTION_DETAIL
| doctest.DONT_ACCEPT_BLANKLINE),
)
class FencedCellOutputChecker(tf_doctest_lib.TfDoctestOutputChecker):
"""TfDoctestChecker with a different warning message."""
MESSAGE = textwrap.dedent("""\n
##############################################################
# Check the documentation (go/g3doctest) on how to write
# testable g3docs.
##############################################################
""")
class FencedCellParser(doctest.DocTestParser):
"""Implements test parsing for ``` fenced cells.
https://docs.python.org/3/library/doctest.html#doctestparser-objects
The `get_examples` method receives a string and returns an
iterable of `doctest.Example` objects.
"""
patched = False
def __init__(self, fence_label='python'):
super().__init__()
if not self.patched:
# The default doctest compiles in "single" mode. The fenced block may
# contain multiple statements. The `_patch_compile` function fixes the
# compile mode.
doctest.compile = _patch_compile
print(
textwrap.dedent("""
*********************************************************************
* Caution: `fenced_doctest` patches `doctest.compile` don't use this
* in the same binary as any other doctests.
*********************************************************************
"""))
type(self).patched = True
# Match anything, except if the look-behind sees a closing fence.
no_fence = '(.(?<!```))*?'
self.fence_cell_re = re.compile(
rf"""
^( # After a newline
\s*```\s*({fence_label})\n # Open a labeled ``` fence
(?P<doctest>{no_fence}) # Match anything except a closing fence
\n\s*```\s*(\n|$) # Close the fence.
)
( # Optional!
[\s\n]* # Any number of blank lines.
```\s*\n # Open ```
(?P<output>{no_fence}) # Anything except a closing fence
\n\s*``` # Close the fence.
)?
""",
# Multiline so ^ matches after a newline
re.MULTILINE |
# Dotall so `.` matches newlines.
re.DOTALL |
# Verbose to allow comments/ignore-whitespace.
re.VERBOSE)
def get_examples(self,
string: str,
name: str = '<string>') -> Iterable[doctest.Example]:
# Check for a file-level skip comment.
if re.search('<!--.*?doctest.*?skip.*?all.*?-->', string, re.IGNORECASE):
return
for match in self.fence_cell_re.finditer(string):
if re.search('doctest.*skip', match.group(0), re.IGNORECASE):
continue
groups = match.groupdict()
source = textwrap.dedent(groups['doctest'])
want = groups['output']
if want is not None:
want = textwrap.dedent(want)
yield doctest.Example(
lineno=string[:match.start()].count('\n') + 1,
source=source,
want=want)
def _print_if_not_none(obj):
"""Print like a notebook: Show the repr if the object is not None.
`_patch_compile` Uses this on the final expression in each cell.
This way the outputs feel like notebooks.
Args:
obj: the object to print.
"""
if obj is not None:
print(repr(obj))
def _patch_compile(source,
filename,
mode,
flags=0,
dont_inherit=False,
optimize=-1):
"""Patch `doctest.compile` to make doctest to behave like a notebook.
Default settings for doctest are configured to run like a repl: one statement
at a time. The doctest source uses `compile(..., mode="single")`
So to let doctest act like a notebook:
1. We need `mode="exec"` (easy)
2. We need the last expression to be printed (harder).
To print the last expression, just wrap the last expression in
`_print_if_not_none(expr)`. To detect the last expression use `AST`.
If the last node is an expression modify the ast to call
`_print_if_not_none` on it, convert the ast back to source and compile that.
https://docs.python.org/3/library/functions.html#compile
Args:
source: Can either be a normal string, a byte string, or an AST object.
filename: Argument should give the file from which the code was read; pass
some recognizable value if it wasn’t read from a file ('<string>' is
commonly used).
mode: [Ignored] always use exec.
flags: Compiler options.
dont_inherit: Compiler options.
optimize: Compiler options.
Returns:
The resulting code object.
"""
# doctest passes some dummy string as the file name, AFAICT
# but tf.function freaks-out if this doesn't look like a
# python file name.
del filename
# Doctest always passes "single" here, you need exec for multiple lines.
del mode
source_ast = ast.parse(source)
final = source_ast.body[-1]
if isinstance(final, ast.Expr):
# Wrap the final expression as `_print_if_not_none(expr)`
print_it = ast.Expr(
lineno=-1,
col_offset=-1,
value=ast.Call(
func=ast.Name(
id='_print_if_not_none',
ctx=ast.Load(),
lineno=-1,
col_offset=-1),
lineno=-1,
col_offset=-1,
args=[final], # wrap the final Expression
keywords=[]))
source_ast.body[-1] = print_it
# It's not clear why this step is necessary. `compile` is supposed to handle
# AST directly.
source = astor.to_source(source_ast)
return compile(
source,
filename='dummy.py',
mode='exec',
flags=flags,
dont_inherit=dont_inherit,
optimize=optimize)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@tools@docs@[email protected]_END.py
|
{
"filename": "spleaf_esp_activity_slow.py",
"repo_name": "LucaMalavolta/PyORBIT",
"repo_path": "PyORBIT_extracted/PyORBIT-main/pyorbit/models/spleaf_esp_activity_slow.py",
"type": "Python"
}
|
from pyorbit.subroutines.common import *
from pyorbit.models.abstract_model import *
from pyorbit.keywords_definitions import *
from scipy.linalg import cho_factor, cho_solve, lapack, LinAlgError
from scipy import matrix, spatial
import sys
__all__ = ['SPLEAF_ESP_slow']
try:
from spleaf import cov as spleaf_cov
from spleaf import term as spleaf_term
except (ModuleNotFoundError,ImportError):
pass
class SPLEAF_ESP_slow(AbstractModel):
''' Three parameters out of four are the same for all the datasets, since they are related to
    the properties of the physical process rather than the observed effects on a dataset.
    From Grunblatt+2015, Affer+2016:
    - theta: is usually related to the rotation period of the star (or one of its harmonics);
- lambda: is the correlation decay timescale, and it can be related to the lifetime of the active regions.
- omega: is the length scale of the periodic component, and can be linked to the size evolution of the active regions;
- h: represents the amplitude of the correlations '''
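    # Orientation note (added; not part of the original PyORBIT source): the docstring's
    # (theta, lambda, omega, h) appear to correspond to the model parameters Prot, Pdec,
    # Oamp and Hamp defined below, which are passed to spleaf_term.ESPKernel in lnlk_compute.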
default_common = 'activity'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model_class = 'spleaf_esp'
self.internal_likelihood = True
self.list_pams_common = OrderedSet([
'Prot', # Rotational period of the star
'Pdec', # Decay timescale of activity
'Oamp', # Granulation of activity
])
self.list_pams_dataset = OrderedSet([
'Hamp' # Amplitude of the signal in the covariance matrix
])
try:
from spleaf import cov as spleaf_cov
from spleaf import term as spleaf_term
except (ModuleNotFoundError,ImportError):
print("ERROR: S+LEAF package not installed, this will not work")
quit()
self.n_harmonics = 4
def initialize_model(self, mc, **kwargs):
self.n_harmonics = kwargs.get('n_harmonics', self.n_harmonics)
print(' S+LEAF model, number of harmonics:', self.n_harmonics)
print()
if kwargs.get('hyperparameters_condition', False):
self.hyper_condition = self._hypercond_01
else:
self.hyper_condition = self._hypercond_00
if kwargs.get('rotation_decay_condition', False):
self.rotdec_condition = self._hypercond_02
else:
self.rotdec_condition = self._hypercond_00
if kwargs.get('halfrotation_decay_condition', False):
self.halfrotdec_condition = self._hypercond_03
else:
self.halfrotdec_condition = self._hypercond_00
for common_ref in self.common_ref:
if mc.common_models[common_ref].model_class == 'activity':
self.use_stellar_rotation_period = getattr(mc.common_models[common_ref], 'use_stellar_rotation_period', False)
break
for keyword in keywords_stellar_rotation:
self.use_stellar_rotation_period = kwargs.get(keyword, self.use_stellar_rotation_period)
if self.use_stellar_rotation_period:
self.list_pams_common.update(['rotation_period'])
self.list_pams_common.discard('Prot')
for common_ref in self.common_ref:
if mc.common_models[common_ref].model_class == 'activity':
self.use_stellar_activity_decay = getattr(mc.common_models[common_ref], 'use_stellar_activity_decay', False)
break
for keyword in keywords_stellar_activity_decay:
self.use_stellar_activity_decay = kwargs.get(keyword, self.use_stellar_activity_decay)
if self.use_stellar_activity_decay:
self.list_pams_common.update(['activity_decay'])
self.list_pams_common.discard('Pdec')
def lnlk_compute(self, parameter_values, dataset):
if self.use_stellar_rotation_period:
parameter_values['Prot'] = parameter_values['rotation_period']
if self.use_stellar_activity_decay:
parameter_values['Pdec'] = parameter_values['activity_decay']
if not self.hyper_condition(parameter_values):
return -np.inf
if not self.rotdec_condition(parameter_values):
return -np.inf
if not self.halfrotdec_condition(parameter_values):
return -np.inf
""" I'm creating the kernel here has """
D = spleaf_cov.Cov(dataset.x0,
err=spleaf_term.Error(np.sqrt(dataset.e ** 2.0 + dataset.jitter ** 2.0)),
GP=spleaf_term.ESPKernel(parameter_values['Hamp'],
parameter_values['Prot'],
parameter_values['Pdec'],
parameter_values['Oamp'],
nharm=self.n_harmonics))
return D.loglike(dataset.residuals)
def sample_predict(self, parameter_values, dataset, x0_input=None, return_covariance=False, return_variance=False):
if self.use_stellar_rotation_period:
parameter_values['Prot'] = parameter_values['rotation_period']
""" I'm creating the kernel here has """
D = spleaf_cov.Cov(dataset.x0,
err=spleaf_term.Error(np.sqrt(dataset.e ** 2.0 + dataset.jitter ** 2.0)),
GP=spleaf_term.ESPKernel(parameter_values['Hamp'],
parameter_values['Prot'],
parameter_values['Pdec'],
parameter_values['Oamp'],
nharm=self.n_harmonics))
if x0_input is None:
t_predict = dataset.x0
else:
t_predict = x0_input
mu, var = D.conditional(dataset.residuals, t_predict, calc_cov='diag')
if return_variance:
return mu, np.sqrt(var)
else:
return mu
@staticmethod
def _hypercond_00(parameter_values):
#Condition from Rajpaul 2017, Rajpaul+2021
return True
@staticmethod
def _hypercond_01(parameter_values):
# Condition from Rajpaul 2017, Rajpaul+2021
# Taking into account that Pdec^2 = 2*lambda_2^2
return parameter_values['Pdec']**2 > (3. / 2. / np.pi) * parameter_values['Oamp']**2 * parameter_values['Prot']**2
@staticmethod
def _hypercond_02(parameter_values):
#Condition on Rotation period and decay timescale
return parameter_values['Pdec'] > 2. * parameter_values['Prot']
@staticmethod
def _hypercond_03(parameter_values):
#Condition on Rotation period and decay timescale
return parameter_values['Pdec'] > 0.5 * parameter_values['Prot']
|
LucaMalavoltaREPO_NAMEPyORBITPATH_START.@PyORBIT_extracted@PyORBIT-main@pyorbit@models@[email protected]_END.py
|
{
"filename": "fresnel_test.ipynb",
"repo_name": "Jashcraf/poke",
"repo_path": "poke_extracted/poke-main/experiments/weber/fresnel_test.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import matplotlib.pyplot as plt
import poppy
import astropy.units as u
from custom_optics import ShiftedGaussianAperture
```
# Set up poppy optical system with decentered aperture and defocus
```python
pupil_d = 25.4e-3*u.m
wvl = 1e-6*u.m
npix = 512
OS = 1
efl = 100e-3*u.m
aper = ShiftedGaussianAperture(w=3e-3*u.m,shifty=1e-2*u.m,pupil_diam=pupil_d)
thin_lens = poppy.QuadraticLens(efl)
plt.figure(figsize=[11,5])
aper.display(what='both')
plt.show()
wf = poppy.FresnelWavefront(pupil_d,wavelength=wvl,npix=npix,oversample=OS)
wf *= aper
wf *= thin_lens
wf.propagate_fresnel(efl + 10e-3*u.m)
plt.figure(figsize=[11,5])
wf.display(what='both',colorbar=True)
plt.show()
```

Oversampling > 2x suggested for reliable results in Fresnel propagation.

```python
# Use wf as our template for detector of beamlet simulation
pixscale = wf.pixelscale.value # m'/pix
size = wf.amplitude.shape[0]
x = np.linspace(-size*pixscale/2,size*pixscale/2,size)
x,y = np.meshgrid(x,x)
r2 = np.array([x,y])
r2 = np.moveaxis(r2,-1,0)
r2 = np.moveaxis(r2,-1,0)
r2 = r2[...,np.newaxis]
print(r2.shape)
```
(512, 512, 2, 1)
# Configure a Ray Transfer Matrix and Gaussian Beam Propagation using the proposed method
```python
def propagate_qpinv_abcd(Qinv,A,B,C,D):
num = C + D @ Qinv
den = A + B @ Qinv
return num @ np.linalg.inv(den)
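# Note (added for clarity): the function above applies the tensor ABCD update of the
# inverse complex beam-parameter matrix, Q'^{-1} = (C + D Q^{-1})(A + B Q^{-1})^{-1}.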
def propagate_aligned_gaussian(Qpinv,x2,y2,k):
phase = -1j*k/2*((x2*Qpinv[0,0] + y2*Qpinv[1,0])*x2 + (x2*Qpinv[0,1] + y2*Qpinv[1,1])*y2)
return np.exp(phase)
def compute_misalign_phase(B,A,r1m,r2,k):
"""
Parameters
----------
B,A : numpy.ndarrays
elements of the ray transfer matrix
r1m : numpy.ndarray of size 2
misalignment in position in x then y
r2 : numpy.ndarray of dimension 2
detector coordinates in x and y. First dimension holds x/y, second holds coordinate
k : float, complex
wave number of simulation
"""
Binv = np.linalg.inv(B)
BinvA = Binv @ A
t1 = r1m.T @ BinvA @ r1m
t1 = t1[...,0,0]
t2 = -2 * r1m.T @ Binv @ r2
t2 = t2[...,0,0]
return np.exp(-1j*k/2*(t1 + t2))
def compute_gaussian_amplitude(Qpinv,A,B):
den = A + np.linalg.inv(B) @ Qpinv
return 1/np.sqrt(np.linalg.det(den))
def propagate_misaligned_gaussian(Qpinv,r1m,r2,k,A,B,rayprop):
x2,y2 = r2[...,0,0]-rayprop[0,0],r2[...,1,0]-rayprop[1,0]
misalign = compute_misalign_phase(B,A,r1m,r2,k)
aligned = propagate_aligned_gaussian(Qpinv,x2,y2,k)
amplitude = compute_gaussian_amplitude(Qpinv,A,B)
return amplitude*misalign*aligned
# system parameters
efl = 100e-3 # meters
prop = efl + 10e-3
wo = 3e-3
qinv = -1j*wvl.value / (np.pi * wo**2)
Qinv = np.array([[qinv,0],
[0,qinv]])
# misalign vector
r1m = np.array([0,1e-2])[...,np.newaxis]
ray = np.array([0,1e-2,0,0])[...,np.newaxis]
# set up detector
# make ray transfer matrices
focus = np.array([[1,0,0,0],
[0,1,0,0],
[-1/efl,0,1,0],
[0,-1/efl,0,1]])
propagate = np.array([[1,0,prop,0],
[0,1,0,prop],
[0,0,1,0],
[0,0,0,1]])
ABCD = propagate @ focus
A = ABCD[:2,:2]
B = ABCD[:2,2:4]
C = ABCD[2:4,:2]
D = ABCD[2:4,2:4]
rayprop = ABCD @ ray
# propagate the Q matrix
Qpinv = propagate_qpinv_abcd(Qinv,A,B,C,D)
field = propagate_misaligned_gaussian(Qpinv,r1m,r2,2*np.pi/wvl.value,A,B,rayprop)
field /= np.max(np.abs(field))
dummy = np.linspace(-1,1,2048)
plt.figure(figsize=[11,5])
plt.subplot(121)
plt.imshow(np.abs(field),cmap='gray')
plt.colorbar()
plt.subplot(122)
plt.imshow(np.angle(field),cmap='RdBu')
plt.colorbar()
plt.show()
```

```python
from scipy.ndimage import shift
from matplotlib.colors import LogNorm
field_poppy = shift(wf.wavefront,[-0.5,-0.5])
field_poppy /= np.max(np.abs(field_poppy))
mid = int(field_poppy.shape[0]/2)
cut = 128
yshift = -100
frac_dif = 3
threshold = 1/(np.exp(1)**3)
mask = np.abs(np.copy(field_poppy))
mask[field_poppy > threshold] = 1
mask[field_poppy <= threshold] = 0
frac_dif_field = (np.abs(field_poppy)**2-np.abs(field)**2)/np.abs(field_poppy)**2 * 100
plt.figure(figsize=[15,4])
plt.subplot(131)
plt.title('POPPY')
plt.imshow(np.abs(field_poppy)**2,cmap='gray')
plt.colorbar()
plt.xlim([mid-cut,mid+cut])
plt.ylim([mid-cut+yshift,mid+cut+yshift])
plt.subplot(132)
plt.title('New Beamlet Algorithm')
plt.imshow(np.abs(field)**2,cmap='gray')
plt.colorbar()
plt.xlim([mid-cut,mid+cut])
plt.ylim([mid-cut+yshift,mid+cut+yshift])
plt.subplot(133)
plt.title('Fractional Difference [%]')
plt.imshow(frac_dif_field,cmap='RdBu',vmin=-frac_dif,vmax=frac_dif)
plt.xlim([mid-cut,mid+cut])
plt.ylim([mid-cut+yshift,mid+cut+yshift])
plt.colorbar()
plt.show()
```
<ipython-input-102-ae4747493ebb>:14: RuntimeWarning: divide by zero encountered in divide
frac_dif_field = (np.abs(field_poppy)**2-np.abs(field)**2)/np.abs(field_poppy)**2 * 100

```python
from scipy.ndimage import center_of_mass
# print(center_of_mass(np.abs(field)**2))
# print(center_of_mass(np.abs(field_poppy)**2))
print(np.std(np.abs(field)**2 - np.abs(field_poppy)**2))
```
(154.90947355002203, 255.49999999999991)
(154.71262320456503, 255.50000000000003)
0.0005096343874363536
```python
def compute_std_vs_oversample():
stdlist = []
os = np.arange(1,33,1)
for OS in os:
OS = int(OS)
"The POPPY Simulation"
pupil_d = 25.4e-3*u.m
wvl = 1e-6*u.m
npix = 128
efl = 100e-3
prop = 100e-3 + 1e-3
aper = ShiftedGaussianAperture(w=1e-3*u.m,shifty=1e-2*u.m,pupil_diam=pupil_d)
thin_lens = poppy.QuadraticLens(efl*u.m)
wf = poppy.FresnelWavefront(pupil_d,wavelength=wvl,npix=npix,oversample=OS)
wf *= aper
wf *= thin_lens
wf.propagate_fresnel(prop*u.m)
field_poppy = shift(wf.wavefront,[-0.5,-0.5])
field_poppy /= np.max(np.abs(field_poppy))
"the beamlet simulation"
# Use wf as our template for detector of beamlet simulation
pixscale = wf.pixelscale.value # m'/pix
size = wf.amplitude.shape[0]
x = np.linspace(-size*pixscale/2,size*pixscale/2,size)
x,y = np.meshgrid(x,x)
r2 = np.array([x,y])
r2 = np.moveaxis(r2,-1,0)
r2 = np.moveaxis(r2,-1,0)
r2 = r2[...,np.newaxis]
wo = 1e-3
qinv = -1j*wvl.value / (np.pi * wo**2)
Qinv = np.array([[qinv,0],
[0,qinv]])
# misalign vector
r1m = np.array([0,1e-2])[...,np.newaxis]
ray = np.array([0,1e-2,0,0])[...,np.newaxis]
# set up detector
# make ray transfer matrices
focus = np.array([[1,0,0,0],
[0,1,0,0],
[-1/efl,0,1,0],
[0,-1/efl,0,1]])
propagate = np.array([[1,0,prop,0],
[0,1,0,prop],
[0,0,1,0],
[0,0,0,1]])
ABCD = propagate @ focus
A = ABCD[:2,:2]
B = ABCD[:2,2:4]
C = ABCD[2:4,:2]
D = ABCD[2:4,2:4]
rayprop = ABCD @ ray
# propagate the Q matrix
Qpinv = propagate_qpinv_abcd(Qinv,A,B,C,D)
field = propagate_misaligned_gaussian(Qpinv,r1m,r2,2*np.pi/wvl.value,A,B,rayprop)
field /= np.max(np.abs(field))
# Compute the difference
diff_irradiance = np.abs(field)**2 - np.abs(field_poppy)**2
stdlist.append(np.std(diff_irradiance))
return os,stdlist
```
```python
os,stddev = compute_std_vs_oversample()
```
Oversampling > 2x suggested for reliable results in Fresnel propagation.
```python
plt.figure()
plt.plot(os,stddev)
plt.xlabel('Oversampling Factor')
plt.ylabel('rms difference')
plt.yscale('log')
plt.show()
```

```python
# config proper ticks
nticks = 5
fsize=14
ticklocs = np.linspace(0,field.shape[0],nticks)
ticks = np.linspace(-pixscale*field.shape[0]/2,pixscale*field.shape[0]/2,nticks)*1e3
tickstr = [f'{i:.2}' for i in ticks]
ytickstr = [f'{i+100*1e3*pixscale:.2}' for i in ticks]
stddev_scaled = [i*1e3 for i in stddev]
fig,axs = plt.subplots(ncols=3,figsize=[15,4])
for ax,data,title in zip(axs[:2],[field_poppy,field],['Fresnel','Proposed']):
ax.imshow(np.abs(data)**2,cmap='inferno')
ax.set_xticks(ticklocs)
ax.set_xticklabels(tickstr)
ax.set_yticks(ticklocs)
ax.set_yticklabels(ytickstr)
ax.set_ylabel('Transverse Distance [mm]',fontsize=fsize)
ax.set_xlabel('Transverse Distance [mm]',fontsize=fsize)
ax.set_title(title,fontsize=16)
ax.set_ylabel('')
# configure the next plot
axs[2].plot(os,stddev)
axs[2].set_xlabel('Oversampling Factor',fontsize=fsize)
axs[2].set_ylabel('RMS Difference',fontsize=fsize)
axs[2].set_yscale('log')
plt.show()
```

```python
```
|
JashcrafREPO_NAMEpokePATH_START.@poke_extracted@poke-main@experiments@weber@[email protected]_END.py
|
{
"filename": "query_workflow.py",
"repo_name": "LSSTDESC/gen3_workflow",
"repo_path": "gen3_workflow_extracted/gen3_workflow-master/python/desc/gen3_workflow/query_workflow.py",
"type": "Python"
}
|
"""
Module to extract status info of the workflow tasks from the
monitoring.db file.
"""
import os
from collections import defaultdict
import sqlite3
import numpy as np
import pandas as pd
__all__ = ['query_workflow', 'print_status', 'get_task_name']
def is_uuid(value):
"""Check if the passed value is formatted like a UUID."""
sizes = tuple([len(_) for _ in value.split('-')])
return sizes == (8, 4, 4, 4, 12)
def get_task_name(job_name, bps_config=None):
"""Extract the task name from the GenericWorkflowJob name."""
# Get cluster names from any quantum clustering specification
# in the bps config yaml.
tokens = job_name.split('_')
if bps_config is not None:
cluster_names = list(bps_config['cluster'].keys())
if tokens[0] in cluster_names:
# In case of quantum clustering, we use the cluster name as
# the task name.
return tokens[0]
# If bps_config is None or if the tokens[0] is not in
# cluster_names, then check if it is formatted like a uuid,
# in which case tokens[1] is the task name.
if is_uuid(tokens[0]):
return tokens[1]
# Finally, for backwards compatibility with weeklies prior to
# w_2022_01, check if tokens[0] can be cast as an int. If not,
# then it's the cluster name.
try:
_ = int(tokens[0])
except ValueError:
return tokens[0]
return tokens[1]
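# Illustrative examples (hypothetical job names, added for clarity; not from the repo):
# get_task_name('12345_isr_903342_61') -> 'isr' (leading token parses as an int)
# get_task_name('3f2a8c1e-9b7d-4e2a-8c1f-0a1b2c3d4e5f_calibrate_42') -> 'calibrate' (UUID prefix)
# get_task_name('coaddCluster_5', bps_config={'cluster': {'coaddCluster': {}}}) -> 'coaddCluster'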
def query_workflow(workflow_name, db_file='./runinfo/monitoring.db'):
"""
Query the workflow, task, and status tables for the
status of each task. Use the task.task_stderr as the unique
identifier of each task.
"""
if not os.path.isfile(db_file):
raise FileNotFoundError(db_file)
with sqlite3.connect(db_file) as conn:
df = pd.read_sql('select * from workflow where '
f'workflow_name="{workflow_name}"', conn)
if df.empty:
raise FileNotFoundError(f'workflow {workflow_name}'
                                f' not in {db_file}')
query = f'''select task.task_stderr, status.task_status_name,
status.timestamp
from task join status on task.task_id=status.task_id and
task.run_id=status.run_id join workflow
on task.run_id=workflow.run_id where
workflow.workflow_name="{workflow_name}"
and task.task_stderr is not null
order by task.task_stderr, status.timestamp desc'''
with sqlite3.connect(db_file) as conn:
df0 = pd.read_sql(query, conn)
data = defaultdict(list)
task_stderrs = set()
for _, row in df0.iterrows():
if (row['task_stderr'] in task_stderrs and
row['task_status_name'] != "exec_done"):
continue
task_stderrs.add(row['task_stderr'])
job_name = os.path.basename(row['task_stderr']).split('.')[0]
data['job_name'].append(job_name)
task_type = get_task_name(job_name)
data['task_type'].append(task_type)
data['status'].append(row['task_status_name'])
if not data:
# No tasks have been processed yet, so return an empty dataframe.
return pd.DataFrame()
idx = np.array(data['status']) != "running_ended"
return pd.DataFrame(data=data)[idx]
def print_status(df, task_types=None):
"""
Given a dataframe from `query_workflow(...)` and a list of task types,
    print the number of tasks of each type for each status value.
"""
if task_types is None:
task_types = sorted(list(set(df['task_type'])))
wtt = 8
for task_type in task_types:
if len(task_type) > wtt:
wtt = len(task_type)
# statuses = ('pending launched running running_ended exec_done '
# 'failed dep_fail'.split())
statuses = 'pending launched running exec_done failed dep_fail'.split()
spacer = ' '
print(f'{"task_type":{wtt}}', end=spacer)
for status in statuses:
print(f'{status:>10}', end=spacer)
print(f'{"total":>10}')
for task_type in task_types:
print(f'{task_type:{wtt}}', end=spacer)
df1 = df.query(f'task_type == "{task_type}"')
for status in statuses:
df2 = df1.query(f'status == "{status}"')
print(f'{len(df2):10d}', end=spacer)
print(f'{len(df1):10d}')
|
LSSTDESCREPO_NAMEgen3_workflowPATH_START.@gen3_workflow_extracted@gen3_workflow-master@python@desc@gen3_workflow@[email protected]_END.py
|
{
"filename": "_argparse.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/argcomplete/py3/argcomplete/packages/_argparse.py",
"type": "Python"
}
|
# Copyright 2012-2023, Andrey Kislyuk and argcomplete contributors. Licensed under the terms of the
# `Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0>`_. Distribution of the LICENSE and NOTICE
# files with source copies of this package and derivative works is **REQUIRED** as specified by the Apache License.
# See https://github.com/kislyuk/argcomplete for more info.
# This file contains argparse introspection utilities used in the course of argcomplete execution.
from argparse import (
ONE_OR_MORE,
OPTIONAL,
PARSER,
REMAINDER,
SUPPRESS,
ZERO_OR_MORE,
Action,
ArgumentError,
ArgumentParser,
_get_action_name,
_SubParsersAction,
)
from gettext import gettext
from typing import Dict, List, Set, Tuple
_num_consumed_args: Dict[Action, int] = {}
def action_is_satisfied(action):
'''Returns False if the parse would raise an error if no more arguments are given to this action, True otherwise.'''
num_consumed_args = _num_consumed_args.get(action, 0)
if action.nargs in [OPTIONAL, ZERO_OR_MORE, REMAINDER]:
return True
if action.nargs == ONE_OR_MORE:
return num_consumed_args >= 1
if action.nargs == PARSER:
# Not sure what this should be, but this previously always returned False
# so at least this won't break anything that wasn't already broken.
return False
if action.nargs is None:
return num_consumed_args == 1
assert isinstance(action.nargs, int), 'failed to handle a possible nargs value: %r' % action.nargs
return num_consumed_args == action.nargs
def action_is_open(action):
'''Returns True if action could consume more arguments (i.e., its pattern is open).'''
num_consumed_args = _num_consumed_args.get(action, 0)
if action.nargs in [ZERO_OR_MORE, ONE_OR_MORE, PARSER, REMAINDER]:
return True
if action.nargs == OPTIONAL or action.nargs is None:
return num_consumed_args == 0
assert isinstance(action.nargs, int), 'failed to handle a possible nargs value: %r' % action.nargs
return num_consumed_args < action.nargs
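# Example (hypothetical action, added for clarity): for an action with nargs=2 that has so
# far consumed one argument, action_is_satisfied(action) is False (it still needs another
# argument) while action_is_open(action) is True (it can still consume more).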
def action_is_greedy(action, isoptional=False):
'''Returns True if action will necessarily consume the next argument.
isoptional indicates whether the argument is an optional (starts with -).
'''
num_consumed_args = _num_consumed_args.get(action, 0)
if action.option_strings:
if not isoptional and not action_is_satisfied(action):
return True
return action.nargs == REMAINDER
else:
return action.nargs == REMAINDER and num_consumed_args >= 1
class IntrospectiveArgumentParser(ArgumentParser):
'''The following is a verbatim copy of ArgumentParser._parse_known_args (Python 2.7.3),
except for the lines that contain the string "Added by argcomplete".
'''
def _parse_known_args(self, arg_strings, namespace):
_num_consumed_args.clear() # Added by argcomplete
self._argcomplete_namespace = namespace
self.active_actions: List[Action] = [] # Added by argcomplete
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts: Dict[Action, List[Action]] = {}
self._action_conflicts = action_conflicts # Added by argcomplete
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1 :])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
# converts arg strings to the appropriate and then takes the action
seen_actions: Set[Action] = set()
seen_non_default_actions: Set[Action] = set()
self._seen_non_default_actions = seen_non_default_actions # Added by argcomplete
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = gettext('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS or isinstance(action, _SubParsersAction):
try:
action(self, namespace, argument_values, option_string)
except BaseException:
# Begin added by argcomplete
# When a subparser action is taken and fails due to incomplete arguments, it does not merge the
# contents of its parsed namespace into the parent namespace. Do that here to allow completers to
# access the partially parsed arguments for the subparser.
if isinstance(action, _SubParsersAction):
subnamespace = action._name_parser_map[argument_values[0]]._argcomplete_namespace
for key, value in vars(subnamespace).items():
setattr(namespace, key, value)
# End added by argcomplete
raise
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
if isinstance(option_tuple, list): # Python 3.12.7+
option_tuple = option_tuple[0]
if len(option_tuple) == 3:
action, option_string, explicit_arg = option_tuple
else: # Python 3.11.9+, 3.12.3+, 3.13+
action, option_string, _, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples: List[Tuple[Action, List[str], str]] = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = gettext('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if the action expect exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = gettext('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
self.active_actions = [action] # Added by argcomplete
_num_consumed_args[action] = 0 # Added by argcomplete
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
# Begin added by argcomplete
# If the pattern is not open (e.g. no + at the end), remove the action from active actions (since
# it wouldn't be able to consume any more args)
_num_consumed_args[action] = len(args)
if not action_is_open(action):
self.active_actions.remove(action)
# End added by argcomplete
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts): # Added by argcomplete
self.active_actions.append(action) # Added by argcomplete
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index : start_index + arg_count]
start_index += arg_count
_num_consumed_args[action] = len(args) # Added by argcomplete
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts) :]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([index for index in option_string_indices if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# if we didn't use all the Positional objects, there were too few
# arg strings supplied.
if positionals:
self.active_actions.append(positionals[0]) # Added by argcomplete
self.error(gettext('too few arguments'))
# make sure all required actions were present
for action in self._actions:
if action.required:
if action not in seen_actions:
name = _get_action_name(action)
self.error(gettext('argument %s is required') % name)
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [
str(_get_action_name(action)) for action in group._group_actions if action.help is not SUPPRESS
]
msg = gettext('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@argcomplete@py3@argcomplete@packages@[email protected]_END.py
|
{
"filename": "util.py",
"repo_name": "snad-space/ztf-viewer",
"repo_path": "ztf-viewer_extracted/ztf-viewer-master/ztf_viewer/util.py",
"type": "Python"
}
|
import datetime
import json
import logging
import math
import re
from collections import defaultdict
from functools import wraps
from itertools import chain, count
import astropy.table
import numpy as np
from astropy import units
from astropy.coordinates import EarthLocation
from astropy.time import Time
from immutabledict import immutabledict
from jinja2 import Template
YEAR = datetime.datetime.now().year
PALOMAR = EarthLocation(lon=-116.863, lat=33.356, height=1706) # EarthLocation.of_site('Palomar')
# https://www.minorplanetcenter.net/iau/lists/ObsCodes.html
PALOMAR_OBS_CODE = "I41"
DEFAULT_MIN_MAX_MJD = 50000.0, 70000.0
INF = float("inf")
LN10_04 = 0.4 * np.log(10.0)
LGE_25 = 2.5 / np.log(10.0)
ABZPMAG_JY = 8.9
FILTER_COLORS = {
"g": "#62D03E",
"r": "#CC3344",
"i": "#1c1309",
"g'": "#62D03E",
"r'": "#CC3344",
"i'": "#1c1309",
"zg": "#62D03E",
"zr": "#CC3344",
"zi": "#1c1309",
"ant_g": "#05ffc5",
"ant_R": "#f08b98",
"gaia_G": "grey",
"gaia_BP": "blue",
"gaia_RP": "red",
"ps_g": "#a7d984",
"ps_r": "#e0848f",
"ps_i": "#694721",
"ps_z": "#5c5cff",
"ps_y": "#9370d8",
}
FILTERS_ORDER = defaultdict(lambda: 100) | dict(zip(FILTER_COLORS, count(1)))
FILTERS = tuple(FILTER_COLORS)
ZTF_FILTERS = ("zg", "zr", "zi")
DEFAULT_DR = "dr17"
available_drs = (
"dr2",
"dr3",
"dr4",
"dr8",
"dr13",
"dr17",
)
def db_coord_to_degrees(coord):
match = re.search(r"^\((\S+)\s*,\s*(\S+)\)$", coord)
ra = math.degrees(float(match.group(1)))
dec = math.degrees(float(match.group(2)))
return ra, dec
def hms_to_deg(hms: str):
h, m, s = (float(x) for x in hms.split())
angle = h * units.hourangle + m * units.arcmin + s * units.arcsec
deg = angle.to_value(units.deg)
return deg
def html_from_astropy_table(table: astropy.table.Table, columns: dict):
template = Template(
"""
<table id="simbad-table">
<tr>
{% for column in columns %}
<td>{{columns[column]}}</td>
{% endfor %}
</tr>
{% for row in table %}
<tr>
{% for cell in row %}
<td>{{cell}}</td>
{% endfor %}
</tr>
{% endfor %}
</table>
"""
)
table = table[list(columns.keys())].copy()
for column in table.colnames:
table[column] = [to_str(x) for x in table[column]]
html = template.render(table=table, columns=columns)
return html
def to_str(s, *, float_decimal_digits=3):
if isinstance(s, bytes):
return s.decode()
if isinstance(s, str):
return s
if isinstance(s, np.integer) or isinstance(s, int):
return str(s)
if isinstance(s, np.floating) or isinstance(s, float):
if np.isnan(s):
return ""
return f"{s:.{float_decimal_digits}f}"
if isinstance(s, units.Quantity):
if s.unit.is_equivalent("cm"):
for unit in (units.pc, units.kpc, units.Mpc, units.Gpc):
if 1e-1 < (distance := s.to(unit)).value < 3e3:
return f"{distance:.2f}"
else:
logging.warning(f"Value {s} is too large or too small")
return str(s)
if np.ma.is_masked(s):
return ""
raise ValueError(f"Argument should be str, bytes, int, float or unit.Quantity (distance), not {type(s)}")
def format_sep(sep_arcsec: float, float_decimal_digits_small: int = 3, float_decimal_digits_large: int = 1) -> str:
if sep_arcsec < 0.0:
raise ValueError(f"Separation {sep_arcsec} < 0")
if sep_arcsec < 60.0:
return f"{sep_arcsec:.{float_decimal_digits_small}f}″"
if sep_arcsec < 3600.0:
arcmin = int(sep_arcsec // 60.0)
arcsec = sep_arcsec % 60.0
return f"{arcmin:d}′{arcsec:02.{float_decimal_digits_large}f}″"
deg = int(sep_arcsec // 3600.0)
arcmin = int(sep_arcsec // 60.0 - deg * 60.0)
arcsec = sep_arcsec % 60.0 % 60.0
return f"{deg:d}°{arcmin:02d}′{arcsec:02.0f}″"
def anchor_form(url, data, title):
inputs = "\n".join(f'<input type="hidden" name="{key}" value="{value}">' for key, value in data.items())
return f"""
<form method="post" action="{url}" class="inline">
{inputs}
<button type="submit" class="link-button">
{title}
</button>
</form>
"""
def min_max_mjd_short(dr):
if dr == "dr2":
return 58194.0, 58299.0
if dr == "dr3":
return 58194.0, 58483.0
if dr == "dr4":
return 58194.0, 58664.0
if dr == "dr7":
return 58194.0, 58908.0
if dr == "dr8":
return 58194.0, 58972.0
if dr == "dr13":
return 58194.0, 59280.0
if dr == "dr17":
return 58194.0, 59524.0
return -INF, INF
def hmjd_to_earth(hmjd, coord):
t = Time(hmjd, format="mjd")
return t - t.light_travel_time(coord, kind="heliocentric", location=PALOMAR)
def raise_if(condition, exception):
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
if condition:
raise exception
return f(*args, **kwargs)
return wrapper
return decorator
def joiner(value, iterator):
iterator = iter(iterator)
yield next(iterator)
for item in iterator:
yield value
yield item
def list_join(value, iterator):
return list(joiner(value, iterator))
def _json_hook(d):
for k, v in d.items():
if isinstance(v, list):
d[k] = tuple(v)
return immutabledict(d)
def parse_json_to_immutable(s):
return json.loads(s, object_hook=_json_hook)
def flip(items, ncol):
"""https://stackoverflow.com/a/10101532/5437597"""
return chain(*[items[i::ncol] for i in range(ncol)])
def ccdid_from_rcid(rcid: int) -> int:
return rcid // 4 + 1
def qid_from_rcid(rcid: int) -> int:
return rcid % 4 + 1
class immutabledefaultdict(immutabledict):
dict_cls = defaultdict
def compose_plus_minus_expression(value, lower, upper, **to_str_kwargs):
return f"""
<div class="expression">
{to_str(value, **to_str_kwargs)}
<span class='supsub'>
<sup class='superscript'>+{to_str(upper - value, **to_str_kwargs)}</sup>
<sub class='subscript'>-{to_str(value - lower, **to_str_kwargs)}</sub>
</span>
</div>
"""
|
snad-spaceREPO_NAMEztf-viewerPATH_START.@ztf-viewer_extracted@ztf-viewer-master@[email protected]@.PATH_END.py
|
{
"filename": "tools.py",
"repo_name": "bradkav/NbodyIMRI",
"repo_path": "NbodyIMRI_extracted/NbodyIMRI-main/NbodyIMRI/tools.py",
"type": "Python"
}
|
import numpy as np
from scipy.integrate import cumtrapz
import string
import random
import NbodyIMRI
from NbodyIMRI import units as u
from os.path import join
import h5py
import glob
def open_file_for_read(fileID):
"""
Open an output file in order to be read (taking care of the correct directory structure and file endings)
Parameters:
fileID (string): fileID of the file you'd like to load.
"""
filestr = join(NbodyIMRI.snapshot_dir, fileID)
flist = glob.glob(filestr + "*")
if (len(flist) != 1):
raise ValueError(f"File <{filestr}*> cannot be found or is not unique.")
else:
fname = flist[0]
#if not fname.endswith(".hdf5"):
# fname += ".hdf5"
return h5py.File(fname, 'r')
def inverse_transform_sample(integrand, x_min, x_max, N_samps=1, N_grid=1000, log=True):
if (log == True):
x_grid = np.geomspace(x_min, x_max, N_grid)
else:
x_grid = np.linspace(x_min, x_max, N_grid)
P_grid = cumtrapz(integrand(x_grid), x_grid, initial = 0.0)
P_grid /= P_grid[-1]
u = np.random.rand(N_samps)
x_samps = np.interp(u, P_grid, x_grid)
return x_samps
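# Illustrative usage (hypothetical values, added for clarity): draw 1000 radii from an
# r^-2 profile between 1 pc and 100 pc using the inverse-CDF sampler above.
# r_samps = inverse_transform_sample(lambda r: r**-2, 1.0*u.pc, 100.0*u.pc, N_samps=1000)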
def get_random_direction():
costheta = 2*np.random.rand() - 1
theta = np.arccos(costheta)
phi = 2*np.pi*np.random.rand()
return np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
def generate_hash(length=5):
h = np.zeros(length, dtype=str)
for i in range(length):
h[i] = random.choice(string.hexdigits)
return ''.join(h)
def norm(x):
return np.sqrt(np.sum(x**2, axis=-1))
def calc_orbital_elements(x, v, Mtot):
x_mag = norm(x)
v_mag = norm(v)
mu = u.G_N*Mtot
a = (2/x_mag - v_mag**2/mu)**-1
#https://astronomy.stackexchange.com/questions/29005/calculation-of-eccentricity-of-orbit-from-velocity-and-radius
h = np.cross(x,v)
h_mag = norm(h)
e = np.sqrt(1-np.clip(h_mag**2/(mu*a), 0, 1))
#e_vec = np.cross(v,h)/mu - x/np.atleast_2d(x_mag).T
#e = norm(e_vec)
return a, e
def calc_Torb(a_i, M_tot):
return 2*np.pi*np.sqrt(a_i**3/(u.G_N*M_tot))
def calc_rho_6(rho_sp, M_1, gamma):
r_6 = 1e-6*u.pc
k = (3-gamma)*(0.2)**(3-gamma)/(2*np.pi)
rho_6 = (rho_sp)**(1-gamma/3)*(k*M_1)**(gamma/3)*r_6**-gamma
return rho_6
def calc_risco(M_1):
return 6*u.G_N*M_1/u.c**2
|
bradkavREPO_NAMENbodyIMRIPATH_START.@NbodyIMRI_extracted@NbodyIMRI-main@[email protected]@.PATH_END.py
|
{
"filename": "abscal_inspect_2458062.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/abscal_inspect/abscal_inspect_2458062.ipynb",
"type": "Jupyter Notebook"
}
|
# Stage 2 Absolute Calibration Nightly Notebook
**Josh Dillon**, Last Revised 9/23/20
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from hera_cal import io, redcal, apply_cal, abscal, utils
from hera_cal.smooth_cal import build_time_blacklist
from hera_qm.metrics_io import load_metric_file
import pyuvdata
import glob
import os
from copy import deepcopy
import inspect
import h5py
import matplotlib.cm as cm
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = '2459122'
# data_path = '/lustre/aoc/projects/hera/H4C/2459122'
# lst_blacklist_string = '0-1.3 2.5-4.3 5.0-5.7 6.5-9.1 10.6-11.5 11.9-14.3 16.3-1.3'
# abscal_model_glob = '/lustre/aoc/projects/hera/zmartino/hera_calib_model/H3C/abscal_files_unique_baselines/zen.2458894.?????.uvh5'
# os.environ["JULIANDATE"] = JD
# os.environ["DATA_PATH"] = data_path
# os.environ["LST_BLACKLIST_STRING"] = lst_blacklist_string
# os.environ["ABSCAL_MODEL_GLOB"] = abscal_model_glob
```
```python
# Use environment variables to figure out path to data
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
lst_blacklist_string = os.environ['LST_BLACKLIST_STRING']
abscal_model_glob = os.environ['ABSCAL_MODEL_GLOB']
print(f'JD = "{JD}"')
print(f'data_path = "{data_path}"')
print(f'lst_blacklist_string = "{lst_blacklist_string}"')
print(f'abscal_model_glob = "{abscal_model_glob}"')
```
JD = "2458062"
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458062"
lst_blacklist_string = ""
abscal_model_glob = "/lustre/aoc/projects/hera/H1C_IDR3/abscal_model/zen.245804*.HH.uvRXLS.uvh5"
```python
print('Looking for data in', data_path, 'on JD', JD)
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.sum.uvh5')))
if len(data_list) == 0:
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.uvh5')))
print('...found {} data files.'.format(len(data_list)))
abscal_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.abs.calfits')))
print('...found {} abscal files.'.format(len(abscal_list)))
omnical_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.sum.omni.calfits')))
print('...found {} omnical files.'.format(len(omnical_list)))
```
Looking for data in /lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458062 on JD 2458062
...found 73 data files.
...found 73 abscal files.
...found 73 omnical files.
# Load And Inspect a Single File
```python
# get all JDs and LSTs
_, _, file_lst_arrays, file_time_arrays = io.get_file_times(data_list)
# parse lst_blacklist_string
lst_blacklists = []
if len(lst_blacklist_string) > 0:
lst_blacklists = [tuple([float(arg) for arg in arg_pair.split('-', maxsplit=1)])
for arg_pair in lst_blacklist_string.split(' ')]
# get times that are blacklisted and reshape them like file_time_arrays
time_blacklisted_flat = build_time_blacklist(np.hstack(file_time_arrays), lst_blacklists=lst_blacklists)
time_blacklisted = [fta.astype(bool) for fta in file_time_arrays]
n = 0
for i in range(len(file_time_arrays)):
time_blacklisted[i] = np.zeros_like(time_blacklisted[i], dtype=bool)
for j in range(len(file_time_arrays[i])):
time_blacklisted[i][j] = time_blacklisted_flat[n]
n += 1
# pick the central time from among the not-LST blacklisted files, if possible
good_indices = [i for i, tb in enumerate(time_blacklisted) if not np.any(tb)]
if len(good_indices) > 0:
file_index = good_indices[len(good_indices)//2]
else:
file_index = len(data_list)//2
file_JD = '.'.join([s for s in data_list[file_index].split('.') if s.isdigit()])
```
/lustre/aoc/projects/hera/heramgr/anaconda2/envs/h1c_idr3/lib/python3.7/site-packages/numpy/core/_asarray.py:83: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
return array(a, dtype, copy=False, order=order)
```python
# Load abscal gains and determine ex_ants
hc = io.HERACal(abscal_list[file_index])
gains, gain_flags, _, _ = hc.read()
ex_ants = [ant for ant in gain_flags if np.all(gain_flags[ant])]
# Get min_bl_cut, we only want to compare baselines actually used in absolute calibration
try:
min_bl_cut = float(hc.history.replace('\n','').split('--min_bl_cut')[-1].split('--')[0].strip())
except:
print('Could not find min_bl_cut, setting to 1 m.')
min_bl_cut = 1.0
# Load the most common redundant baseline longer than min_bl_cut
hd = io.HERAData(data_list[file_index])
bls_to_plot = []
for pol in ['ee', 'nn']:
reds = redcal.get_reds(hd.antpos, pols=[pol])
# reds = redcal.filter_reds(reds, ex_ants=ex_ants)
reds = sorted(reds, key=len, reverse=True)
bl_lens = np.array([np.linalg.norm(hd.antpos[red[0][1]] - hd.antpos[red[0][0]]) for red in reds])
try:
bl_group_to_plot = (np.array(reds)[bl_lens >= min_bl_cut])[0]
except:
bl_group_to_plot = reds[0]
bls_to_plot.extend(bl_group_to_plot)
# reds = sorted(reds, key=len, reverse=True)
data, flags, nsamples = hd.read(bls=bls_to_plot)
apply_cal.calibrate_in_place(data, gains, data_flags=flags, cal_flags=gain_flags)
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
plt.figure(figsize=(8,8))
plt.scatter(np.array(list(hd.antpos.values()))[:,0],
np.array(list(hd.antpos.values()))[:,1], c='w', s=0)
for ant,pos in hd.antpos.items():
bad = ant in [ant[0] for ant in ex_ants]
plt.gca().add_artist(plt.Circle(tuple(pos[0:2]), radius=7,
fill=(~bad), color=['grey','r'][bad]))
plt.text(pos[0],pos[1],str(ant), va='center', ha='center', color='w')
plt.xlabel("Antenna East-West Position (meters)")
plt.ylabel("Antenna North-South Position (meters)")
plt.title('Antenna Positions on {} (Red = Flagged)'.format(file_JD));
plt.axis('equal')
plt.tight_layout()
plt.show()
```

### Figure 1: Array and Flagged Antennas
#### OBSERVER CHECKLIST:
* Check that the array configuration looks reasonable.
* Check that all flags expected to be flagged are actually flagged but also that not everything is getting flagged.
```python
# check whether the model is redundant by looking at the history
model_is_redundant = ('--model_is_redundant' in "".join(hc.history.split()))
# Find files that overlap with this file
abscal_matched_files = list(abscal.match_times(data_list[file_index],
sorted(glob.glob(abscal_model_glob)),
filetype='uvh5', atol=1e-5))
hdm = io.HERAData(abscal_matched_files)
# Get model baselines to load
model_bls = hdm.bls
model_antpos = hdm.antpos
if isinstance(model_bls, dict):
model_bls = list(model_bls.values())[0]
model_antpos = {ant: pos for antpos in hdm.antpos.values() for ant, pos in antpos.items()}
_, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(bls_to_plot, model_bls,
hd.antpos, model_antpos=model_antpos,
model_is_redundant=model_is_redundant)
model, model_flags, _ = hdm.read(bls=model_bl_to_load)
# Rephase model at index of best match to mean LST in the data
model_index = np.argmin(np.abs(model.lsts - np.mean(data.lsts)))
model_blvecs = {bl: model.antpos[bl[0]] - model.antpos[bl[1]] for bl in model.keys()}
utils.lst_rephase(model, model_blvecs, model.freqs, np.mean(data.lsts) - model.lsts[model_index],
lat=hdm.telescope_location_lat_lon_alt_degrees[0], inplace=True)
if not model_is_redundant:
model, _, _ = utils.red_average(model, flags=model_flags)
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for pol in ['ee', 'nn']:
for func, plot, ylabel in zip([np.abs, np.angle], [plt.semilogy, plt.plot], ['Amplitude (Jy)', 'Phase (Radians)']):
plt.figure(figsize=(16,4))
for bl in [k for k in bls_to_plot if k[2] == pol]:
ant0, ant1 = utils.split_bl(bl)
blvec = hd.antpos[ant0[0]] - hd.antpos[ant1[0]]
if (ant0 not in ex_ants) and (ant1 not in ex_ants):
to_plot = deepcopy(data[bl])
to_plot[flags[bl]] = np.nan + 1.0j * np.nan
to_plot = np.nanmedian(np.real(to_plot), axis=0) + 1.0j * np.nanmedian(np.imag(to_plot), axis=0)
plot(hd.freqs/1e6, func(to_plot))
for bl in [k for k in model if k[2] == pol]:
plot(hd.freqs/1e6, func(model[bl][model_index]), 'k-', label='Abscal Model')
plt.xlabel('Frequency (MHz)')
plt.ylabel(ylabel)
plt.legend(loc='lower right')
plt.title('{}-Polarized, {:f} m East, {:f} m North Visibility on {}'.format(pol, blvec[0], blvec[1], file_JD))
```




### Figure 2: Example redundant baseline group, absolute calibrated, compared to the Abscal Model
#### OBSERVER CHECKLIST:
* Check that the data all look pretty redundant.
* Check that the model isn't wildly out of line with the data.
# Load a whole day
```python
# Load chisq and flagging info from abscal gains
ant_flags_dict = {}
chisq_ee_dict = {}
chisq_nn_dict = {}
cspa_med_dict = {}
ants = set([])
for cal in abscal_list:
hc = io.HERACal(cal)
_, flags, cspa, chisq = hc.read()
ants |= set(flags.keys())
ant_flags_dict[cal] = {ant: np.all(flags[ant]) for ant in flags}
chisq_ee_dict[cal] = chisq['Jee']
chisq_nn_dict[cal] = chisq['Jnn']
cspa_med_dict[cal] = {ant: np.nanmedian(cspa[ant], axis=1) for ant in cspa}
all_flagged_dict = {ant: np.all([af[ant] for af in ant_flags_dict.values()]) for ant in ants}
cspa = {ant: np.hstack([np.squeeze(cspa_med_dict[cal][ant]) / \
~ant_flags_dict[cal][ant] for cal in abscal_list]) for ant in ants}
ee_chisq = np.vstack(np.array(list(chisq_ee_dict.values())))
nn_chisq = np.vstack(np.array(list(chisq_nn_dict.values())))
```
invalid value encountered in true_divide
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
# save middle-numbered ants with a minimal number of flags
ants_to_save = {}
for pol in ['Jee', 'Jnn']:
min_flags = np.min([np.sum(~np.isfinite(cspa[ant]))
for ant in cspa if ant[1] == pol])
ant_candidates = sorted([ant for ant in cspa if ant[1] == pol and
np.sum(~np.isfinite(cspa[ant])) == min_flags])
Nac = len(ant_candidates)
ants_to_save[pol] = ant_candidates[(Nac // 2 - 1):(Nac // 2 + 1)]
# Reload abscal gains
times_dict = {}
gain_dict = {}
flag_dict = {}
for cal in abscal_list:
hc = io.HERACal(cal)
gains, flags, _, _ = hc.read()
times_dict[cal] = hc.times
gain_dict[cal] = {ant: gains[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
flag_dict[cal] = {ant: flags[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
times = np.hstack(list(times_dict.values()))
lsts = 12 / np.pi * pyuvdata.utils.get_lst_for_time(times, *hd.telescope_location_lat_lon_alt_degrees)
gains = {ant: np.vstack([gain_dict[cal][ant] for cal in gain_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
flags = {ant: np.vstack([flag_dict[cal][ant] for cal in flag_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
flag_mask = np.all([f for f in flags.values()], axis=0)
```
# Inspect a whole day
```python
# for overplotting blacklisted LSTs
my_cmap = cm.binary
my_cmap.set_under('k', alpha=0)
blacklist = np.ones_like(ee_chisq) * np.hstack(time_blacklisted)[:, np.newaxis]
```
You are modifying the state of a globally registered colormap. In future versions, you will not be able to modify a registered colormap in-place. To remove this warning, you can make a copy of the colormap first. cmap = copy.copy(mpl.cm.get_cmap("binary"))
```python
# Grid and plot overall chi^2 for each polarization
ee_chisq = np.vstack(np.array(list(chisq_ee_dict.values())))
nn_chisq = np.vstack(np.array(list(chisq_nn_dict.values())))
fig, axes = plt.subplots(1, 2, figsize=(20,12))
for ax, cs, t in zip(axes, [ee_chisq, nn_chisq], ['ee-polarized', 'nn-polarized']):
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(cs / ~flag_mask, aspect='auto', vmin=0, cmap='inferno', vmax=10, interpolation='nearest', extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title('Overall Abscal $\chi^2$ / $N_{bls}$: ' + t)
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, label='$\chi^2$ / $N_{bls}$ (unitless)')
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
invalid value encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 3: Overall Abscal $\chi^2 / N_{bls}$
This computes the difference between the calibrated data and the abscal model, normalized by the thermal noise. Grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing.
#### OBSERVER CHECKLIST:
* Look for regions of high $\chi^2$ that are not blacklisted; a simple numerical tally of such regions is sketched below.
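The cell below is an illustrative addition (not in the original notebook) that tallies how much of the waterfall exceeds an arbitrary $\chi^2 / N_{bls}$ threshold outside the flagged and blacklisted regions, reusing `ee_chisq`, `nn_chisq`, `flag_mask`, and `time_blacklisted` from above.
```python
# Illustrative tally of high chi^2 pixels that are neither flagged nor blacklisted
threshold = 5  # arbitrary threshold on chi^2 / N_bls
not_blacklisted = ~np.hstack(time_blacklisted)[:, np.newaxis]
for cs, label in zip([ee_chisq, nn_chisq], ['ee', 'nn']):
    good = ~flag_mask & not_blacklisted
    n_high = np.sum((cs > threshold) & good)
    print(f'{label}: {100 * n_high / max(np.sum(good), 1):.2f}% of unflagged, '
          f'non-blacklisted pixels exceed chi^2/N_bls = {threshold}')
```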
```python
# Pick vmax to not saturate 90% of the data
vmax = np.max([np.percentile(np.abs(gains[ants_to_save[pol][1]][~flag_mask]), 90) for pol in ['Jee', 'Jnn']])
# Plot abscal gain amplitude waterfalls for a single antenna
fig, axes = plt.subplots(3, 2, figsize=(16,16), gridspec_kw={'height_ratios': [1, .25, .25]})
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
gains_here = deepcopy(gains[ant])
gains_here[flags[ant]] = np.nan
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(np.abs(gains_here), aspect='auto', cmap='inferno',
interpolation='nearest', vmin=0, vmax=vmax, extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title(f'Abscal Gain Amplitude of Antenna {ant[0]}: {pol[1:]}-polarized' )
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, orientation='horizontal', pad=.07)
# Now plot median gain spectra and time series
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
gains_here = deepcopy(gains[ant])
gains_here[flags[ant]] = np.nan
if not np.all(np.hstack(time_blacklisted)):
ax.plot(hd.freqs / 1e6, np.nanmedian(np.abs(gains_here[~np.hstack(time_blacklisted), :]), axis=0))
ax.set_ylim([0, vmax])
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('|g| (unitless)')
ax.set_title(f'Median Non-Blacklisted Abscal Gain Amplitude Spectrum of Antenna {ant[0]}: {pol[1:]}-polarized')
# Now plot median gain time series
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
gains_here = deepcopy(gains[ant])
gains_here[flags[ant]] = np.nan
if not np.all(np.hstack(time_blacklisted)):
ax.plot(lsts[~np.hstack(time_blacklisted)],
np.nanmedian(np.abs(gains_here[~np.hstack(time_blacklisted), :]), axis=1),
'b.', label='Not Blacklisted LSTs')
if np.any(np.hstack(time_blacklisted)):
ax.plot(lsts[np.hstack(time_blacklisted)],
np.nanmedian(np.abs(gains_here[np.hstack(time_blacklisted), :]), axis=1),
'r.', label='Blacklisted LSTs')
ax.set_ylim([0, vmax])
ax.set_xlabel('LST (hours)')
ax.set_ylabel('|g| (unitless)')
ax.set_title(f'Median Abscal Gain Amplitude Time-Series of Antenna {ant[0]}: {pol[1:]}-polarized')
ax.legend()
plt.tight_layout()
```
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered

### Figure 4: Example Abscal Gain Amplitudes
Abscal gain amplitudes for an example antenna. In the waterfall, grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot the median non-blacklisted amplitude as a function of frequency (middle row) and the median amplitude as a function of time (bottom row).
#### OBSERVER CHECKLIST:
* Look to see that non-blacklisted times are relatively stable in amplitude (a simple stability metric is sketched below)
* Check to see if the bandpass looks reasonable
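As a simple, illustrative stability metric (not part of the original notebook), one can compute the fractional scatter over non-blacklisted times of the band-median gain amplitude, reusing `gains`, `flags`, `ants_to_save`, and `time_blacklisted` from above.
```python
# Illustrative amplitude-stability metric for the example antennas
for pol in ['Jee', 'Jnn']:
    ant = ants_to_save[pol][1]
    g = deepcopy(gains[ant])
    g[flags[ant]] = np.nan
    if not np.all(np.hstack(time_blacklisted)):
        med_amp = np.nanmedian(np.abs(g[~np.hstack(time_blacklisted), :]), axis=1)
        frac_scatter = np.nanstd(med_amp) / np.nanmedian(med_amp)
        print(f'{pol} antenna {ant[0]}: fractional |g| scatter over time = {frac_scatter:.3f}')
```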
```python
# Plot abscal gain phase waterfalls for a single antenna/refant
fig, axes = plt.subplots(3, 2, figsize=(16,16), gridspec_kw={'height_ratios': [1, .25, .25]})
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
gains_ratio_here = gains[ant0] / gains[ant1]
gains_ratio_here[flags[ant0] | flags[ant1]] = np.nan
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(np.angle(gains_ratio_here), aspect='auto', cmap='inferno',
interpolation='nearest', vmin=-np.pi, vmax=np.pi, extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title(f'Abscal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized' )
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, orientation='horizontal', pad=.07)
# Now plot median gain spectra and time series
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
gains_ratio_here = gains[ant0] / gains[ant1]
gains_ratio_here[flags[ant0] | flags[ant1]] = np.nan
if not np.all(np.hstack(time_blacklisted)):
re_med = np.nanmedian(gains_ratio_here[~np.hstack(time_blacklisted), :].real, axis=0)
im_med = np.nanmedian(gains_ratio_here[~np.hstack(time_blacklisted), :].imag, axis=0)
ax.plot(hd.freqs / 1e6, np.angle(re_med + 1.0j * im_med))
ax.set_ylim([-np.pi, np.pi])
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel(f'Phase of g$_{{{ant0[0]}}}$ / g$_{{{ant1[0]}}}$')
ax.set_title(f'Median Non-Blacklisted Abscal Gain Phase Spectrum of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
# Now plot a single gain angle time series
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
gains_ratio_here = gains[ant0] / gains[ant1]
gains_ratio_here[flags[ant0] | flags[ant1]] = np.nan
# pick channel with minimum phase variance in the middle 100 channels
possible_chans = np.arange(len(hd.freqs))[len(hd.freqs)//2 - 50:len(hd.freqs)//2 + 50]
best_chan = np.argmin(np.var(np.angle(gains_ratio_here), axis=0)[len(hd.freqs)//2 - 50:len(hd.freqs)//2 + 50])
chan = possible_chans[best_chan]
if not np.all(np.hstack(time_blacklisted)):
ax.plot(lsts[~np.hstack(time_blacklisted)],
np.angle(gains_ratio_here[~np.hstack(time_blacklisted), chan]),
'b.', label='Not Blacklisted LSTs')
if np.any(np.hstack(time_blacklisted)):
ax.plot(lsts[np.hstack(time_blacklisted)],
np.angle(gains_ratio_here[np.hstack(time_blacklisted), chan]),
'r.', label='Blacklisted LSTs')
ax.set_ylim([-np.pi, np.pi])
ax.set_xlabel('LST (hours)')
ax.set_ylabel(f'Phase of g$_{{{ant0[0]}}}$ / g$_{{{ant1[0]}}}$')
ax.set_title(f'Abscal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]} at Channel {chan}: {pol[1:]}-polarized')
ax.legend()
plt.tight_layout()
```
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered

### Figure 5: Example Abscal Gain Phases
Relative gain phases of two example antennas. In the waterfall, grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot the median non-blacklisted phase as a function of frequency (middle row) and the phase time series at the channel, within 50 channels of the band center, that has the smallest phase variance (bottom row).
#### OBSERVER CHECKLIST:
* Look for regions of "hashy" phase structure that are not blacklisted or attributable to RFI; a rough numerical check is sketched below.
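The sketch below (an illustrative addition, not in the original notebook) gives one rough way to quantify "hashiness": the typical channel-to-channel phase jump of the gain ratio over non-blacklisted times, using `gains`, `flags`, `ants_to_save`, and `time_blacklisted` from above.
```python
# Illustrative "hashiness" metric: typical channel-to-channel phase jump
for pol in ['Jee', 'Jnn']:
    ant0, ant1 = ants_to_save[pol]
    ratio = gains[ant0] / gains[ant1]
    ratio[flags[ant0] | flags[ant1]] = np.nan
    if not np.all(np.hstack(time_blacklisted)):
        phase = np.angle(ratio[~np.hstack(time_blacklisted), :])
        dphase = np.angle(np.exp(1.0j * np.diff(phase, axis=1)))  # wrap-safe difference
        print(f'{pol}: median |channel-to-channel phase jump| = {np.nanmedian(np.abs(dphase)):.3f} rad')
```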
# Metadata
```python
print(redcal.version.history_string())
```
------------
This file was produced by the function <module>() in <ipython-input-1-c6de44361328> using:
git_branch: master
git_description: v3.0-733-gd2dd8ccf
git_hash: d2dd8ccf3fe43d5e5eb6a4c28ceaf4a6e3d1fcb7
git_origin: [email protected]:HERA-Team/hera_cal.git
version: 3.0
------------
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@abscal_inspect@[email protected]_END.py
|
{
"filename": "compound.py",
"repo_name": "panoptes/POCS",
"repo_path": "POCS_extracted/POCS-main/src/panoptes/pocs/scheduler/observation/compound.py",
"type": "Python"
}
|
import numpy as np
from astropy import units as u
from panoptes.utils.utils import get_quantity_value, listify
from panoptes.pocs.scheduler.observation.base import Observation as BaseObservation
class Observation(BaseObservation):
"""An observation that consists of different combinations of exptimes."""
def __init__(self, *args, **kwargs):
"""Accept a list of exptimes."""
# Save all the exptimes.
self._exptimes = listify(kwargs['exptime'])
# Use the first exposure time to set up observation.
kwargs['exptime'] = self._exptimes[0]
super(Observation, self).__init__(*args, **kwargs)
self._min_duration = np.sum(self._exptimes)
self._set_duration = np.sum(
[self._exptimes[i % len(self._exptimes)] for i in range(self.exp_set_size)])
self.is_compound = True
@property
def exptime(self):
""" Return current exposure time as a u.Quantity. """
current_exptime_index = self.current_exp_num % len(self._exptimes)
exptime = self._exptimes[current_exptime_index]
return get_quantity_value(exptime, u.second) * u.second
@property
def exptimes(self):
return self._exptimes
def __str__(self):
return f"{self.field}: exptime={self.exptime} " \
f"exptime_set={self._exptimes!r} " \
f"in blocks of {self.exp_set_size}, " \
f"minimum {self.min_nexp}, " \
f"priority {self.priority:.0f}"
|
panoptesREPO_NAMEPOCSPATH_START.@POCS_extracted@POCS-main@src@panoptes@pocs@scheduler@[email protected]@.PATH_END.py
|
{
"filename": "chunk_sampler.py",
"repo_name": "i4Ds/sdo-cli",
"repo_path": "sdo-cli_extracted/sdo-cli-main/src/sdo/sood/data/chunk_sampler.py",
"type": "Python"
}
|
"""
Inspired by https://gist.github.com/wassname/8ae1f64389c2aaceeb84fcd34c3651c3
A Pytorch sampler that samples ordered indices from unordered sequences.
Good for use with Dask because Dask slows down when sampling between chunks.
Usually it is better if batches are uncorrelated, so we want each batch to be a sequence from a different part of a chunk.
For example, if each chunk is `range(12)` and seq_len is 3, we might end up with these indices:
- [[1,2,3],[9,10,11],[4,5,6]]
Usage:
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
sampler=SequenceInChunkSampler(train_dataset, seq_len=batch_size, chunksize=batch_size*100)
)
"""
import torch.utils.data.sampler
import numpy as np
class SequenceInChunkSampler(torch.utils.data.sampler.Sampler):
"""
Samples sequences of elements sequentially, but random sequences in a chunk.
Arguments:
data_source (Dataset): dataset to sample from
seq_len (int): length of sequential sequences
chunksize (int): length of cached data to take random sequences from
"""
def __init__(self, data_source, seq_len=12, chunksize=120):
assert chunksize % seq_len == 0, "chunk size should be a multiple of seq_len"
assert len(data_source) > chunksize
self.data_source = data_source
self.seq_len = seq_len
self.chunksize = chunksize
def __iter__(self):
chunk_idxs = np.arange(0, len(self.data_source), self.chunksize)
max_i = len(self.data_source)
np.random.shuffle(chunk_idxs)
for chunk_idx in chunk_idxs:
seqs = np.arange(
chunk_idx, min(chunk_idx + self.chunksize, max_i), self.seq_len
)
np.random.shuffle(seqs)
for seq_i in seqs:
for i in np.arange(seq_i, min(seq_i + self.seq_len, max_i)):
yield i
def __len__(self):
return len(self.data_source)
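# Minimal usage demonstration (an illustrative addition, not part of the
# original file). With chunksize=6 and seq_len=3, the sampler yields shuffled
# chunks made of shuffled, length-3 sequential runs, e.g.
# [3, 4, 5, 0, 1, 2, 9, 10, 11, 6, 7, 8].
if __name__ == "__main__":
    dummy_dataset = list(range(12))
    sampler = SequenceInChunkSampler(dummy_dataset, seq_len=3, chunksize=6)
    print(list(sampler))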
|
i4DsREPO_NAMEsdo-cliPATH_START.@sdo-cli_extracted@sdo-cli-main@src@sdo@sood@data@[email protected]_END.py
|
{
"filename": "usnob_trim.py",
"repo_name": "dstndstn/astrometry.net",
"repo_path": "astrometry.net_extracted/astrometry.net-main/util/usnob_trim.py",
"type": "Python"
}
|
#! /usr/bin/env python3
# This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
# Used to trim down the "hpslit"-merged USNO-B files before
# building indices out of them.
from __future__ import print_function
import sys
from optparse import OptionParser
try:
import pyfits
except ImportError:
try:
from astropy.io import fits as pyfits
except ImportError:
raise ImportError("Cannot import either pyfits or astropy.io.fits")
from numpy import *
import numpy as np
from astrometry.util.fits import *
from astrometry.util.healpix import *
from astrometry.util.starutil_numpy import *
from astrometry.util.usnob_cuts import *
def trim(infn, outfn):
print('Reading', infn)
X = fits_table(infn, columns=[
'num_detections', 'flags', 'an_diffraction_spike',
'field_1', 'field_3', 'magnitude_1', 'magnitude_3',
'field_0', 'field_2', 'magnitude_0', 'magnitude_2',
'ra', 'dec',
])
print('Read', len(X), 'sources')
print('Applying cuts')
I = usnob_apply_cuts(X)
# drop now-unwanted columns
for c in ['flags', 'an_diffraction_spike',
'num_detections' ]:
X.delete_column(c)
X.cut(I)
print('Kept', len(X), 'sources')
del I
print('Computing average mags')
X.field_0 = X.field_0.astype(np.int16)
X.field_1 = X.field_1.astype(np.int16)
X.field_2 = X.field_2.astype(np.int16)
X.field_3 = X.field_3.astype(np.int16)
X.magnitude_0 = X.magnitude_0.astype(np.float32)
X.magnitude_1 = X.magnitude_1.astype(np.float32)
X.magnitude_2 = X.magnitude_2.astype(np.float32)
X.magnitude_3 = X.magnitude_3.astype(np.float32)
usnob_compute_average_mags(X)
for c in [
'field_1', 'field_3', 'magnitude_1', 'magnitude_3',
'field_0', 'field_2', 'magnitude_0', 'magnitude_2']:
X.delete_column(c)
X.r_mag = X.r_mag.astype(np.float32)
X.b_mag = X.b_mag.astype(np.float32)
print('Writing output to', outfn)
X.writeto(outfn)
del X
if __name__ == '__main__':
#for hp in range(12):
if False:
# fitscopy usnob-07.fits"[#row<100000000]" usnob-07-a.fits
# fitscopy usnob-07.fits"[#row>=100000000]" usnob-07-b.fits
infn = 'usnob-07-a.fits'
outfn = 'usnob-trimmed-07-a.fits'
trim(infn, outfn)
if False:
infn = 'usnob-07-b.fits'
outfn = 'usnob-trimmed-07-b.fits'
trim(infn, outfn)
# cp usnob-trimmed-07-a.fits 07a.fits
# tabmerge usnob-trimmed-07-b.fits+1 07a.fits+1
# mv 07a.fits usnob-trimmed-07.fits
if False:
infn = 'usnob-10-a.fits'
outfn = 'usnob-trimmed-10-a.fits'
trim(infn, outfn)
if True:
infn = 'usnob-10-b.fits'
outfn = 'usnob-trimmed-10-b.fits'
trim(infn, outfn)
#for hp in range(7,12):
#for hp in range(8,12):
for hp in range(11,12):
infn = 'usnob-%02i.fits' % hp
outfn = 'usnob-trimmed-%02i.fits' % hp
trim(infn, outfn)
|
[email protected][email protected]@util@[email protected]_END.py
|
{
"filename": "_cog.py",
"repo_name": "sfarrens/sfof",
"repo_path": "sfof_extracted/sfof-master/sfof/python/euclid/dm/_cog.py",
"type": "Python"
}
|
# /home/sartor/pymodule/euclid/dm/_cog.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:a17fe44f5818a6dad4c3af2f6274ac08c385d7a5
# Generated 2014-07-24 16:26:39.932813 by PyXB version 1.2.3
# Namespace http://euclid.esa.org/schema/pro/le3/cog [xmlns:cog]
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:869ae486-133e-11e4-88d8-90b11c83965f')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.3'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
import euclid.dm._fit as _ImportedBinding_euclid_dm__fit
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI(u'http://euclid.esa.org/schema/pro/le3/cog', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, unicode):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Complex type {http://euclid.esa.org/schema/pro/le3/cog}fofClusterInputCatalog with content type ELEMENT_ONLY
class fofClusterInputCatalog (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://euclid.esa.org/schema/pro/le3/cog}fofClusterInputCatalog with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'fofClusterInputCatalog')
_XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 44, 4)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element PhotZcatalog uses Python identifier PhotZcatalog
__PhotZcatalog = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'PhotZcatalog'), 'PhotZcatalog', '__httpeuclid_esa_orgschemaprole3cog_fofClusterInputCatalog_PhotZcatalog', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 46, 12), )
PhotZcatalog = property(__PhotZcatalog.value, __PhotZcatalog.set, None, None)
# Element SpecZcatalog uses Python identifier SpecZcatalog
__SpecZcatalog = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'SpecZcatalog'), 'SpecZcatalog', '__httpeuclid_esa_orgschemaprole3cog_fofClusterInputCatalog_SpecZcatalog', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 47, 12), )
SpecZcatalog = property(__SpecZcatalog.value, __SpecZcatalog.set, None, None)
_ElementMap.update({
__PhotZcatalog.name() : __PhotZcatalog,
__SpecZcatalog.name() : __SpecZcatalog
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'fofClusterInputCatalog', fofClusterInputCatalog)
# Complex type {http://euclid.esa.org/schema/pro/le3/cog}fofParams with content type ELEMENT_ONLY
class fofParams (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {http://euclid.esa.org/schema/pro/le3/cog}fofParams with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'fofParams')
_XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 51, 4)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element link_mode uses Python identifier link_mode
__link_mode = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'link_mode'), 'link_mode', '__httpeuclid_esa_orgschemaprole3cog_fofParams_link_mode', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 53, 12), )
link_mode = property(__link_mode.value, __link_mode.set, None, None)
# Element print_bin_data uses Python identifier print_bin_data
__print_bin_data = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'print_bin_data'), 'print_bin_data', '__httpeuclid_esa_orgschemaprole3cog_fofParams_print_bin_data', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 54, 12), )
print_bin_data = property(__print_bin_data.value, __print_bin_data.set, None, None)
# Element link_r uses Python identifier link_r
__link_r = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'link_r'), 'link_r', '__httpeuclid_esa_orgschemaprole3cog_fofParams_link_r', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 55, 12), )
link_r = property(__link_r.value, __link_r.set, None, None)
# Element link_z uses Python identifier link_z
__link_z = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'link_z'), 'link_z', '__httpeuclid_esa_orgschemaprole3cog_fofParams_link_z', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 56, 12), )
link_z = property(__link_z.value, __link_z.set, None, None)
# Element kdtree_depth uses Python identifier kdtree_depth
__kdtree_depth = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'kdtree_depth'), 'kdtree_depth', '__httpeuclid_esa_orgschemaprole3cog_fofParams_kdtree_depth', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 57, 12), )
kdtree_depth = property(__kdtree_depth.value, __kdtree_depth.set, None, None)
# Element min_ngal uses Python identifier min_ngal
__min_ngal = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'min_ngal'), 'min_ngal', '__httpeuclid_esa_orgschemaprole3cog_fofParams_min_ngal', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 58, 12), )
min_ngal = property(__min_ngal.value, __min_ngal.set, None, None)
# Element z_min uses Python identifier z_min
__z_min = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'z_min'), 'z_min', '__httpeuclid_esa_orgschemaprole3cog_fofParams_z_min', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 59, 12), )
z_min = property(__z_min.value, __z_min.set, None, None)
# Element z_max uses Python identifier z_max
__z_max = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'z_max'), 'z_max', '__httpeuclid_esa_orgschemaprole3cog_fofParams_z_max', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 60, 12), )
z_max = property(__z_max.value, __z_max.set, None, None)
# Element z_bin_size uses Python identifier z_bin_size
__z_bin_size = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'z_bin_size'), 'z_bin_size', '__httpeuclid_esa_orgschemaprole3cog_fofParams_z_bin_size', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 61, 12), )
z_bin_size = property(__z_bin_size.value, __z_bin_size.set, None, None)
# Element z_ref uses Python identifier z_ref
__z_ref = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'z_ref'), 'z_ref', '__httpeuclid_esa_orgschemaprole3cog_fofParams_z_ref', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 62, 12), )
z_ref = property(__z_ref.value, __z_ref.set, None, None)
# Element c uses Python identifier c
__c = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'c'), 'c', '__httpeuclid_esa_orgschemaprole3cog_fofParams_c', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 63, 12), )
c = property(__c.value, __c.set, None, None)
# Element H0 uses Python identifier H0
__H0 = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'H0'), 'H0', '__httpeuclid_esa_orgschemaprole3cog_fofParams_H0', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 64, 12), )
H0 = property(__H0.value, __H0.set, None, None)
# Element omega_m uses Python identifier omega_m
__omega_m = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'omega_m'), 'omega_m', '__httpeuclid_esa_orgschemaprole3cog_fofParams_omega_m', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 65, 12), )
omega_m = property(__omega_m.value, __omega_m.set, None, None)
# Element omega_l uses Python identifier omega_l
__omega_l = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'omega_l'), 'omega_l', '__httpeuclid_esa_orgschemaprole3cog_fofParams_omega_l', False, pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 66, 12), )
omega_l = property(__omega_l.value, __omega_l.set, None, None)
_ElementMap.update({
__link_mode.name() : __link_mode,
__print_bin_data.name() : __print_bin_data,
__link_r.name() : __link_r,
__link_z.name() : __link_z,
__kdtree_depth.name() : __kdtree_depth,
__min_ngal.name() : __min_ngal,
__z_min.name() : __z_min,
__z_max.name() : __z_max,
__z_bin_size.name() : __z_bin_size,
__z_ref.name() : __z_ref,
__c.name() : __c,
__H0.name() : __H0,
__omega_m.name() : __omega_m,
__omega_l.name() : __omega_l
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'fofParams', fofParams)
# Complex type {http://euclid.esa.org/schema/pro/le3/cog}catalogGalaxyPhotoZLe3FitFile with content type ELEMENT_ONLY
class catalogGalaxyPhotoZLe3FitFile (_ImportedBinding_euclid_dm__fit.fitsFile):
"""Complex type {http://euclid.esa.org/schema/pro/le3/cog}catalogGalaxyPhotoZLe3FitFile with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'catalogGalaxyPhotoZLe3FitFile')
_XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 17, 4)
_ElementMap = _ImportedBinding_euclid_dm__fit.fitsFile._ElementMap.copy()
_AttributeMap = _ImportedBinding_euclid_dm__fit.fitsFile._AttributeMap.copy()
# Base type is _ImportedBinding_euclid_dm__fit.fitsFile
# Element DataContainer (DataContainer) inherited from {http://euclid.esa.org/schema/bas/fit}fitsFile
# Attribute format is restricted from parent
# Attribute format uses Python identifier format
__format = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'format'), 'format', '__httpeuclid_esa_orgschemabasfit_fitsFile_format', _ImportedBinding_euclid_dm__fit.fitsFormatIdentifier, fixed=True, unicode_default=u'le3.catalog.galaxy.photoz', required=True)
__format._DeclarationLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 23, 16)
__format._UseLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 23, 16)
format = property(__format.value, __format.set, None, None)
# Attribute version is restricted from parent
# Attribute version uses Python identifier version
__version = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'version'), 'version', '__httpeuclid_esa_orgschemabasfit_fitsFile_version', _ImportedBinding_euclid_dm__fit.fitsFormatVersion, fixed=True, unicode_default=u'0.1', required=True)
__version._DeclarationLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 25, 16)
__version._UseLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 25, 16)
version = property(__version.value, __version.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__format.name() : __format,
__version.name() : __version
})
Namespace.addCategoryObject('typeBinding', u'catalogGalaxyPhotoZLe3FitFile', catalogGalaxyPhotoZLe3FitFile)
# Complex type {http://euclid.esa.org/schema/pro/le3/cog}catalogGalaxySpecZLe3FitFile with content type ELEMENT_ONLY
class catalogGalaxySpecZLe3FitFile (_ImportedBinding_euclid_dm__fit.fitsFile):
"""Complex type {http://euclid.esa.org/schema/pro/le3/cog}catalogGalaxySpecZLe3FitFile with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'catalogGalaxySpecZLe3FitFile')
_XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 31, 4)
_ElementMap = _ImportedBinding_euclid_dm__fit.fitsFile._ElementMap.copy()
_AttributeMap = _ImportedBinding_euclid_dm__fit.fitsFile._AttributeMap.copy()
# Base type is _ImportedBinding_euclid_dm__fit.fitsFile
# Element DataContainer (DataContainer) inherited from {http://euclid.esa.org/schema/bas/fit}fitsFile
# Attribute format is restricted from parent
# Attribute format uses Python identifier format
__format = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'format'), 'format', '__httpeuclid_esa_orgschemabasfit_fitsFile_format', _ImportedBinding_euclid_dm__fit.fitsFormatIdentifier, fixed=True, unicode_default=u'le3.catalog.galaxy.specz', required=True)
__format._DeclarationLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 37, 16)
__format._UseLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 37, 16)
format = property(__format.value, __format.set, None, None)
# Attribute version is restricted from parent
# Attribute version uses Python identifier version
__version = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'version'), 'version', '__httpeuclid_esa_orgschemabasfit_fitsFile_version', _ImportedBinding_euclid_dm__fit.fitsFormatVersion, fixed=True, unicode_default=u'0.1', required=True)
__version._DeclarationLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 39, 16)
__version._UseLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 39, 16)
version = property(__version.value, __version.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__format.name() : __format,
__version.name() : __version
})
Namespace.addCategoryObject('typeBinding', u'catalogGalaxySpecZLe3FitFile', catalogGalaxySpecZLe3FitFile)
# Complex type {http://euclid.esa.org/schema/pro/le3/cog}catalogClusterLe3FitFile with content type ELEMENT_ONLY
class catalogClusterLe3FitFile (_ImportedBinding_euclid_dm__fit.fitsFile):
"""Complex type {http://euclid.esa.org/schema/pro/le3/cog}catalogClusterLe3FitFile with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'catalogClusterLe3FitFile')
_XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 70, 4)
_ElementMap = _ImportedBinding_euclid_dm__fit.fitsFile._ElementMap.copy()
_AttributeMap = _ImportedBinding_euclid_dm__fit.fitsFile._AttributeMap.copy()
# Base type is _ImportedBinding_euclid_dm__fit.fitsFile
# Element DataContainer (DataContainer) inherited from {http://euclid.esa.org/schema/bas/fit}fitsFile
# Attribute format is restricted from parent
# Attribute format uses Python identifier format
__format = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'format'), 'format', '__httpeuclid_esa_orgschemabasfit_fitsFile_format', _ImportedBinding_euclid_dm__fit.fitsFormatIdentifier, fixed=True, unicode_default=u'le3.catalog.cluster', required=True)
__format._DeclarationLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 76, 16)
__format._UseLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 76, 16)
format = property(__format.value, __format.set, None, None)
# Attribute version is restricted from parent
# Attribute version uses Python identifier version
__version = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'version'), 'version', '__httpeuclid_esa_orgschemabasfit_fitsFile_version', _ImportedBinding_euclid_dm__fit.fitsFormatVersion, fixed=True, unicode_default=u'0.1', required=True)
__version._DeclarationLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 78, 16)
__version._UseLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 78, 16)
version = property(__version.value, __version.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__format.name() : __format,
__version.name() : __version
})
Namespace.addCategoryObject('typeBinding', u'catalogClusterLe3FitFile', catalogClusterLe3FitFile)
# Complex type {http://euclid.esa.org/schema/pro/le3/cog}catalogCluster_membersLe3FitFile with content type ELEMENT_ONLY
class catalogCluster_membersLe3FitFile (_ImportedBinding_euclid_dm__fit.fitsFile):
"""Complex type {http://euclid.esa.org/schema/pro/le3/cog}catalogCluster_membersLe3FitFile with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'catalogCluster_membersLe3FitFile')
_XSDLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 84, 4)
_ElementMap = _ImportedBinding_euclid_dm__fit.fitsFile._ElementMap.copy()
_AttributeMap = _ImportedBinding_euclid_dm__fit.fitsFile._AttributeMap.copy()
# Base type is _ImportedBinding_euclid_dm__fit.fitsFile
# Element DataContainer (DataContainer) inherited from {http://euclid.esa.org/schema/bas/fit}fitsFile
# Attribute format is restricted from parent
# Attribute format uses Python identifier format
__format = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'format'), 'format', '__httpeuclid_esa_orgschemabasfit_fitsFile_format', _ImportedBinding_euclid_dm__fit.fitsFormatIdentifier, fixed=True, unicode_default=u'le3.catalog.cluster_members', required=True)
__format._DeclarationLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 90, 16)
__format._UseLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 90, 16)
format = property(__format.value, __format.set, None, None)
# Attribute version is restricted from parent
# Attribute version uses Python identifier version
__version = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'version'), 'version', '__httpeuclid_esa_orgschemabasfit_fitsFile_version', _ImportedBinding_euclid_dm__fit.fitsFormatVersion, fixed=True, unicode_default=u'0.1', required=True)
__version._DeclarationLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 92, 16)
__version._UseLocation = pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 92, 16)
version = property(__version.value, __version.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__format.name() : __format,
__version.name() : __version
})
Namespace.addCategoryObject('typeBinding', u'catalogCluster_membersLe3FitFile', catalogCluster_membersLe3FitFile)
fofClusterInputCatalog._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'PhotZcatalog'), catalogGalaxyPhotoZLe3FitFile, scope=fofClusterInputCatalog, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 46, 12)))
fofClusterInputCatalog._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'SpecZcatalog'), catalogGalaxySpecZLe3FitFile, scope=fofClusterInputCatalog, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 47, 12)))
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(fofClusterInputCatalog._UseForTag(pyxb.namespace.ExpandedName(None, u'PhotZcatalog')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 46, 12))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
symbol = pyxb.binding.content.ElementUse(fofClusterInputCatalog._UseForTag(pyxb.namespace.ExpandedName(None, u'SpecZcatalog')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 47, 12))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
st_0._set_transitionSet(transitions)
transitions = []
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
fofClusterInputCatalog._Automaton = _BuildAutomaton()
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'link_mode'), pyxb.binding.datatypes.string, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 53, 12), fixed=True, unicode_default=u'dynamic'))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'print_bin_data'), pyxb.binding.datatypes.string, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 54, 12), fixed=True, unicode_default=u'no'))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'link_r'), pyxb.binding.datatypes.double, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 55, 12)))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'link_z'), pyxb.binding.datatypes.double, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 56, 12)))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'kdtree_depth'), pyxb.binding.datatypes.integer, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 57, 12)))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'min_ngal'), pyxb.binding.datatypes.int, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 58, 12)))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'z_min'), pyxb.binding.datatypes.double, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 59, 12)))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'z_max'), pyxb.binding.datatypes.double, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 60, 12)))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'z_bin_size'), pyxb.binding.datatypes.double, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 61, 12)))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'z_ref'), pyxb.binding.datatypes.double, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 62, 12)))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'c'), pyxb.binding.datatypes.double, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 63, 12), fixed=True, unicode_default=u'2.997e5'))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'H0'), pyxb.binding.datatypes.double, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 64, 12), fixed=True, unicode_default=u'100.0'))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'omega_m'), pyxb.binding.datatypes.double, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 65, 12), fixed=True, unicode_default=u'0.3'))
fofParams._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'omega_l'), pyxb.binding.datatypes.double, scope=fofParams, location=pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 66, 12), fixed=True, unicode_default=u'0.7'))
def _BuildAutomaton_ ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_
del _BuildAutomaton_
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'link_mode')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 53, 12))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'print_bin_data')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 54, 12))
st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'link_r')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 55, 12))
st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'link_z')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 56, 12))
st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'kdtree_depth')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 57, 12))
st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'min_ngal')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 58, 12))
st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'z_min')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 59, 12))
st_6 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'z_max')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 60, 12))
st_7 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'z_bin_size')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 61, 12))
st_8 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_8)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'z_ref')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 62, 12))
st_9 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_9)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'c')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 63, 12))
st_10 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_10)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'H0')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 64, 12))
st_11 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_11)
final_update = None
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'omega_m')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 65, 12))
st_12 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_12)
final_update = set()
symbol = pyxb.binding.content.ElementUse(fofParams._UseForTag(pyxb.namespace.ExpandedName(None, u'omega_l')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 66, 12))
st_13 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_13)
transitions = []
transitions.append(fac.Transition(st_1, [
]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_5, [
]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_7, [
]))
st_6._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_8, [
]))
st_7._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_9, [
]))
st_8._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_10, [
]))
st_9._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_11, [
]))
st_10._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_12, [
]))
st_11._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_13, [
]))
st_12._set_transitionSet(transitions)
transitions = []
st_13._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
fofParams._Automaton = _BuildAutomaton_()
def _BuildAutomaton_2 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_2
del _BuildAutomaton_2
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(catalogGalaxyPhotoZLe3FitFile._UseForTag(pyxb.namespace.ExpandedName(None, u'DataContainer')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 21, 20))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
catalogGalaxyPhotoZLe3FitFile._Automaton = _BuildAutomaton_2()
def _BuildAutomaton_3 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_3
del _BuildAutomaton_3
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(catalogGalaxySpecZLe3FitFile._UseForTag(pyxb.namespace.ExpandedName(None, u'DataContainer')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 35, 20))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
catalogGalaxySpecZLe3FitFile._Automaton = _BuildAutomaton_3()
def _BuildAutomaton_4 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_4
del _BuildAutomaton_4
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(catalogClusterLe3FitFile._UseForTag(pyxb.namespace.ExpandedName(None, u'DataContainer')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 74, 20))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
catalogClusterLe3FitFile._Automaton = _BuildAutomaton_4()
def _BuildAutomaton_5 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_5
del _BuildAutomaton_5
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(catalogCluster_membersLe3FitFile._UseForTag(pyxb.namespace.ExpandedName(None, u'DataContainer')), pyxb.utils.utility.Location(u'/home/sartor/workspace/EUCLID/svn_tot/schema/branches/challenge4/Dictionary/pro/le3/cog/euc-test-le3-cog.xsd', 88, 20))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
catalogCluster_membersLe3FitFile._Automaton = _BuildAutomaton_5()
|
sfarrensREPO_NAMEsfofPATH_START.@sfof_extracted@sfof-master@sfof@python@euclid@dm@[email protected]_END.py
|
{
"filename": "_x.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermap/marker/colorbar/_x.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="x", parent_name="scattermap.marker.colorbar", **kwargs
):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermap@marker@colorbar@[email protected]_END.py
|
{
"filename": "ProductModel.py",
"repo_name": "dokester/BayesicFitting",
"repo_path": "BayesicFitting_extracted/BayesicFitting-master/BayesicFitting/source/ProductModel.py",
"type": "Python"
}
|
import numpy as numpy
from . import Tools
from .Model import Model
from .NonLinearModel import NonLinearModel
from astropy import units
__author__ = "Do Kester"
__year__ = 2020
__license__ = "GPL3"
__version__ = "2.5.3"
__url__ = "https://www.bayesicfitting.nl"
__status__ = "Perpetual Beta"
# *
# * This file is part of the BayesicFitting package.
# *
# * BayesicFitting is free software: you can redistribute it and/or modify
# * it under the terms of the GNU Lesser General Public License as
# * published by the Free Software Foundation, either version 3 of
# * the License, or ( at your option ) any later version.
# *
# * BayesicFitting is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU Lesser General Public License for more details.
# *
# * The GPL3 license can be found at <http://www.gnu.org/licenses/>.
# *
# * 2017 - 2020 Do Kester
class ProductModel( NonLinearModel ):
"""
Direct product of 2 (or more) models.
The dimensionality of this model is equal to the number of constituent models.
The number of parameters is the sum of the parameters of the models.
Examples
--------
>>> nxk = 17
>>> nyk = 11
>>> xknots = numpy.arange( nxk , dtype=float ) * 10 # make knots from 0 to 160
>>> yknots = numpy.arange( nyk , dtype=float ) * 10 # make knots from 0 to 100
>>> smx = SplinesModel( xknots )
>>> smy = SplinesModel( yknots )
>>> csm = ProductModel( [smx,smy] )
    >>> print( csm.getNumberOfParameters( ) )   # ( nxk + order - 1 ) + ( nyk + order - 1 )
32
# ... fitter etc. see Fitter
Category mathematics/Fitting
Attributes
----------
models : list of Model
models to be multiplied, one for each dimension.
Attributes from Model
---------------------
npchain, parameters, stdevs, xUnit, yUnit
Attributes from FixedModel
--------------------------
npmax, fixed, parlist, mlist
Attributes from BaseModel
--------------------------
npbase, ndim, priors, posIndex, nonZero, tiny, deltaP, parNames
"""
def __init__( self, models, copy=None, fixed=None, **kwargs ):
"""
Direct product of 2 (or more) models. It has dimensionality equal to
the number of constituent models.
        The models are given as input, one for each of the consecutive columns in xdata.
The number of parameters is the sum of the parameters of the
constituent models
Parameters
----------
models : list of Model
the constituent models
copy : ProductModel
model to be copied
fixed : dict
If not None raise AttributeError.
Raises
------
ValueError
            When one of the models is 2 (or more) dimensional
        AttributeError : When fixed is not None
"""
if fixed is not None :
raise AttributeError( "ProductModel cannot have fixed parameters" )
np = 0
for m in models :
if m.ndim > 1 :
raise ValueError( "Only 1-dim models are allowed in ProductModel" )
np += m.npchain
super( ProductModel, self ).__init__( np, ndim=len( models ), copy=copy, **kwargs )
self.models = models
def copy( self ):
""" Copy method. """
mdls = [m.copy() for m in self.models]
return ProductModel( mdls, copy=self )
def __setattr__( self, name, value ):
"""
Set attributes: models
"""
dlst = {'models': Model }
if Tools.setListOfAttributes( self, name, value, dlst ) :
pass
else :
super( ProductModel, self ).__setattr__( name, value )
def baseResult( self, xdata, params ):
"""
        Returns the result at the input values.
        The result is the product of the results of the constituent models,
        each evaluated on its own column of xdata.
Parameters
----------
xdata : array_like
value at which to calculate the partials
params : array_like
parameters to the model.
"""
ndata = Tools.length( xdata[:,0] )
k = 0
n = 0
res = numpy.ones( ndata, dtype=float )
for m in self.models :
res *= m.result( xdata[:,k], params[n:n+m.npchain] )
k += 1
n += m.npchain
return res
def basePartial( self, xdata, params, parlist=None ):
"""
Returns the partials at the input value.
        The partials of each constituent model (to its own parameters) are
        multiplied by the results of all other constituent models.
Parameters
----------
xdata : array_like
value at which to calculate the partials
params : array_like
parameters to the model.
parlist : array_like
not used in this model
"""
ndata = Tools.length( xdata[:,0] )
partial = numpy.ndarray( ( 0, ndata ), dtype=float ) # transpose of needed shape
k = 0
n = 0
res = numpy.ones( ndata, dtype=float )
for m in self.models :
nm = n + m.npchain
x = m.result( xdata[:,k], params[n:nm] )
partial *= x
p = res * m.partial( xdata[:,k], params[n:nm] ).transpose()
partial = numpy.append( partial, p, axis=0 )
res *= x
k += 1
n = nm
return partial.transpose() # transpose back to proper shape
def baseName( self ):
"""
Returns a string representation of the model.
"""
strm = ""
ch = "xyzuvw"
k = 0
for m in self.models :
strm += m.shortName() + "(%s) * " % ch[k]
k += 1
strm = strm[:-3]
return str( "%dd-Product: f(%s:p) = %s"%(self.ndim, ch[:k], strm) )
def baseParameterName( self, k ):
"""
        Return the name of a parameter as "param<dim>_<seq>".
Parameters
----------
k : int
the kth parameter.
"""
strpar = "param"
m = 0
for mdl in self.models :
nx = mdl.npchain
if k < nx :
return strpar + "%d_%d"%(m,k)
k -= nx
m += 1
return strpar
def baseParameterUnit( self, k ):
"""
Return the unit of a parameter.
Parameters
----------
k : int
the kth parameter.
"""
u = units.Unit( 1.0 )
n = 0
# print( self.attsingle )
# print( self.models[0].attsingle )
# print( self.models[1].attsingle )
# print( self.models[0].xUnit, self.models[1].xUnit )
for mdl in self.models :
mdl.xUnit = self.xUnit[n]
# print( mdl.ndim, mdl.xUnit, self.xUnit[n] )
nx = mdl.npbase
if k < nx :
# print( k, nx, mdl.getParameterUnit( k ), u )
return mdl.getParameterUnit( k ) / u
n += 1
k -= nx
u = self.yUnit
return u
|
dokesterREPO_NAMEBayesicFittingPATH_START.@BayesicFitting_extracted@BayesicFitting-master@BayesicFitting@[email protected]@.PATH_END.py
|
{
"filename": "gmock_faq.md",
"repo_name": "google/googletest",
"repo_path": "googletest_extracted/googletest-main/docs/gmock_faq.md",
"type": "Markdown"
}
|
# Legacy gMock FAQ
### When I call a method on my mock object, the method for the real object is invoked instead. What's the problem?
In order for a method to be mocked, it must be *virtual*, unless you use the
[high-perf dependency injection technique](gmock_cook_book.md#MockingNonVirtualMethods).
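For illustration, here is a minimal sketch (the `Foo`/`MockFoo` classes below are hypothetical): only the virtual method can be overridden by the mock, so calls to the non-virtual one always run the real body.

```cpp
class Foo {
 public:
  virtual ~Foo() = default;
  // Virtual: MockFoo's MOCK_METHOD override is invoked through a Foo*.
  virtual int DoThis() = 0;
  // Non-virtual: calls through a Foo* always hit this body, never the mock.
  int DoThat() { return 42; }
};

class MockFoo : public Foo {
 public:
  MOCK_METHOD(int, DoThis, (), (override));
};
```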
### Can I mock a variadic function?
You cannot mock a variadic function (i.e. a function taking ellipsis (`...`)
arguments) directly in gMock.
The problem is that in general, there is *no way* for a mock object to know how
many arguments are passed to the variadic method, and what the arguments' types
are. Only the *author of the base class* knows the protocol, and we cannot look
into his or her head.
Therefore, to mock such a function, the *user* must teach the mock object how to
figure out the number of arguments and their types. One way to do it is to
provide overloaded versions of the function.
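As a minimal sketch of that approach (the `Logger` interface below is hypothetical, not part of gMock), the base class exposes fixed-arity overloads instead of a variadic `Log(const char* fmt, ...)`, and each overload can be mocked normally:

```cpp
#include <string>

class Logger {
 public:
  virtual ~Logger() = default;
  virtual void Log(const std::string& message) = 0;
  virtual void Log(const std::string& message, int value) = 0;
};

class MockLogger : public Logger {
 public:
  MOCK_METHOD(void, Log, (const std::string& message), (override));
  MOCK_METHOD(void, Log, (const std::string& message, int value), (override));
};
```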
Ellipsis arguments are inherited from C and not really a C++ feature. They are
unsafe to use and don't work with arguments that have constructors or
destructors. Therefore we recommend avoiding them in C++ as much as possible.
### MSVC gives me warning C4301 or C4373 when I define a mock method with a const parameter. Why?
If you compile this using Microsoft Visual C++ 2005 SP1:
```cpp
class Foo {
...
virtual void Bar(const int i) = 0;
};
class MockFoo : public Foo {
...
MOCK_METHOD(void, Bar, (const int i), (override));
};
```
You may get the following warning:
```shell
warning C4301: 'MockFoo::Bar': overriding virtual function only differs from 'Foo::Bar' by const/volatile qualifier
```
This is a MSVC bug. The same code compiles fine with gcc, for example. If you
use Visual C++ 2008 SP1, you would get the warning:
```shell
warning C4373: 'MockFoo::Bar': virtual function overrides 'Foo::Bar', previous versions of the compiler did not override when parameters only differed by const/volatile qualifiers
```
In C++, if you *declare* a function with a `const` parameter, the `const`
modifier is ignored. Therefore, the `Foo` base class above is equivalent to:
```cpp
class Foo {
...
virtual void Bar(int i) = 0; // int or const int? Makes no difference.
};
```
In fact, you can *declare* `Bar()` with an `int` parameter, and define it with a
`const int` parameter. The compiler will still match them up.
Since making a parameter `const` is meaningless in the method declaration, we
recommend removing it in both `Foo` and `MockFoo`. That should work around the
VC bug.
Note that we are talking about the *top-level* `const` modifier here. If the
function parameter is passed by pointer or reference, declaring the pointee or
referee as `const` is still meaningful. For example, the following two
declarations are *not* equivalent:
```cpp
void Bar(int* p); // Neither p nor *p is const.
void Bar(const int* p); // p is not const, but *p is.
```
### I can't figure out why gMock thinks my expectations are not satisfied. What should I do?
You might want to run your test with `--gmock_verbose=info`. This flag lets
gMock print a trace of every mock function call it receives. By studying the
trace, you'll gain insights on why the expectations you set are not met.
If you see the message "The mock function has no default action set, and its
return type has no default value set.", then try
[adding a default action](gmock_cheat_sheet.md#OnCall). Due to a known issue,
unexpected calls on mocks without default actions don't print out a detailed
comparison between the actual arguments and the expected arguments.
### My program crashed and `ScopedMockLog` spit out tons of messages. Is it a gMock bug?
gMock and `ScopedMockLog` are likely doing the right thing here.
When a test crashes, the failure signal handler will try to log a lot of
information (the stack trace, and the address map, for example). The messages
are compounded if you have many threads with deep stacks. When `ScopedMockLog`
intercepts these messages and finds that they don't match any expectations, it
prints an error for each of them.
You can learn to ignore the errors, or you can rewrite your expectations to make
your test more robust, for example, by adding something like:
```cpp
using ::testing::AnyNumber;
using ::testing::Not;
...
// Ignores any log not done by us.
EXPECT_CALL(log, Log(_, Not(EndsWith("/my_file.cc")), _))
.Times(AnyNumber());
```
### How can I assert that a function is NEVER called?
```cpp
using ::testing::_;
...
EXPECT_CALL(foo, Bar(_))
.Times(0);
```
### I have a failed test where gMock tells me TWICE that a particular expectation is not satisfied. Isn't this redundant?
When gMock detects a failure, it prints relevant information (the mock function
arguments, the state of relevant expectations, etc.) to help the user debug.
If another failure is detected, gMock will do the same, including printing the
state of relevant expectations.
Sometimes an expectation's state didn't change between two failures, and you'll
see the same description of the state twice. They are however *not* redundant,
as they refer to *different points in time*. The fact they are the same *is*
interesting information.
### I get a heapcheck failure when using a mock object, but using a real object is fine. What can be wrong?
Does the class (hopefully a pure interface) you are mocking have a virtual
destructor?
Whenever you derive from a base class, make sure its destructor is virtual.
Otherwise Bad Things will happen. Consider the following code:
```cpp
class Base {
public:
// Not virtual, but should be.
~Base() { ... }
...
};
class Derived : public Base {
public:
...
private:
std::string value_;
};
...
Base* p = new Derived;
...
delete p; // Surprise! ~Base() will be called, but ~Derived() will not
// - value_ is leaked.
```
By changing `~Base()` to virtual, `~Derived()` will be correctly called when
`delete p` is executed, and the heap checker will be happy.
### The "newer expectations override older ones" rule makes writing expectations awkward. Why does gMock do that?
When people complain about this, often they are referring to code like:
```cpp
using ::testing::Return;
...
// foo.Bar() should be called twice, return 1 the first time, and return
// 2 the second time. However, I have to write the expectations in the
// reverse order. This sucks big time!!!
EXPECT_CALL(foo, Bar())
.WillOnce(Return(2))
.RetiresOnSaturation();
EXPECT_CALL(foo, Bar())
.WillOnce(Return(1))
.RetiresOnSaturation();
```
The problem is that they didn't pick the **best** way to express the test's
intent.
By default, expectations don't have to be matched in *any* particular order. If
you want them to match in a certain order, you need to be explicit. This is
gMock's (and jMock's) fundamental philosophy: it's easy to accidentally
over-specify your tests, and we want to make it harder to do so.
There are two better ways to write the test spec. You could either put the
expectations in sequence:
```cpp
using ::testing::Return;
...
// foo.Bar() should be called twice, return 1 the first time, and return
// 2 the second time. Using a sequence, we can write the expectations
// in their natural order.
{
InSequence s;
EXPECT_CALL(foo, Bar())
.WillOnce(Return(1))
.RetiresOnSaturation();
EXPECT_CALL(foo, Bar())
.WillOnce(Return(2))
.RetiresOnSaturation();
}
```
or you can put the sequence of actions in the same expectation:
```cpp
using ::testing::Return;
...
// foo.Bar() should be called twice, return 1 the first time, and return
// 2 the second time.
EXPECT_CALL(foo, Bar())
.WillOnce(Return(1))
.WillOnce(Return(2))
.RetiresOnSaturation();
```
Back to the original questions: why does gMock search the expectations (and
`ON_CALL`s) from back to front? Because this allows a user to set up a mock's
behavior for the common case early (e.g. in the mock's constructor or the test
fixture's set-up phase) and customize it with more specific rules later. If
gMock searches from front to back, this very useful pattern won't be possible.
### gMock prints a warning when a function without EXPECT_CALL is called, even if I have set its behavior using ON_CALL. Would it be reasonable not to show the warning in this case?
When choosing between being neat and being safe, we lean toward the latter. So
the answer is that we think it's better to show the warning.
Often people write `ON_CALL`s in the mock object's constructor or `SetUp()`, as
the default behavior rarely changes from test to test. Then in the test body
they set the expectations, which are often different for each test. Having an
`ON_CALL` in the set-up part of a test doesn't mean that the calls are expected.
If there's no `EXPECT_CALL` and the method is called, it's possibly an error. If
we quietly let the call go through without notifying the user, bugs may creep in
unnoticed.
If, however, you are sure that the calls are OK, you can write
```cpp
using ::testing::_;
...
EXPECT_CALL(foo, Bar(_))
.WillRepeatedly(...);
```
instead of
```cpp
using ::testing::_;
...
ON_CALL(foo, Bar(_))
.WillByDefault(...);
```
This tells gMock that you do expect the calls and no warning should be printed.
Also, you can control the verbosity by specifying `--gmock_verbose=error`. Other
values are `info` and `warning`. If you find the output too noisy when
debugging, just choose a less verbose level.
### How can I delete the mock function's argument in an action?
If your mock function takes a pointer argument and you want to delete that
argument, you can use `testing::DeleteArg<N>()` to delete the N'th (zero-indexed)
argument:
```cpp
using ::testing::_;
...
MOCK_METHOD(void, Bar, (X* x, const Y& y));
...
EXPECT_CALL(mock_foo_, Bar(_, _))
      .WillOnce(testing::DeleteArg<0>());
```
### How can I perform an arbitrary action on a mock function's argument?
If you find yourself needing to perform some action that's not supported by
gMock directly, remember that you can define your own actions using
[`MakeAction()`](#NewMonoActions) or
[`MakePolymorphicAction()`](#NewPolyActions), or you can write a stub function
and invoke it using [`Invoke()`](#FunctionsAsActions).
```cpp
using ::testing::_;
using ::testing::Invoke;
...
MOCK_METHOD(void, Bar, (X* p));
...
EXPECT_CALL(mock_foo_, Bar(_))
.WillOnce(Invoke(MyAction(...)));
```
### My code calls a static/global function. Can I mock it?
You can, but you need to make some changes.
In general, if you find yourself needing to mock a static function, it's a sign
that your modules are too tightly coupled (and less flexible, less reusable,
less testable, etc). You are probably better off defining a small interface and
calling the function through that interface, which can then be easily mocked. It's
a bit of work initially, but usually pays for itself quickly.
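As a hedged sketch of that refactoring (the `ReadConfigValue` free function and the `ConfigReader` names are hypothetical), production code depends on the interface, the real implementation forwards to the free function, and tests substitute a mock:

```cpp
#include <string>

// Hypothetical existing free function that cannot be mocked directly.
int ReadConfigValue(const std::string& key);

class ConfigReader {
 public:
  virtual ~ConfigReader() = default;
  virtual int ReadValue(const std::string& key) = 0;
};

class RealConfigReader : public ConfigReader {
 public:
  int ReadValue(const std::string& key) override {
    return ReadConfigValue(key);  // forwards to the real free function
  }
};

class MockConfigReader : public ConfigReader {
 public:
  MOCK_METHOD(int, ReadValue, (const std::string& key), (override));
};
```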
This Google Testing Blog
[post](https://testing.googleblog.com/2008/06/defeat-static-cling.html) says it
excellently. Check it out.
### My mock object needs to do complex stuff. It's a lot of pain to specify the actions. gMock sucks!
I know it's not a question, but you get an answer for free anyway. :-)
With gMock, you can create mocks in C++ easily. And people might be tempted to
use them everywhere. Sometimes they work great, and sometimes you may find them,
well, a pain to use. So, what's wrong in the latter case?
When you write a test without using mocks, you exercise the code and assert that
it returns the correct value or that the system is in an expected state. This is
sometimes called "state-based testing".
Mocks are great for what some call "interaction-based" testing: instead of
checking the system state at the very end, mock objects verify that they are
invoked the right way and report an error as soon as it arises, giving you a
handle on the precise context in which the error was triggered. This is often
more effective and economical to do than state-based testing.
If you are doing state-based testing and using a test double just to simulate
the real object, you are probably better off using a fake. Using a mock in this
case causes pain, as it's not a strong point for mocks to perform complex
actions. If you experience this and think that mocks suck, you are just not
using the right tool for your problem. Or, you might be trying to solve the
wrong problem. :-)
### I got a warning "Uninteresting function call encountered - default action taken.." Should I panic?
By all means, NO! It's just an FYI. :-)
What it means is that you have a mock function, you haven't set any expectations
on it (by gMock's rule this means that you are not interested in calls to this
function and therefore it can be called any number of times), and it is called.
That's OK - you didn't say it's not OK to call the function!
What if you actually meant to disallow this function to be called, but forgot to
write `EXPECT_CALL(foo, Bar()).Times(0)`? While one can argue that it's the
user's fault, gMock tries to be nice and prints you a note.
So, when you see the message and believe that there shouldn't be any
uninteresting calls, you should investigate what's going on. To make your life
easier, gMock dumps the stack trace when an uninteresting call is encountered.
From that you can figure out which mock function it is, and how it is called.
### I want to define a custom action. Should I use Invoke() or implement the ActionInterface interface?
Either way is fine - you want to choose the one that's more convenient for your
circumstance.
Usually, if your action is for a particular function type, defining it using
`Invoke()` should be easier; if your action can be used in functions of
different types (e.g. if you are defining `Return(*value*)`),
`MakePolymorphicAction()` is easiest. Sometimes you want precise control on what
types of functions the action can be used in, and implementing `ActionInterface`
is the way to go here. See the implementation of `Return()` in `gmock-actions.h`
for an example.
### I use SetArgPointee() in WillOnce(), but gcc complains about "conflicting return type specified". What does it mean?
You got this error as gMock has no idea what value it should return when the
mock method is called. `SetArgPointee()` says what the side effect is, but
doesn't say what the return value should be. You need `DoAll()` to chain a
`SetArgPointee()` with a `Return()` that provides a value appropriate to the API
being mocked.
See this [recipe](gmock_cook_book.md#mocking-side-effects) for more details and
an example.
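For instance, a minimal sketch (the mock method `GetValue` and the `mock_foo_` object are hypothetical) that combines the side effect with an explicit return value:

```cpp
using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;
...
  // Hypothetical mock method: MOCK_METHOD(bool, GetValue, (int* out));
  EXPECT_CALL(mock_foo_, GetValue(_))
      .WillOnce(DoAll(SetArgPointee<0>(42),  // the side effect
                      Return(true)));        // the return value the compiler asked for
```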
### I have a huge mock class, and Microsoft Visual C++ runs out of memory when compiling it. What can I do?
We've noticed that when the `/clr` compiler flag is used, Visual C++ uses 5~6
times as much memory when compiling a mock class. We suggest avoiding `/clr`
when compiling native C++ mocks.
|
googleREPO_NAMEgoogletestPATH_START.@googletest_extracted@googletest-main@docs@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/violin/unselected/marker/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._size import SizeValidator
from ._opacity import OpacityValidator
from ._color import ColorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._size.SizeValidator",
"._opacity.OpacityValidator",
"._color.ColorValidator",
],
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@violin@unselected@marker@[email protected]_END.py
|
{
"filename": "_bgcolorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/treemap/marker/pattern/_bgcolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bgcolorsrc", parent_name="treemap.marker.pattern", **kwargs
):
super(BgcolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@treemap@marker@pattern@[email protected]_END.py
|
{
"filename": "widget.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipywidgets/py3/ipywidgets/widgets/widget.py",
"type": "Python"
}
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Base Widget class. Allows user to create widgets in the back-end that render
in the Jupyter notebook front-end.
"""
import os
import sys
import typing
from contextlib import contextmanager
from collections.abc import Iterable
from IPython import get_ipython
from traitlets import (
Any, HasTraits, Unicode, Dict, Instance, List, Int, Set, Bytes, observe, default, Container,
Undefined)
from json import loads as jsonloads, dumps as jsondumps
from .. import comm
from base64 import standard_b64encode
from .utils import deprecation, _get_frame
from .._version import __protocol_version__, __control_protocol_version__, __jupyter_widgets_base_version__
import inspect
TRAITLETS_FILE = inspect.getfile(HasTraits)
# Based on jupyter_core.paths.envset
def envset(name, default):
"""Return True if the given environment variable is turned on, otherwise False
If the environment variable is set, True will be returned if it is assigned to a value
other than 'no', 'n', 'false', 'off', '0', or '0.0' (case insensitive).
If the environment variable is not set, the default value is returned.
"""
if name in os.environ:
return os.environ[name].lower() not in ['no', 'n', 'false', 'off', '0', '0.0']
else:
return bool(default)
PROTOCOL_VERSION_MAJOR = __protocol_version__.split('.')[0]
CONTROL_PROTOCOL_VERSION_MAJOR = __control_protocol_version__.split('.')[0]
JUPYTER_WIDGETS_ECHO = envset('JUPYTER_WIDGETS_ECHO', default=True)
# we keep a strong reference for every widget created, for a discussion on using weak references see:
# https://github.com/jupyter-widgets/ipywidgets/issues/1345
_instances : typing.MutableMapping[str, "Widget"] = {}
def _widget_to_json(x, obj):
if isinstance(x, dict):
return {k: _widget_to_json(v, obj) for k, v in x.items()}
elif isinstance(x, (list, tuple)):
return [_widget_to_json(v, obj) for v in x]
elif isinstance(x, Widget):
return "IPY_MODEL_" + x.model_id
else:
return x
def _json_to_widget(x, obj):
if isinstance(x, dict):
return {k: _json_to_widget(v, obj) for k, v in x.items()}
elif isinstance(x, (list, tuple)):
return [_json_to_widget(v, obj) for v in x]
elif isinstance(x, str) and x.startswith('IPY_MODEL_') and x[10:] in _instances:
return _instances[x[10:]]
else:
return x
widget_serialization = {
'from_json': _json_to_widget,
'to_json': _widget_to_json
}
_binary_types = (memoryview, bytearray, bytes)
def _put_buffers(state, buffer_paths, buffers):
"""The inverse of _remove_buffers, except here we modify the existing dict/lists.
Modifying should be fine, since this is used when state comes from the wire.
"""
for buffer_path, buffer in zip(buffer_paths, buffers):
# we'd like to set say sync_data['x'][0]['y'] = buffer
# where buffer_path in this example would be ['x', 0, 'y']
obj = state
for key in buffer_path[:-1]:
obj = obj[key]
obj[buffer_path[-1]] = buffer
def _separate_buffers(substate, path, buffer_paths, buffers):
"""For internal, see _remove_buffers"""
# remove binary types from dicts and lists, but keep track of their paths
# any part of the dict/list that needs modification will be cloned, so the original stays untouched
# e.g. {'x': {'ar': ar}, 'y': [ar2, ar3]}, where ar/ar2/ar3 are binary types
# will result in {'x': {}, 'y': [None, None]}, [ar, ar2, ar3], [['x', 'ar'], ['y', 0], ['y', 1]]
# instead of removing elements from the list, this will make replacing the buffers on the js side much easier
if isinstance(substate, (list, tuple)):
is_cloned = False
for i, v in enumerate(substate):
if isinstance(v, _binary_types):
if not is_cloned:
substate = list(substate) # shallow clone list/tuple
is_cloned = True
substate[i] = None
buffers.append(v)
buffer_paths.append(path + [i])
elif isinstance(v, (dict, list, tuple)):
vnew = _separate_buffers(v, path + [i], buffer_paths, buffers)
if v is not vnew: # only assign when value changed
if not is_cloned:
substate = list(substate) # clone list/tuple
is_cloned = True
substate[i] = vnew
elif isinstance(substate, dict):
is_cloned = False
for k, v in substate.items():
if isinstance(v, _binary_types):
if not is_cloned:
substate = dict(substate) # shallow clone dict
is_cloned = True
del substate[k]
buffers.append(v)
buffer_paths.append(path + [k])
elif isinstance(v, (dict, list, tuple)):
vnew = _separate_buffers(v, path + [k], buffer_paths, buffers)
if v is not vnew: # only assign when value changed
if not is_cloned:
                        substate = dict(substate) # shallow clone dict
is_cloned = True
substate[k] = vnew
else:
raise ValueError("expected state to be a list or dict, not %r" % substate)
return substate
def _remove_buffers(state):
"""Return (state_without_buffers, buffer_paths, buffers) for binary message parts
A binary message part is a memoryview, bytearray, or python 3 bytes object.
As an example:
>>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}}
>>> _remove_buffers(state)
({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']],
[<memory at 0x107ffec48>, <memory at 0x107ffed08>])
"""
buffer_paths, buffers = [], []
state = _separate_buffers(state, [], buffer_paths, buffers)
return state, buffer_paths, buffers
def _buffer_list_equal(a, b):
"""Compare two lists of buffers for equality.
Used to decide whether two sequences of buffers (memoryviews,
bytearrays, or python 3 bytes) differ, such that a sync is needed.
Returns True if equal, False if unequal
"""
if len(a) != len(b):
return False
if a == b:
return True
for ia, ib in zip(a, b):
# Check byte equality, since bytes are what is actually synced
# NOTE: Simple ia != ib does not always work as intended, as
# e.g. memoryview(np.frombuffer(ia, dtype='float32')) !=
# memoryview(np.frombuffer(b)), since the format info differs.
# Compare without copying.
if memoryview(ia).cast('B') != memoryview(ib).cast('B'):
return False
return True
class LoggingHasTraits(HasTraits):
"""A parent class for HasTraits that log.
Subclasses have a log trait, and the default behavior
is to get the logger from the currently running Application.
"""
log = Instance('logging.Logger')
@default('log')
def _log_default(self):
from traitlets import log
return log.get_logger()
class CallbackDispatcher(LoggingHasTraits):
"""A structure for registering and running callbacks"""
callbacks = List()
def __call__(self, *args, **kwargs):
"""Call all of the registered callbacks."""
value = None
for callback in self.callbacks:
try:
local_value = callback(*args, **kwargs)
except Exception as e:
ip = get_ipython()
if ip is None:
self.log.warning("Exception in callback %s: %s", callback, e, exc_info=True)
else:
ip.showtraceback()
else:
value = local_value if local_value is not None else value
return value
def register_callback(self, callback, remove=False):
"""(Un)Register a callback
Parameters
----------
callback: method handle
Method to be registered or unregistered.
remove=False: bool
Whether to unregister the callback."""
# (Un)Register the callback.
if remove and callback in self.callbacks:
self.callbacks.remove(callback)
elif not remove and callback not in self.callbacks:
self.callbacks.append(callback)
def _show_traceback(method):
"""decorator for showing tracebacks"""
def m(self, *args, **kwargs):
try:
return(method(self, *args, **kwargs))
except Exception as e:
ip = get_ipython()
if ip is None:
self.log.warning("Exception in widget method %s: %s", method, e, exc_info=True)
else:
ip.showtraceback()
return m
class WidgetRegistry:
def __init__(self):
self._registry = {}
def register(self, model_module, model_module_version_range, model_name, view_module, view_module_version_range, view_name, klass):
"""Register a value"""
model_module = self._registry.setdefault(model_module, {})
model_version = model_module.setdefault(model_module_version_range, {})
model_name = model_version.setdefault(model_name, {})
view_module = model_name.setdefault(view_module, {})
view_version = view_module.setdefault(view_module_version_range, {})
view_version[view_name] = klass
def get(self, model_module, model_module_version, model_name, view_module, view_module_version, view_name):
"""Get a value"""
module_versions = self._registry[model_module]
# The python semver module doesn't work well, for example, it can't do match('3', '*')
# so we just take the first model module version.
#model_names = next(v for k, v in module_versions.items()
# if semver.match(model_module_version, k))
model_names = list(module_versions.values())[0]
view_modules = model_names[model_name]
view_versions = view_modules[view_module]
# The python semver module doesn't work well, so we just take the first view module version
#view_names = next(v for k, v in view_versions.items()
# if semver.match(view_module_version, k))
view_names = list(view_versions.values())[0]
widget_class = view_names[view_name]
return widget_class
def items(self):
for model_module, mm in sorted(self._registry.items()):
for model_version, mv in sorted(mm.items()):
for model_name, vm in sorted(mv.items()):
for view_module, vv in sorted(vm.items()):
for view_version, vn in sorted(vv.items()):
for view_name, widget in sorted(vn.items()):
yield (model_module, model_version, model_name, view_module, view_version, view_name), widget
# a registry of widgets by module, version, and name so we can create a Python model from widgets
# that are constructed from the frontend.
_registry = WidgetRegistry()
def register(widget):
"""A decorator registering a widget class in the widget registry."""
w = widget.class_traits()
_registry.register(w['_model_module'].default_value,
w['_model_module_version'].default_value,
w['_model_name'].default_value,
w['_view_module'].default_value,
w['_view_module_version'].default_value,
w['_view_name'].default_value,
widget)
return widget
class _staticproperty(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
assert owner_self is None
return self.fget()
class Widget(LoggingHasTraits):
#-------------------------------------------------------------------------
# Class attributes
#-------------------------------------------------------------------------
_widget_construction_callback = None
_control_comm = None
@_staticproperty
def widgets():
# Because this is a static attribute, it will be accessed when initializing this class. In that case, since a user
# did not explicitly try to use this attribute, we do not want to throw a deprecation warning.
# So we check if the thing calling this static property is one of the known initialization functions in traitlets.
frame = _get_frame(2)
if not (frame.f_code.co_filename == TRAITLETS_FILE and (frame.f_code.co_name in ('getmembers', 'setup_instance', 'setup_class'))):
deprecation("Widget.widgets is deprecated.")
return _instances
@_staticproperty
def _active_widgets():
# Because this is a static attribute, it will be accessed when initializing this class. In that case, since a user
# did not explicitly try to use this attribute, we do not want to throw a deprecation warning.
# So we check if the thing calling this static property is one of the known initialization functions in traitlets.
frame = _get_frame(2)
if not (frame.f_code.co_filename == TRAITLETS_FILE and (frame.f_code.co_name in ('getmembers', 'setup_instance', 'setup_class'))):
deprecation("Widget._active_widgets is deprecated.")
return _instances
@_staticproperty
def _widget_types():
# Because this is a static attribute, it will be accessed when initializing this class. In that case, since a user
# did not explicitly try to use this attribute, we do not want to throw a deprecation warning.
# So we check if the thing calling this static property is one of the known initialization functions in traitlets.
frame = _get_frame(2)
if not (frame.f_code.co_filename == TRAITLETS_FILE and (frame.f_code.co_name in ('getmembers', 'setup_instance', 'setup_class'))):
deprecation("Widget._widget_types is deprecated.")
return _registry
@_staticproperty
def widget_types():
# Because this is a static attribute, it will be accessed when initializing this class. In that case, since a user
# did not explicitly try to use this attribute, we do not want to throw a deprecation warning.
# So we check if the thing calling this static property is one of the known initialization functions in traitlets.
frame = _get_frame(2)
if not (frame.f_code.co_filename == TRAITLETS_FILE and (frame.f_code.co_name in ('getmembers', 'setup_instance', 'setup_class'))):
deprecation("Widget.widget_types is deprecated.")
return _registry
@classmethod
def close_all(cls):
for widget in list(_instances.values()):
widget.close()
@staticmethod
def on_widget_constructed(callback):
"""Registers a callback to be called when a widget is constructed.
The callback must have the following signature:
callback(widget)"""
Widget._widget_construction_callback = callback
@staticmethod
def _call_widget_constructed(widget):
"""Static method, called when a widget is constructed."""
if Widget._widget_construction_callback is not None and callable(Widget._widget_construction_callback):
Widget._widget_construction_callback(widget)
@classmethod
def handle_control_comm_opened(cls, comm, msg):
"""
Class method, called when the comm-open message on the
"jupyter.widget.control" comm channel is received
"""
version = msg.get('metadata', {}).get('version', '')
if version.split('.')[0] != CONTROL_PROTOCOL_VERSION_MAJOR:
raise ValueError("Incompatible widget control protocol versions: received version %r, expected version %r"%(version, __control_protocol_version__))
cls._control_comm = comm
cls._control_comm.on_msg(cls._handle_control_comm_msg)
@classmethod
def _handle_control_comm_msg(cls, msg):
# This shouldn't happen unless someone calls this method manually
if cls._control_comm is None:
raise RuntimeError('Control comm has not been properly opened')
data = msg['content']['data']
method = data['method']
if method == 'request_states':
# Send back the full widgets state
cls.get_manager_state()
widgets = _instances.values()
full_state = {}
drop_defaults = False
for widget in widgets:
full_state[widget.model_id] = {
'model_name': widget._model_name,
'model_module': widget._model_module,
'model_module_version': widget._model_module_version,
'state': widget.get_state(drop_defaults=drop_defaults),
}
full_state, buffer_paths, buffers = _remove_buffers(full_state)
cls._control_comm.send(dict(
method='update_states',
states=full_state,
buffer_paths=buffer_paths
), buffers=buffers)
else:
raise RuntimeError('Unknown front-end to back-end widget control msg with method "%s"' % method)
@staticmethod
def handle_comm_opened(comm, msg):
"""Static method, called when a widget is constructed."""
version = msg.get('metadata', {}).get('version', '')
if version.split('.')[0] != PROTOCOL_VERSION_MAJOR:
raise ValueError("Incompatible widget protocol versions: received version %r, expected version %r"%(version, __protocol_version__))
data = msg['content']['data']
state = data['state']
# Find the widget class to instantiate in the registered widgets
widget_class = _registry.get(state['_model_module'],
state['_model_module_version'],
state['_model_name'],
state['_view_module'],
state['_view_module_version'],
state['_view_name'])
widget = widget_class(comm=comm)
if 'buffer_paths' in data:
_put_buffers(state, data['buffer_paths'], msg['buffers'])
widget.set_state(state)
@staticmethod
def get_manager_state(drop_defaults=False, widgets=None):
"""Returns the full state for a widget manager for embedding
:param drop_defaults: when True, it will not include default value
:param widgets: list with widgets to include in the state (or all widgets when None)
:return:
"""
state = {}
if widgets is None:
widgets = _instances.values()
for widget in widgets:
state[widget.model_id] = widget._get_embed_state(drop_defaults=drop_defaults)
return {'version_major': 2, 'version_minor': 0, 'state': state}
def _get_embed_state(self, drop_defaults=False):
state = {
'model_name': self._model_name,
'model_module': self._model_module,
'model_module_version': self._model_module_version
}
model_state, buffer_paths, buffers = _remove_buffers(self.get_state(drop_defaults=drop_defaults))
state['state'] = model_state
if len(buffers) > 0:
state['buffers'] = [{'encoding': 'base64',
'path': p,
'data': standard_b64encode(d).decode('ascii')}
for p, d in zip(buffer_paths, buffers)]
return state
def get_view_spec(self):
return dict(version_major=2, version_minor=0, model_id=self._model_id)
#-------------------------------------------------------------------------
# Traits
#-------------------------------------------------------------------------
_model_name = Unicode('WidgetModel',
help="Name of the model.", read_only=True).tag(sync=True)
_model_module = Unicode('@jupyter-widgets/base',
help="The namespace for the model.", read_only=True).tag(sync=True)
_model_module_version = Unicode(__jupyter_widgets_base_version__,
help="A semver requirement for namespace version containing the model.", read_only=True).tag(sync=True)
_view_name = Unicode(None, allow_none=True,
help="Name of the view.").tag(sync=True)
_view_module = Unicode(None, allow_none=True,
help="The namespace for the view.").tag(sync=True)
_view_module_version = Unicode('',
help="A semver requirement for the namespace version containing the view.").tag(sync=True)
_view_count = Int(None, allow_none=True,
help="EXPERIMENTAL: The number of views of the model displayed in the frontend. This attribute is experimental and may change or be removed in the future. None signifies that views will not be tracked. Set this to 0 to start tracking view creation/deletion.").tag(sync=True)
comm = Any(allow_none=True)
keys = List(help="The traits which are synced.")
@default('keys')
def _default_keys(self):
return [name for name in self.traits(sync=True)]
_property_lock = Dict()
_holding_sync = False
_states_to_send = Set()
_msg_callbacks = Instance(CallbackDispatcher, ())
#-------------------------------------------------------------------------
# (Con/de)structor
#-------------------------------------------------------------------------
def __init__(self, **kwargs):
"""Public constructor"""
self._model_id = kwargs.pop('model_id', None)
super().__init__(**kwargs)
Widget._call_widget_constructed(self)
self.open()
def __copy__(self):
raise NotImplementedError("Widgets cannot be copied; custom implementation required")
def __deepcopy__(self, memo):
raise NotImplementedError("Widgets cannot be copied; custom implementation required")
def __del__(self):
"""Object disposal"""
self.close()
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
def open(self):
"""Open a comm to the frontend if one isn't already open."""
if self.comm is None:
state, buffer_paths, buffers = _remove_buffers(self.get_state())
args = dict(target_name='jupyter.widget',
data={'state': state, 'buffer_paths': buffer_paths},
buffers=buffers,
metadata={'version': __protocol_version__}
)
if self._model_id is not None:
args['comm_id'] = self._model_id
self.comm = comm.create_comm(**args)
@observe('comm')
def _comm_changed(self, change):
"""Called when the comm is changed."""
if change['new'] is None:
return
self._model_id = self.model_id
self.comm.on_msg(self._handle_msg)
_instances[self.model_id] = self
@property
def model_id(self):
"""Gets the model id of this widget.
If a Comm doesn't exist yet, a Comm will be created automagically."""
return self.comm.comm_id
#-------------------------------------------------------------------------
# Methods
#-------------------------------------------------------------------------
def close(self):
"""Close method.
Closes the underlying comm.
When the comm is closed, all of the widget views are automatically
removed from the front-end."""
if self.comm is not None:
_instances.pop(self.model_id, None)
self.comm.close()
self.comm = None
self._repr_mimebundle_ = None
def send_state(self, key=None):
"""Sends the widget state, or a piece of it, to the front-end, if it exists.
Parameters
----------
key : unicode, or iterable (optional)
A single property's name or iterable of property names to sync with the front-end.
"""
state = self.get_state(key=key)
if len(state) > 0:
if self._property_lock: # we need to keep this dict up to date with the front-end values
for name, value in state.items():
if name in self._property_lock:
self._property_lock[name] = value
state, buffer_paths, buffers = _remove_buffers(state)
msg = {'method': 'update', 'state': state, 'buffer_paths': buffer_paths}
self._send(msg, buffers=buffers)
def get_state(self, key=None, drop_defaults=False):
"""Gets the widget state, or a piece of it.
Parameters
----------
key : unicode or iterable (optional)
A single property's name or iterable of property names to get.
Returns
-------
state : dict of states
metadata : dict
metadata for each field: {key: metadata}
"""
if key is None:
keys = self.keys
elif isinstance(key, str):
keys = [key]
elif isinstance(key, Iterable):
keys = key
else:
raise ValueError("key must be a string, an iterable of keys, or None")
state = {}
traits = self.traits()
for k in keys:
to_json = self.trait_metadata(k, 'to_json', self._trait_to_json)
value = to_json(getattr(self, k), self)
if not drop_defaults or not self._compare(value, traits[k].default_value):
state[k] = value
return state
def _is_numpy(self, x):
return x.__class__.__name__ == 'ndarray' and x.__class__.__module__ == 'numpy'
def _compare(self, a, b):
if self._is_numpy(a) or self._is_numpy(b):
import numpy as np
return np.array_equal(a, b)
else:
return a == b
def set_state(self, sync_data):
"""Called when a state is received from the front-end."""
# Send an echo update message immediately
if JUPYTER_WIDGETS_ECHO:
echo_state = {}
for attr, value in sync_data.items():
if attr in self.keys and self.trait_metadata(attr, 'echo_update', default=True):
echo_state[attr] = value
if echo_state:
echo_state, echo_buffer_paths, echo_buffers = _remove_buffers(echo_state)
msg = {
'method': 'echo_update',
'state': echo_state,
'buffer_paths': echo_buffer_paths,
}
self._send(msg, buffers=echo_buffers)
# The order of these context managers is important. Properties must
# be locked when the hold_trait_notification context manager is
# released and notifications are fired.
with self._lock_property(**sync_data), self.hold_trait_notifications():
for name in sync_data:
if name in self.keys:
from_json = self.trait_metadata(name, 'from_json',
self._trait_from_json)
self.set_trait(name, from_json(sync_data[name], self))
def send(self, content, buffers=None):
"""Sends a custom msg to the widget model in the front-end.
Parameters
----------
content : dict
Content of the message to send.
buffers : list of binary buffers
Binary buffers to send with message
"""
self._send({"method": "custom", "content": content}, buffers=buffers)
def on_msg(self, callback, remove=False):
"""(Un)Register a custom msg receive callback.
Parameters
----------
callback: callable
callback will be passed three arguments when a message arrives::
callback(widget, content, buffers)
remove: bool
True if the callback should be unregistered."""
self._msg_callbacks.register_callback(callback, remove=remove)
def add_traits(self, **traits):
"""Dynamically add trait attributes to the Widget."""
super().add_traits(**traits)
for name, trait in traits.items():
if 'sync' in trait.metadata:
self.keys.append(name)
self.send_state(name)
def notify_change(self, change):
"""Called when a property has changed."""
# Send the state to the frontend before the user-registered callbacks
# are called.
name = change['name']
if self.comm is not None and getattr(self.comm, 'kernel', True) is not None:
# Make sure this isn't information that the front-end just sent us.
if name in self.keys and self._should_send_property(name, getattr(self, name)):
# Send new state to front-end
self.send_state(key=name)
super().notify_change(change)
def __repr__(self):
return self._gen_repr_from_keys(self._repr_keys())
#-------------------------------------------------------------------------
# Support methods
#-------------------------------------------------------------------------
@contextmanager
def _lock_property(self, **properties):
"""Lock a property-value pair.
The value should be the JSON state of the property.
NOTE: This, in addition to the single lock for all state changes, is
flawed. In the future we may want to look into buffering state changes
back to the front-end."""
self._property_lock = properties
try:
yield
finally:
self._property_lock = {}
@contextmanager
def hold_sync(self):
"""Hold syncing any state until the outermost context manager exits"""
if self._holding_sync is True:
yield
else:
try:
self._holding_sync = True
yield
finally:
self._holding_sync = False
self.send_state(self._states_to_send)
self._states_to_send.clear()
def _should_send_property(self, key, value):
"""Check the property lock (property_lock)"""
to_json = self.trait_metadata(key, 'to_json', self._trait_to_json)
if key in self._property_lock:
# model_state, buffer_paths, buffers
split_value = _remove_buffers({ key: to_json(value, self)})
split_lock = _remove_buffers({ key: self._property_lock[key]})
# A roundtrip conversion through json in the comparison takes care of
            # idiosyncrasies of how python data structures map to json, for example
# tuples get converted to lists.
if (jsonloads(jsondumps(split_value[0])) == split_lock[0]
and split_value[1] == split_lock[1]
and _buffer_list_equal(split_value[2], split_lock[2])):
if self._holding_sync:
self._states_to_send.discard(key)
return False
if self._holding_sync:
self._states_to_send.add(key)
return False
else:
return True
# Event handlers
@_show_traceback
def _handle_msg(self, msg):
"""Called when a msg is received from the front-end"""
data = msg['content']['data']
method = data['method']
if method == 'update':
if 'state' in data:
state = data['state']
if 'buffer_paths' in data:
_put_buffers(state, data['buffer_paths'], msg['buffers'])
self.set_state(state)
# Handle a state request.
elif method == 'request_state':
self.send_state()
# Handle a custom msg from the front-end.
elif method == 'custom':
if 'content' in data:
self._handle_custom_msg(data['content'], msg['buffers'])
# Catch remainder.
else:
self.log.error('Unknown front-end to back-end widget msg with method "%s"' % method)
def _handle_custom_msg(self, content, buffers):
"""Called when a custom msg is received."""
self._msg_callbacks(self, content, buffers)
@staticmethod
def _trait_to_json(x, self):
"""Convert a trait value to json."""
return x
@staticmethod
def _trait_from_json(x, self):
"""Convert json values to objects."""
return x
def _repr_mimebundle_(self, **kwargs):
plaintext = repr(self)
if len(plaintext) > 110:
plaintext = plaintext[:110] + '…'
data = {
'text/plain': plaintext,
}
if self._view_name is not None:
# The 'application/vnd.jupyter.widget-view+json' mimetype has not been registered yet.
# See the registration process and naming convention at
# http://tools.ietf.org/html/rfc6838
# and the currently registered mimetypes at
# http://www.iana.org/assignments/media-types/media-types.xhtml.
data['application/vnd.jupyter.widget-view+json'] = {
'version_major': 2,
'version_minor': 0,
'model_id': self._model_id
}
return data
def _send(self, msg, buffers=None):
"""Sends a message to the model in the front-end."""
if self.comm is not None and (self.comm.kernel is not None if hasattr(self.comm, "kernel") else True):
self.comm.send(data=msg, buffers=buffers)
def _repr_keys(self):
traits = self.traits()
for key in sorted(self.keys):
# Exclude traits that start with an underscore
if key[0] == '_':
continue
# Exclude traits who are equal to their default value
value = getattr(self, key)
trait = traits[key]
if self._compare(value, trait.default_value):
continue
elif (isinstance(trait, (Container, Dict)) and
trait.default_value == Undefined and
(value is None or len(value) == 0)):
# Empty container, and dynamic default will be empty
continue
yield key
def _gen_repr_from_keys(self, keys):
class_name = self.__class__.__name__
signature = ', '.join(
'{}={!r}'.format(key, getattr(self, key))
for key in keys
)
return '{}({})'.format(class_name, signature)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipywidgets@py3@ipywidgets@[email protected]@.PATH_END.py
|
{
"filename": "population.py",
"repo_name": "pymc-devs/pymc",
"repo_path": "pymc_extracted/pymc-main/pymc/sampling/population.py",
"type": "Python"
}
|
# Copyright 2024 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specializes on running MCMCs with population step methods."""
import logging
import warnings
from collections.abc import Iterator, Sequence
from copy import copy
from typing import TypeAlias
import cloudpickle
import numpy as np
from rich.progress import BarColumn, TextColumn, TimeElapsedColumn, TimeRemainingColumn
from pymc.backends.base import BaseTrace
from pymc.initial_point import PointType
from pymc.model import Model, modelcontext
from pymc.stats.convergence import log_warning_stats
from pymc.step_methods import CompoundStep
from pymc.step_methods.arraystep import (
BlockedStep,
PopulationArrayStepShared,
StatsType,
)
from pymc.step_methods.metropolis import DEMetropolis
from pymc.util import CustomProgress
__all__ = ()
Step: TypeAlias = BlockedStep | CompoundStep
_log = logging.getLogger(__name__)
def _sample_population(
*,
initial_points: Sequence[PointType],
draws: int,
start: Sequence[PointType],
rngs: Sequence[np.random.Generator],
step: BlockedStep | CompoundStep,
tune: int,
model: Model,
progressbar: bool = True,
parallelize: bool = False,
traces: Sequence[BaseTrace],
**kwargs,
):
"""Perform sampling of a population of chains using the ``PopulationStepper``.
Parameters
----------
draws : int
The number of samples to draw
start : list
Start points for each chain
rngs: sequence of random Generators
A list of :py:class:`~numpy.random.Generator` objects, one for each chain
step : function
Step function (should be or contain a population step method)
tune : int
Number of iterations to tune.
model : Model (optional if in ``with`` context)
progressbar : bool
Show progress bars? (defaults to True)
parallelize : bool
Setting for multiprocess parallelization
"""
warn_population_size(
step=step,
initial_points=initial_points,
model=model,
chains=len(traces),
)
sampling = _prepare_iter_population(
draws=draws,
step=step,
start=start,
parallelize=parallelize,
traces=traces,
tune=tune,
model=model,
rngs=rngs,
progressbar=progressbar,
)
with CustomProgress(disable=not progressbar) as progress:
task = progress.add_task("[red]Sampling...", total=draws)
for _ in sampling:
progress.update(task)
return
def warn_population_size(
*,
step: BlockedStep | CompoundStep,
initial_points: Sequence[PointType],
model: Model,
chains: int,
):
"""Emit informative errors/warnings for dangerously small population size."""
has_demcmc = np.any(
[
isinstance(m, DEMetropolis)
for m in (step.methods if isinstance(step, CompoundStep) else [step])
]
)
initial_point_model_size = sum(initial_points[0][n.name].size for n in model.value_vars)
if has_demcmc and chains < 3:
raise ValueError(
"DEMetropolis requires at least 3 chains. "
f"For this {initial_point_model_size}-dimensional model you should use ≥{initial_point_model_size + 1} chains"
)
if has_demcmc and chains <= initial_point_model_size:
warnings.warn(
"DEMetropolis should be used with more chains than dimensions! "
f"(The model has {initial_point_model_size} dimensions.)",
UserWarning,
stacklevel=2,
)
return
class PopulationStepper:
"""Wraps population of step methods to step them in parallel with single or multiprocessing."""
def __init__(self, steppers, parallelize: bool, progressbar: bool = True):
"""Use multiprocessing to parallelize chains.
Falls back to sequential evaluation if multiprocessing fails.
In the multiprocessing mode of operation, a new process is started for each
chain/stepper and Pipes are used to communicate with the main process.
Parameters
----------
steppers : list
A collection of independent step methods, one for each chain.
parallelize : bool
Indicates if parallelization via multiprocessing is desired.
progressbar : bool
Should we display a progress bar showing relative progress?
"""
self.nchains = len(steppers)
self.is_parallelized = False
self._primary_ends = []
self._processes = []
self._steppers = steppers
self._progress = None
if parallelize:
try:
# configure a child process for each stepper
_log.info(
"Attempting to parallelize chains to all cores. You can turn this off with `pm.sample(cores=1)`."
)
import multiprocessing
with CustomProgress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
TimeRemainingColumn(),
TextColumn("/"),
TimeElapsedColumn(),
disable=not progressbar,
) as self._progress:
for c, stepper in enumerate(steppers):
# enumerate(progress_bar(steppers)) if progressbar else enumerate(steppers)
# ):
task = self._progress.add_task(description=f"Chain {c}")
secondary_end, primary_end = multiprocessing.Pipe()
stepper_dumps = cloudpickle.dumps(stepper, protocol=4)
process = multiprocessing.Process(
target=self.__class__._run_secondary,
args=(c, stepper_dumps, secondary_end, task, self._progress),
name=f"ChainWalker{c}",
)
# we want the child process to exit if the parent is terminated
process.daemon = True
# Starting the process might fail and takes time.
# By doing it in the constructor, the sampling progress bar
# will not be confused by the process start.
process.start()
self._primary_ends.append(primary_end)
self._processes.append(process)
self.is_parallelized = True
except Exception:
_log.info(
"Population parallelization failed. "
"Falling back to sequential stepping of chains."
)
_log.debug("Error was: ", exc_info=True)
else:
_log.info(
"Chains are not parallelized. You can enable this by passing "
"`pm.sample(cores=n)`, where n > 1."
)
return super().__init__()
def __enter__(self):
"""Do nothing: processes are already started in ``__init__``."""
return
def __exit__(self, exc_type, exc_val, exc_tb):
if len(self._processes) > 0:
try:
for primary_end in self._primary_ends:
primary_end.send(None)
for process in self._processes:
process.join(timeout=3)
except Exception:
_log.warning("Termination failed.")
return
@staticmethod
def _run_secondary(c, stepper_dumps, secondary_end, task, progress):
"""Perform stepping of a chain from a separate process.
Parameters
----------
c : int
number of this chain
        stepper_dumps : bytes
            the pickled step method (e.g. a CompoundStep) for this chain
secondary_end : multiprocessing.connection.PipeConnection
This is our connection to the main process
task : progress.Task
The progress task for this chain
progress : progress.Progress
The progress bar
"""
try:
stepper = cloudpickle.loads(stepper_dumps)
            # the stepper is not necessarily a PopulationArrayStepShared itself,
# but rather a CompoundStep. PopulationArrayStepShared.population
# has to be updated, therefore we identify the substeppers first.
population_steppers = []
for sm in stepper.methods if isinstance(stepper, CompoundStep) else [stepper]:
if isinstance(sm, PopulationArrayStepShared):
population_steppers.append(sm)
while True:
incoming = secondary_end.recv()
# receiving a None is the signal to exit
if incoming is None:
break
tune_stop, population = incoming
if tune_stop:
stepper.stop_tuning()
# forward the population to the PopulationArrayStepShared objects
# This is necessary because due to the process fork, the population
# object is no longer shared between the steppers.
for popstep in population_steppers:
popstep.population = population
update = stepper.step(population[c])
progress.advance(task)
secondary_end.send(update)
except Exception:
_log.exception(f"ChainWalker{c}")
return
def step(self, tune_stop: bool, population) -> list[tuple[PointType, StatsType]]:
"""Step the entire population of chains.
Parameters
----------
tune_stop : bool
Indicates if the condition (i == tune) is fulfilled
population : list
Current Points of all chains
Returns
-------
update : list
List of (Point, stats) tuples for all chains
"""
updates: list[tuple[PointType, StatsType]] = []
if self.is_parallelized:
for c in range(self.nchains):
self._primary_ends[c].send((tune_stop, population))
# Blockingly get the step outcomes
for c in range(self.nchains):
updates.append(self._primary_ends[c].recv())
else:
for c in range(self.nchains):
if tune_stop:
self._steppers[c].stop_tuning()
updates.append(self._steppers[c].step(population[c]))
return updates
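# Illustrative sketch (not part of the original module): how a caller could drive a
# PopulationStepper for a few draws without parallelization. `steppers` and
# `population` are assumed to have been prepared as in `_prepare_iter_population`
# below; nothing in this helper runs at import time.
def _example_drive_population_stepper(steppers, population, tune=0, draws=3):
    popstep = PopulationStepper(steppers, parallelize=False, progressbar=False)
    with popstep:
        for i in range(draws):
            # step every chain once; each update is a (point, stats) tuple
            updates = popstep.step(i == tune, population)
            for c, (point, _stats) in enumerate(updates):
                population[c] = point
    return population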
def _prepare_iter_population(
*,
draws: int,
step,
start: Sequence[PointType],
parallelize: bool,
traces: Sequence[BaseTrace],
tune: int,
rngs: Sequence[np.random.Generator],
model=None,
progressbar=True,
) -> Iterator[int]:
"""Prepare a PopulationStepper and traces for population sampling.
Parameters
----------
draws : int
The number of samples to draw
step : function
Step function (should be or contain a population step method)
start : list
Start points for each chain
parallelize : bool
Setting for multiprocess parallelization
tune : int
Number of iterations to tune.
rngs: sequence of random Generators
A list of :py:class:`~numpy.random.Generator` objects, one for each chain
model : Model (optional if in ``with`` context)
progressbar : bool
``progressbar`` argument for the ``PopulationStepper``, (defaults to True)
Returns
-------
_iter_population : generator
        Main sampling iterator yielding the iteration number.
"""
nchains = len(start)
model = modelcontext(model)
draws = int(draws)
if draws < 1:
raise ValueError("Argument `draws` should be above 0.")
# The initialization of traces, samplers and points must happen in the right order:
# 1. population of points is created
# 2. steppers are initialized and linked to the points object
# 3. a PopulationStepper is configured for parallelized stepping
# 1. create a population (points) that tracks each chain
# it is updated as the chains are advanced
population = [start[c] for c in range(nchains)]
# 2. Set up the steppers
steppers: list[Step] = []
assert (
len(rngs) == nchains
), f"There must be one random Generator per chain. Got {len(rngs)} instead of {nchains}"
for c, rng in enumerate(rngs):
# need independent samplers for each chain
# it is important to copy the actual steppers (but not the delta_logp)
if isinstance(step, CompoundStep):
chainstep = CompoundStep([copy(m) for m in step.methods])
else:
chainstep = copy(step)
chainstep.set_rng(rng)
# link population samplers to the shared population state
for sm in chainstep.methods if isinstance(step, CompoundStep) else [chainstep]:
if isinstance(sm, PopulationArrayStepShared):
sm.link_population(population, c)
steppers.append(chainstep)
# 3. configure the PopulationStepper (expensive call)
popstep = PopulationStepper(steppers, parallelize, progressbar=progressbar)
# Because the preparations above are expensive, the actual iterator is
# in another method. This way the progbar will not be disturbed.
return _iter_population(
draws=draws, tune=tune, popstep=popstep, steppers=steppers, traces=traces, points=population
)
def _iter_population(
*,
draws: int,
tune: int,
popstep: PopulationStepper,
steppers,
traces: Sequence[BaseTrace],
points,
) -> Iterator[int]:
"""Iterate a ``PopulationStepper``.
Parameters
----------
draws : int
number of draws per chain
tune : int
number of tuning steps
popstep : PopulationStepper
the helper object for (parallelized) stepping of chains
steppers : list
The step methods for each chain
traces : list
Traces for each chain
points : list
population of chain states
Yields
------
i
Iteration number.
"""
try:
with popstep:
# iterate draws of all chains
for i in range(draws):
# this call steps all chains and returns a list of (point, stats)
# the `popstep` may interact with subprocesses internally
updates = popstep.step(i == tune, points)
# apply the update to the points and record to the traces
for c, strace in enumerate(traces):
points[c], stats = updates[c]
strace.record(points[c], stats)
log_warning_stats(stats)
# yield the state of all chains in parallel
yield i
except KeyboardInterrupt:
for c, strace in enumerate(traces):
strace.close()
if hasattr(steppers[c], "report"):
steppers[c].report._finalize(strace)
raise
except BaseException:
for c, strace in enumerate(traces):
strace.close()
raise
else:
for c, strace in enumerate(traces):
strace.close()
if hasattr(steppers[c], "report"):
steppers[c].report._finalize(strace)
|
pymc-devsREPO_NAMEpymcPATH_START.@pymc_extracted@pymc-main@pymc@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "chandra-marx/marxs",
"repo_path": "marxs_extracted/marxs-main/marxs/math/tests/__init__.py",
"type": "Python"
}
|
chandra-marxREPO_NAMEmarxsPATH_START.@marxs_extracted@marxs-main@marxs@math@tests@[email protected]_END.py
|
|
{
"filename": "plot_stellar_yield_table.py",
"repo_name": "Azeret/galIMF",
"repo_path": "galIMF_extracted/galIMF-master/plot_stellar_yield_table.py",
"type": "Python"
}
|
import time
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
import element_abundances_solar
reference_name = 'Anders1989'
H_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'H')
# He_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'He')
C_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'C')
# N_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'N')
O_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'O')
Mg_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Mg')
Fe_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Fe')
Si_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Si')
Ca_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Ca')
def plot_lifetime_and_finalmass():
Z2_list = [0.0004, 0.004, 0.008, 0.012]
file = open('yield_tables/rearranged/setllar_final_mass_from_portinari98/portinari98_Z=0.004.txt', 'r')
data = file.readlines()
file.close()
list2 = str.split(data[3])
list_ini_mass = []
for j in list2:
list_ini_mass.append(math.log(float(j), 10))
list_fin_mass = []
i = len(Z2_list) - 1
while i > -1:
file = open('yield_tables/rearranged/setllar_final_mass_from_portinari98/portinari98_Z={}.txt'.format(Z2_list[i]), 'r')
data = file.readlines()
file.close()
list2 = str.split(data[5])
list3 = []
for j in list2:
list3.append(math.log(float(j), 10))
list = [float(data[1]), list3]
list_fin_mass.append(list)
(i) = (i - 1)
color_list_ = []
for i in range(len(list_fin_mass)):
ZZZ = list_fin_mass[i][0]
Z_box = math.log(ZZZ, 10) - math.log(0.01886, 10)
color_list_.append(round(((Z_box+7)**4.001 - (-6.001 + 7) ** 4.001) / ((1 + 7) ** 4.001 - (-6.001 + 7) ** 4.001) * 1000))
colors = plt.cm.hsv_r(np.linspace(0, 1, 1000))
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(21, figsize=(4, 3.5))
# plt.xlim(-1.5, 2.5)
# plt.ylim(-1.5, 1.5)
# i = len(Z2_list) - 1
# while i > -1:
# plt.plot(list_ini_mass, list_fin_mass[i][1], label='Z={}'.format(list_fin_mass[i][0]))
# (i) = (i - 1)
# plt.plot([-2, 3], [-2, 3], ls='dashed', c='k', lw=0.7)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'log$_{10}$($M_{\rm *, final}$ [$M_\odot$])')
# plt.tight_layout()
# plt.savefig('Interpolated_stellar_final_mass.pdf', dpi=250)
list_lifetime = []
i = len(Z2_list) - 1
while i > -1:
file = open(
'yield_tables/rearranged/setllar_lifetime_from_portinari98/portinari98_Z={}.txt'.format(Z2_list[i]), 'r')
data = file.readlines()
file.close()
list2 = str.split(data[5])
list3 = []
for j in list2:
list3.append(math.log(float(j), 10))
list = [float(data[1]), list3]
list_lifetime.append(list)
(i) = (i - 1)
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(22, figsize=(4, 3.5))
# plt.xlim(-1.5, 2.5)
# plt.ylim(6, 15)
# i = len(Z2_list) - 1
# while i > -1:
# plt.plot(list_ini_mass, list_lifetime[i][1], label='Z={}'.format(list_fin_mass[i][0]))
# (i) = (i - 1)
# # plt.plot([-2, 3], [-2, 3], ls='dashed', c='k', lw=0.7)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'log$_{10}$(life time [yr])')
# plt.tight_layout()
# plt.savefig('Interpolated_stellar_lifetime.pdf', dpi=250)
##########
Metallicity_origen = [0.008, 0.02]
Age_origen = [
[6.47E+10, 3.54E+10, 2.09E+10, 1.30E+10, 8.46E+09, 5.72E+09, 4.12E+09, 2.92E+09, 2.36E+09, 2.18E+09, 1.82E+09,
1.58E+09, 1.41E+09, 1.25E+09, 1.23E+09, 6.86E+08, 4.12E+08, 1.93E+08, 1.15E+08, 7.71E+07, 5.59E+07, 3.44E+07,
2.10E+07, 1.49E+07, 1.01E+07, 6.65E+06, 5.30E+06, 4.15E+06, 3.44E+06, 3.32E+06],
[7.92E+10, 4.45E+10, 2.61E+10, 1.59E+10, 1.03E+10, 6.89E+09, 4.73E+09, 3.59E+09, 2.87E+09, 2.64E+09, 2.18E+09,
1.84E+09, 1.59E+09, 1.38E+09, 1.21E+09, 7.64E+08, 4.56E+08, 2.03E+08, 1.15E+08, 7.45E+07, 5.31E+07, 3.17E+07,
1.89E+07, 1.33E+07, 9.15E+06, 6.13E+06, 5.12E+06, 4.12E+06, 3.39E+06, 3.23E+06]]
Age_012 = []
for i in range(len(Age_origen[0])):
Age_012.append((Age_origen[0][i]*2+Age_origen[1][i])/3)
Remnant_mass_origen = [
[1.35, 1.48, 1.84, 2.04, 6.9, 12.5, 5.69, 9.89],
[1.31, 1.44, 1.87, 2.11, 7.18, 2.06, 2.09, 2.11]
]
Remnant_mass_012 = []
for i in range(len(Remnant_mass_origen[0])):
Remnant_mass_012.append((Remnant_mass_origen[0][i]*2+Remnant_mass_origen[1][i])/3)
Mass = [0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5,
1.6, 1.7, 1.8, 1.9, 2.0, 2.5, 3.0, 4.0, 5.0, 6.0,
7.0, 9.0, 12., 15., 20., 30., 40., 60., 100, 120]
    Metallicity = [0.0004, 0.004, 0.008, 0.012]
Age = [
[4.28E+10, 2.37E+10, 1.41E+10, 8.97E+09, 6.03E+09, 4.23E+09, 3.08E+09, 2.34E+09, 1.92E+09, 1.66E+09, 1.39E+09,
1.18E+09, 1.11E+09, 9.66E+08, 8.33E+08, 4.64E+08, 3.03E+08, 1.61E+08, 1.01E+08, 7.15E+07, 5.33E+07, 3.42E+07,
2.13E+07, 1.54E+07, 1.06E+07, 6.90E+06, 5.45E+06, 4.20E+06, 3.32E+06, 3.11E+06],
[5.35E+10, 2.95E+10, 1.73E+10, 1.09E+10, 7.13E+09, 4.93E+09, 3.52E+09, 2.64E+09, 2.39E+09, 1.95E+09, 1.63E+09,
1.28E+09, 1.25E+09, 1.23E+09, 1.08E+09, 5.98E+08, 3.67E+08, 1.82E+08, 1.11E+08, 7.62E+07, 5.61E+07, 3.51E+07,
2.14E+07, 1.52E+07, 1.05E+07, 6.85E+06, 5.44E+06, 4.19E+06, 3.38E+06, 3.23E+06],
[6.47E+10, 3.54E+10, 2.09E+10, 1.30E+10, 8.46E+09, 5.72E+09, 4.12E+09, 2.92E+09, 2.36E+09, 2.18E+09, 1.82E+09,
1.58E+09, 1.41E+09, 1.25E+09, 1.23E+09, 6.86E+08, 4.12E+08, 1.93E+08, 1.15E+08, 7.71E+07, 5.59E+07, 3.44E+07,
2.10E+07, 1.49E+07, 1.01E+07, 6.65E+06, 5.30E+06, 4.15E+06, 3.44E+06, 3.32E+06],
Age_012]
len_mass = len(Mass)
log_Mass = []
for i in range(len_mass):
log_Mass.append(math.log(Mass[i], 10))
len_metal = len(Metallicity)
log_Metallicity = []
for i in range(len_metal):
log_Metallicity.append(math.log(Metallicity[i], 10))
log_Age = []
for i in range(len_metal):
log_Age.append([])
for j in range(len_mass):
log_Age[i].append(math.log(Age[i][j], 10))
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(4, 4))
i = 0
while i < len(Z2_list):
ZZZ = list_fin_mass[i][0]
Z_box = round(math.log(ZZZ, 10)-math.log(0.01886, 10), 2)
axs[0].plot(list_ini_mass, list_lifetime[i][1], lw=(6-i)/2, label='Z={}, [Z]={}'.format(ZZZ, Z_box), color=colors[color_list_[i]])
(i) = (i + 1)
i = len_metal-1
# while i > -1:
# axs[0].scatter(log_Mass, log_Age[i], s=3, marker='*', edgecolors='w', linewidth='0.1', zorder=10)
# (i) = (i - 1)
axs[0].plot([-1, 2], [7, 7])
axs[0].plot([math.log(17, 10), math.log(17, 10)], [6, 15])
# axs[0].set_yticks(np.arange(6, 16, 2))
axs[0].set_ylim(6, 15)
axs[0].set_ylabel(r'log$_{10}$(life time [yr])')
axs[0].legend(prop={'size': 6}, loc='best')
Mass = [
[9, 12, 15, 20, 30, 40, 60, 100, 120],
[9, 12, 15, 20, 30, 40, 100, 120],
[9, 12, 15, 20, 30, 40, 60, 120],
[9, 12, 15, 20, 30, 40, 60, 120]
]
    Metallicity = [0.0004, 0.004, 0.008, 0.012]
Remnant_mass = [
[1.35, 1.5, 1.8, 2.07, 6.98, 14.91, 24.58, 32.06, 30.6],
[1.35, 1.5, 1.82, 2.04, 6.98, 12.6, 36.7, 35.2],
[1.35, 1.48, 1.84, 2.04, 6.9, 12.5, 5.69, 9.89],
Remnant_mass_012
]
#################################################################
# WW95_solar = 0.01886
# Metallicity_WW95 = [0, WW95_solar*10**-4, WW95_solar*0.01, WW95_solar*0.1, WW95_solar]
# Mass_WW95 = [12, 13, 15, 18, 20, 22, 25, 30, 35, 40]
# Remnant_mass_WW95_B = [
# [1.32, 1.46, 1.43, 1.76, 2.06, 2.02, 2.07, 1.94, 3.86, 5.45],
# [1.38, 1.31, 1.49, 1.69, 1.97, 2.12, 1.99, 2.01, 3.39, 4.45],
# [1.40, 1.44, 1.56, 1.58, 1.98, 2.04, 1.87, 2.21, 2.42, 4.42],
# [1.28, 1.44, 1.63, 1.61, 1.97, 2.01, 1.87, 2.08, 3.03, 4.09],
# [1.35, 1.28, 1.53, 3.40, 4.12, 1.49, 1.90, 1.54, 7.62, 12.2]
# ]
# Interpolation_remnant_mass_WW95_B = interpolate.interp2d(Mass_WW95, Metallicity_WW95, Remnant_mass_WW95_B)
# Remnant_mass_WW95_B_new = []
# for i in range(len(Metallicity)):
# Remnant_mass_WW95_B_new.append([])
# for j in range(len(Mass_WW95)):
# Remnant_mass_WW95_B_new[i].append(Interpolation_remnant_mass_WW95_B(Mass_WW95[j], Metallicity[i]))
#
# log_Remnant_mass_WW95_B = []
# for i in range(len_metal):
# log_Remnant_mass_WW95_B.append([])
# for j in range(len(Remnant_mass_WW95_B[i])):
# log_Remnant_mass_WW95_B[i].append(math.log(Remnant_mass_WW95_B[i][j], 10))
#
# log_mass_WW95 = []
# for i in range(len(Mass_WW95)):
# log_mass_WW95.append(math.log(Mass_WW95[i], 10))
#################################################################
len_metal = len(Metallicity)
log_Metallicity = []
for i in range(len_metal):
log_Metallicity.append(math.log(Metallicity[i], 10))
log_Remnant_mass = []
for i in range(len_metal):
log_Remnant_mass.append([])
for j in range(len(Remnant_mass[i])):
log_Remnant_mass[i].append(math.log(Remnant_mass[i][j], 10))
log_mass = []
for i in range(len_metal):
log_mass.append([])
for j in range(len(Mass[i])):
log_mass[i].append(math.log(Mass[i][j], 10))
# print(log_mass)
# print(len(log_mass[0]))
# print(len(log_mass))
# print(len(log_Remnant_mass[0]))
i = 0
while i < len(Z2_list):
axs[1].plot(list_ini_mass, list_fin_mass[i][1], lw=(6-i)/2, label='Z={}'.format(list_fin_mass[i][0]), color=colors[color_list_[i]])
(i) = (i + 1)
i = len_metal-1
# while i > -1:
# axs[1].scatter(log_mass[i], log_Remnant_mass[i], s=10, marker='*', edgecolors='w', linewidth='0.1', zorder=10)
# (i) = (i - 1)
# i = len_metal-1
# # while i > -1:
# # axs[1].scatter(log_mass_WW95, log_Remnant_mass_WW95_B[i], s=10, marker='^', edgecolors='w', linewidth='0.1', zorder=10)
# # (i) = (i - 1)
axs[1].set_yticks(np.arange(-2, 2, 1))
axs[1].set_ylim(-1.5, 1.5)
axs[1].set_ylabel(r'log$_{10}(M_{\rm *, final}$ [$M_\odot$])')
axs[1].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
plt.tight_layout()
# Remove horizontal space between axes
fig.subplots_adjust(hspace=0)
plt.savefig('Interpolated_stellar_lifetime_final_mass.pdf', dpi=250)
plt.show()
return
def function_read_file(yield_table_name):
####################
### read in file ###
####################
if yield_table_name == "portinari98":
file_yield = open(
'yield_tables/agb_and_massive_stars_portinari98_marigo01_gce_totalyields.txt', 'r')
# 'yield_tables/agb_and_massive_stars_portinari98_marigo01.txt', 'r')
# Use net yields of Portinari and Marigo
# Net yields with masses up to 7Msun are from Marigo, above those of Portinari are taken.
# Only isotopes are selected which are available in both yield sets and go up to Fe.
# Initial masses go from the lowest mass available up to 100Msun.
# Yield set ID M01P98 in Ritter et al. 2017.
# References: Marigo et al. 2001, http://ukads.nottingham.ac.uk/abs/2001A%26A...370..194M
# Portinari et al. 1998, http://ukads.nottingham.ac.uk/abs/1998A%26A...334..505P
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "Kobayashi06":
file_yield = open(
'yield_tables/agb_and_massive_stars_Kobayashi06_marigo01_gce_totalyields.txt', 'r')
        # Use yields of Kobayashi et al. 2006 for massive stars combined with
        # the Marigo 2001 AGB yields (as indicated by the file name above).
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "WW95":
file_yield = open(
'yield_tables/massive_stars_WW95_totalyields.txt', 'r')
# Use net yields of Woosley S. E., Weaver T. A., 1995, ApJS, 101, 181 (WW95)
# Use WW95 model B which has the highest [Mg/Fe].
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "marigo01":
file_yield = open(
'yield_tables/agb_marigo01_totalyields.txt', 'r')
data = file_yield.readlines()
file_yield.close()
###########################
### extract information ###
###########################
#
H_relative_line_number = function_get_element_line_number(data, 'H-1')
He_relative_line_number = function_get_element_line_number(data, 'He-4')
C_relative_line_number = function_get_element_line_number(data, 'C-12')
N_relative_line_number = function_get_element_line_number(data, 'N-14')
O_relative_line_number = function_get_element_line_number(data, 'O-16')
Ne_relative_line_number = function_get_element_line_number(data, 'Ne-20')
Mg_relative_line_number = function_get_element_line_number(data, 'Mg-24')
Si_relative_line_number = function_get_element_line_number(data, 'Si-28')
S_relative_line_number = function_get_element_line_number(data, 'S-32')
Ca_relative_line_number = function_get_element_line_number(data, 'Ca-40')
Fe_relative_line_number = function_get_element_line_number(data, 'Fe-56')
#
global M_list, Z_list, eject_mass_list, H_eject_mass_list, He_eject_mass_list, C_eject_mass_list, \
N_eject_mass_list, O_eject_mass_list, Ne_eject_mass_list, Mg_eject_mass_list, Si_eject_mass_list, \
S_eject_mass_list, Ca_eject_mass_list, Fe_eject_mass_list, Metal_eject_mass_list
global O_over_Mg_list, Mg_over_Fe_list, Ca_over_Fe_list, Si_over_Fe_list, C_over_H_list, Mg_over_H_list, \
Si_over_H_list, Fe_over_H_list, O_over_H_list, Z_over_H_list, \
Z_over_X_list, Z_over_Z0_list, XXX_list, YYY_list, ZZZ_list, O_over_Fe_list
#
i = len(data)-1
while i > -1:
line_i = str.split(data[i])
if line_i[1] == 'Table:':
line_H = str.split(data[i + H_relative_line_number])
line_He = str.split(data[i + He_relative_line_number])
line_C = str.split(data[i + C_relative_line_number])
line_N = str.split(data[i + N_relative_line_number])
line_O = str.split(data[i + O_relative_line_number])
line_Ne = str.split(data[i + Ne_relative_line_number])
line_Mg = str.split(data[i + Mg_relative_line_number])
line_Si = str.split(data[i + Si_relative_line_number])
line_S = str.split(data[i + S_relative_line_number])
line_Ca = str.split(data[i + Ca_relative_line_number])
line_Fe = str.split(data[i + Fe_relative_line_number])
line_Mfinal = str.split(data[i + 2])
(Z, M) = function_get_Z_M(line_i[2]) # metallicity and mass of the star
ejecta_mass = round((M - function_get_Mfinal(line_Mfinal[2])), 5) ####################
H_mass = function_get_element_mass(line_H[1])
He_mass = function_get_element_mass(line_He[1])
C_mass = function_get_element_mass(line_C[1])
N_mass = function_get_element_mass(line_N[1])
O_mass = function_get_element_mass(line_O[1])
Ne_mass = function_get_element_mass(line_Ne[1])
Mg_mass = function_get_element_mass(line_Mg[1])
Si_mass = function_get_element_mass(line_Si[1])
S_mass = function_get_element_mass(line_S[1])
Ca_mass = function_get_element_mass(line_Ca[1])
Fe_mass = function_get_element_mass(line_Fe[1])
H_num = H_mass/1.0079
C_num = C_mass/12.011
N_num = N_mass/14.007
O_num = O_mass/15.9994
Ne_num = Ne_mass/20.18
Mg_num = Mg_mass/24.305
Si_num = Si_mass/28.085
S_num = S_mass/32.06
Ca_num = Ca_mass/40.078
Fe_num = Fe_mass/55.845
Metal_num = C_num+N_num+O_num+Ne_num+Mg_num+Si_num+S_num+Ca_num+Fe_num
O_over_Mg = math.log(O_num/Mg_num, 10) - O_abundances_solar + Mg_abundances_solar
Mg_over_H = math.log(Mg_num/H_num, 10) - Mg_abundances_solar + H_abundances_solar
Si_over_H = math.log(Si_num/H_num, 10) - Si_abundances_solar + H_abundances_solar
C_over_H = math.log(C_num/H_num, 10) - C_abundances_solar + H_abundances_solar
Fe_over_H = math.log(Fe_num/H_num, 10) - Fe_abundances_solar + H_abundances_solar
O_over_H = math.log(O_num/H_num, 10) - O_abundances_solar + H_abundances_solar
Mg_over_Fe = math.log(Mg_num/Fe_num, 10) - Mg_abundances_solar + Fe_abundances_solar
Ca_over_Fe = math.log(Ca_num/Fe_num, 10) - Ca_abundances_solar + Fe_abundances_solar
Si_over_Fe = math.log(Si_num/Fe_num, 10) - Si_abundances_solar + Fe_abundances_solar
O_over_Fe = math.log(O_num/Fe_num, 10) - O_abundances_solar + Fe_abundances_solar
Metal_mass = round((ejecta_mass - H_mass - He_mass), 5) ####################
# Metal_mass = round((C_mass+N_mass+O_mass+Ne_mass+Mg_mass+Si_mass+S_mass+Ca_mass+Fe_mass), 5) ###### the same ######
if Metal_mass<0:
print("Warning: Metal_mass=", Metal_mass, "<0")
print("check stellar yield table with metallicity and mass being:", Z, "&", M)
Metal_mass = 0
Z_over_X = math.log(Metal_mass / H_mass, 10) - math.log(0.01886 / 0.7381, 10)
Z_over_Z0 = math.log(Metal_mass / ejecta_mass, 10) - math.log(0.01886, 10)
Z_over_H = math.log(Metal_num / H_num, 10) - math.log(0.01886 / 18 / 0.7381, 10) # where 18 is the estimated average atomic weight over the weight of hydrogen.
XXX = H_mass / ejecta_mass
YYY = He_mass / ejecta_mass
ZZZ = Metal_mass / ejecta_mass
if len(Z_list) == 0:
Z_list.append(Z)
Z_n = 0
M_list.append([])
eject_mass_list.append([])
H_eject_mass_list.append([])
He_eject_mass_list.append([])
C_eject_mass_list.append([])
N_eject_mass_list.append([])
O_eject_mass_list.append([])
Ne_eject_mass_list.append([])
Mg_eject_mass_list.append([])
Si_eject_mass_list.append([])
S_eject_mass_list.append([])
Ca_eject_mass_list.append([])
Fe_eject_mass_list.append([])
Metal_eject_mass_list.append([])
Z_over_H_list.append([])
Z_over_X_list.append([])
Z_over_Z0_list.append([])
XXX_list.append([])
YYY_list.append([])
ZZZ_list.append([])
O_over_Mg_list.append([])
Mg_over_Fe_list.append([])
Si_over_Fe_list.append([])
Ca_over_Fe_list.append([])
Mg_over_H_list.append([])
Si_over_H_list.append([])
C_over_H_list.append([])
Fe_over_H_list.append([])
O_over_H_list.append([])
O_over_Fe_list.append([])
if Z != Z_list[-1]:
Z_list.append(Z)
Z_n += 1
M_list.append([])
eject_mass_list.append([])
H_eject_mass_list.append([])
He_eject_mass_list.append([])
C_eject_mass_list.append([])
N_eject_mass_list.append([])
O_eject_mass_list.append([])
Ne_eject_mass_list.append([])
Mg_eject_mass_list.append([])
Si_eject_mass_list.append([])
S_eject_mass_list.append([])
Ca_eject_mass_list.append([])
Fe_eject_mass_list.append([])
Metal_eject_mass_list.append([])
O_over_Mg_list.append([])
Mg_over_Fe_list.append([])
Ca_over_Fe_list.append([])
Si_over_Fe_list.append([])
Mg_over_H_list.append([])
Si_over_H_list.append([])
C_over_H_list.append([])
Fe_over_H_list.append([])
O_over_H_list.append([])
Z_over_H_list.append([])
Z_over_X_list.append([])
Z_over_Z0_list.append([])
XXX_list.append([])
YYY_list.append([])
ZZZ_list.append([])
O_over_Fe_list.append([])
M_list[Z_n].append(M)
eject_mass_list[Z_n].append(ejecta_mass)
H_eject_mass_list[Z_n].append(H_mass)
He_eject_mass_list[Z_n].append(He_mass)
C_eject_mass_list[Z_n].append(C_mass)
N_eject_mass_list[Z_n].append(N_mass)
O_eject_mass_list[Z_n].append(O_mass)
Ne_eject_mass_list[Z_n].append(Ne_mass)
Mg_eject_mass_list[Z_n].append(Mg_mass)
Si_eject_mass_list[Z_n].append(Si_mass)
S_eject_mass_list[Z_n].append(S_mass)
Ca_eject_mass_list[Z_n].append(Ca_mass)
Fe_eject_mass_list[Z_n].append(Fe_mass)
Metal_eject_mass_list[Z_n].append(Metal_mass)
O_over_Mg_list[Z_n].append(O_over_Mg)
Mg_over_Fe_list[Z_n].append(Mg_over_Fe)
Ca_over_Fe_list[Z_n].append(Ca_over_Fe)
Si_over_Fe_list[Z_n].append(Si_over_Fe)
Mg_over_H_list[Z_n].append(Mg_over_H)
Si_over_H_list[Z_n].append(Si_over_H)
C_over_H_list[Z_n].append(C_over_H)
O_over_H_list[Z_n].append(O_over_H)
Z_over_H_list[Z_n].append(Z_over_H)
Z_over_X_list[Z_n].append(Z_over_X)
Z_over_Z0_list[Z_n].append(Z_over_Z0)
XXX_list[Z_n].append(XXX)
YYY_list[Z_n].append(YYY)
ZZZ_list[Z_n].append(ZZZ)
Fe_over_H_list[Z_n].append(Fe_over_H)
O_over_Fe_list[Z_n].append(O_over_Fe)
(i) = (i - 1)
return
def function_get_Mfinal(Mfinal_string):
i_end = len(Mfinal_string)
i = 0
mass_str = ''
while i < i_end:
mass_str += Mfinal_string[i]
(i) = (i + 1)
mass = float(mass_str)
return mass
def function_get_element_mass(element_mass_string):
i_end = len(element_mass_string)
i = 1
mass_str = ''
while i < i_end:
mass_str += element_mass_string[i]
(i) = (i + 1)
mass = float(mass_str)
return mass
def function_get_element_line_number(data, element):
i = 0
while i < len(data):
line_i = str.split(data[i])
if line_i[1] == 'Table:':
start = i
j = 0
while j < 100:
line_j = str.split(data[j])
if line_j[0] == '&'+element:
end = j
element_relative_line_number = j - i
break
(j) = (j+1)
break
(i) = (i + 1)
return element_relative_line_number
def function_get_Z_M(M_Z_string):
i = 0
i_M_start = 0
i_M_end = 0
i_Z_start = 0
i_Z_end = 0
while i < len(M_Z_string):
if M_Z_string[i] == 'M':
i_M_start = i+2
if M_Z_string[i] == ',':
i_M_end = i
i_Z_start = i+3
if M_Z_string[i] == ')':
i_Z_end = i
(i) = (i+1)
i = i_Z_start
Z_str = ''
while i < i_Z_end:
Z_str += M_Z_string[i]
(i) = (i + 1)
Z = float(Z_str)
i = i_M_start
M_str = ''
while i < i_M_end:
M_str += M_Z_string[i]
(i) = (i + 1)
M = float(M_str)
return (Z, M)
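# Illustrative sketch (not part of the original script): function_get_Z_M expects a
# token shaped like "(M=<mass>,Z=<metallicity>)" as written in the yield-table headers;
# the literal below is an assumed example value, not data taken from any table.
def _example_parse_mass_metallicity(token="(M=12.0,Z=0.004)"):
    Z, M = function_get_Z_M(token)
    return Z, M  # -> (0.004, 12.0)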
def function_plot_yields():
global O_over_Mg_list, Mg_over_Fe_list, C_over_H_list, Mg_over_H_list, Si_over_H_list, Fe_over_H_list, O_over_H_list, Z_over_X_list, Z_over_Z0_list, \
Z_over_H_list, O_over_Fe_list, M_list, Z_list, XXX_list, YYY_list, ZZZ_list
color_list_ = []
for i in range(len(Z_list)):
ZZZ = Z_list[i]
if ZZZ > 0:
Z_box = math.log(ZZZ, 10) - math.log(0.01886, 10)
else:
Z_box = -6
color_list_.append(round(((Z_box+7)**4.001 - (-6.001 + 7) ** 4.001) / ((1 + 7) ** 4.001 - (-6.001 + 7) ** 4.001) * 1000))
colors = plt.cm.hsv_r(np.linspace(0, 1, 1000))
j = 0
while j < len(M_list):
i = 0
while i < len(M_list[j]):
M_list[j][i] = math.log(M_list[j][i], 10)
(i) = (i+1)
(j) = (j+1)
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(1, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# # plt.ylim(0, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_Mg_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# O_mass_eject_SNIa = 0.148 # TNH93 0.148 i99CDD1 0.09, i99CDD2 0.06, i99W7 0.14, ivo12/13 0.09-0.1, t03 0.14, t86 0.13
# Mg_mass_eject_SNIa = 0.009 # TNH93 0.009 i99CDD1 0.0077, i99CDD2 0.0042, i99W7 0.0085, ivo12/13 0.015-0.029, t03 0.013, t86 0.016
# O_num = O_mass_eject_SNIa / 15.9994
# Mg_num = Mg_mass_eject_SNIa / 24.305
# O_over_Mg_SNIa = math.log(O_num / Mg_num, 10) - O_abundances_solar + Mg_abundances_solar
# plt.plot([-0.3, 0.9], [O_over_Mg_SNIa, O_over_Mg_SNIa], ls="--", lw=2, label="SNIa")
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/Mg]')
# plt.tight_layout()
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(2, figsize=(4, 3.5))
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Mg_over_Fe_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
Mg_mass_eject_SNIa = 0.0158 # TNH93 0.148 i99CDD1 0.09, i99CDD2 0.06, i99W7 0.14, ivo12/13 0.09-0.1, t03 0.14, t86 0.13
Fe_mass_eject_SNIa = 0.68 #0.63 # Recchi2009 halfed to 0.372 # TNH93 0.744 i99CDD1 0.56, i99CDD2 0.76, i99W7 0.63, ivo12/13 0.62-0.67, t03 0.74, t86 0.63
Ca_mass_eject_SNIa = 0.0181
Si_mass_eject_SNIa = 0.142
Ca_num = Ca_mass_eject_SNIa / 40.078
Si_num = Si_mass_eject_SNIa / 28.085
Mg_num = Mg_mass_eject_SNIa / 24.305
Fe_num = Fe_mass_eject_SNIa / 55.845
Mg_over_Fe_SNIa = math.log(Mg_num / Fe_num, 10) - Mg_abundances_solar + Fe_abundances_solar
Si_over_Fe_SNIa = math.log(Si_num / Fe_num, 10) - Si_abundances_solar + Fe_abundances_solar
Ca_over_Fe_SNIa = math.log(Ca_num / Fe_num, 10) - Ca_abundances_solar + Fe_abundances_solar
# plt.plot([-0.3, 0.9], [Mg_over_Fe_SNIa, Mg_over_Fe_SNIa], ls="--", lw=2, label="SNIa")
# plt.plot([-2, 3], [0, 0], lw=0.5, ls='dotted')
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 3.5)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'[Mg/Fe]')
# plt.tight_layout()
# plt.savefig('steller_yield_Mg_over_Fe.pdf', dpi=250)
#
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(3, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 7)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_Fe_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# O_over_Fe_SNIa = math.log(O_num / Fe_num, 10) - O_abundances_solar + Fe_abundances_solar
# plt.plot([-0.3, 0.9], [O_over_Fe_SNIa, O_over_Fe_SNIa], ls="--", lw=2, label="SNIa")
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/Fe]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(4, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Mg_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Mg/H]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(42, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Si_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Si/H]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(41, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# # plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], C_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[C/H]')
# plt.tight_layout()
# plt.savefig('steller_yield_Mg.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(5, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/H]')
# plt.tight_layout()
# # plt.savefig('steller_yield_O.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(6, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list)-1
# while i > -1:
# plt.plot(M_list[i], Fe_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Fe/H]')
# plt.tight_layout()
# # plt.savefig('steller_yield_Fe.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(7, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Z_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Z/H]')
# plt.title("Number ratio")
# plt.tight_layout()
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(8, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Z_over_X_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Z/X]')
# plt.title("Mass ratio")
# plt.tight_layout()
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(11, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(0.23, 0.6)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], YYY_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# # plt.plot([-2, 3], [0.25, 0.25], lw=0.5)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel('Y')
# plt.tight_layout()
# # plt.savefig('steller_yield_Y.pdf', dpi=250)
##########
fig, axs = plt.subplots(3, 1, sharex=True, figsize=(3, 4))
# i = len(M_list) - 1
# while i > -1:
# axs[0].plot(M_list[i], Z_over_Z0_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# (i) = (i - 1)
# axs[0].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# # axs[0].set_yticks(np.arange(-1, 2.1, 1))
# axs[0].set_ylim(-2, 1.6)
# axs[0].set_ylabel(r'[Z]')
#
# i = len(M_list) - 1
# while i > -1:
# # axs[1].plot(M_list[i], XXX_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# axs[1].plot(M_list[i], YYY_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# # axs[1].plot(M_list[i], ZZZ_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# (i) = (i - 1)
# axs[1].plot([-2, 3], [0.273, 0.273], lw=0.7, ls='dotted')
# # axs[1].set_yticks(np.arange(0.2, 0.61, 0.1))
# axs[1].set_ylim(0.24, 0.605)
# axs[1].set_xlim(-0.5, 2.2)
# axs[1].set_ylabel('Y')
# axs[0].plot([1.3073, 1.3073], [-0.1, 1.7], lw=0.2)
axs[0].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
ZZZ = Z_list[i]
if ZZZ > 0:
Z_box = round(math.log(ZZZ, 10) - math.log(0.01886, 10), 2)
else:
Z_box = -6
M_list[i].insert(0, math.log(150, 10))
O_over_Fe_list[i].insert(0, O_over_Fe_list[i][0])
axs[0].plot(M_list[i], O_over_Fe_list[i], lw=2**(i**0.5), label=r'$Z={}$'.format(ZZZ), color='k', ls=['-', 'dashed', 'dotted', '-.'][i])
(i) = (i - 1)
# axs[0].plot([-0.3, 0.9], [O_over_Fe_SNIa, O_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[0].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[0].set_yticks(np.arange(-2, 2.1, 2))
axs[0].set_xlim(0.7, 1.7)
# axs[0].set_ylim(-0.5, 1.7)
axs[0].set_ylabel(r'[O/Fe]')
axs[0].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
axs[0].legend(prop={'size': 6}, loc='best')
axs[1].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
Mg_over_Fe_list[i].insert(0, Mg_over_Fe_list[i][0])
axs[1].plot(M_list[i], Mg_over_Fe_list[i], lw=2**(i**0.5), label=r'$Z={}$'.format(ZZZ),
color='k', ls=['-', 'dashed', 'dotted', '-.'][i])
(i) = (i - 1)
# axs[1].plot([-0.3, 0.9], [Mg_over_Fe_SNIa, Mg_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[1].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[1].set_yticks(np.arange(-2, 2.1, 2))
# axs[1].set_ylim(-0.1, 1.7)
axs[1].set_ylabel(r'[Mg/Fe]')
axs[1].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
# axs[1].legend(prop={'size': 6}, loc='best')
axs[2].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
Si_over_Fe_list[i].insert(0, Si_over_Fe_list[i][0])
axs[2].plot(M_list[i], Si_over_Fe_list[i], lw=2**(i**0.5), label=r'$Z={}$'.format(ZZZ),
color='k', ls=['-', 'dashed', 'dotted', '-.'][i])
(i) = (i - 1)
# axs[2].plot([-0.3, 0.9], [Si_over_Fe_SNIa, Si_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[2].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[2].set_yticks(np.arange(-2, 2.1, 2))
# axs[2].set_ylim(-0.1, 1.7)
axs[2].set_ylabel(r'[Si/Fe]')
axs[2].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
# axs[2].legend(prop={'size': 6}, loc='best')
plt.tight_layout()
# Remove horizontal space between axes
fig.subplots_adjust(hspace=0)
# plt.savefig('stellar_yields.pdf', dpi=250)
plt.show()
return
if __name__ == '__main__':
start_time = time.time()
Z_list = []
M_list = []
eject_mass_list = []
H_eject_mass_list = []
He_eject_mass_list = []
C_eject_mass_list = []
N_eject_mass_list = []
O_eject_mass_list = []
Ne_eject_mass_list = []
Mg_eject_mass_list = []
Si_eject_mass_list = []
S_eject_mass_list = []
Ca_eject_mass_list = []
Fe_eject_mass_list = []
Metal_eject_mass_list = []
O_over_Mg_list = []
Mg_over_H_list = []
Si_over_H_list = []
C_over_H_list = []
Fe_over_H_list = []
O_over_H_list = []
Z_over_H_list = []
Z_over_X_list = []
Z_over_Z0_list = []
XXX_list = []
YYY_list = []
ZZZ_list = []
Mg_over_Fe_list = []
Si_over_Fe_list = []
Ca_over_Fe_list = []
O_over_Fe_list = []
    yield_table_name = "Kobayashi06"  # one of "Kobayashi06", "WW95", "portinari98", or "marigo01"
function_read_file(yield_table_name)
    function_plot_yields()
plot_lifetime_and_finalmass()
print(" - Run time: %s -" % round((time.time() - start_time), 2))
|
AzeretREPO_NAMEgalIMFPATH_START.@galIMF_extracted@galIMF-master@[email protected]_END.py
|
{
"filename": "base.py",
"repo_name": "dwkim78/pdtrend",
"repo_path": "pdtrend_extracted/pdtrend-master/pdtrend/datasets/base.py",
"type": "Python"
}
|
"""
Base IO code for all datasets
"""
import sys
from os.path import dirname
from os.path import join
def load_lightcurve_set():
"""
Return the set of light curves for testing pdtrend.
Returns
-------
lcs : numpy.ndarray
An array of light curves.
"""
import bz2
try:
import cPickle as pickle
    except ImportError:
import pickle
module_path = dirname(__file__)
# The light curves are bzipped and pickled.
file_path = join(module_path, 'lightcurves/lc.pbz2')
# For Python 3.
if sys.version_info.major >= 3:
lcs = pickle.load(bz2.BZ2File(file_path, 'r'), encoding='bytes')
# For Python 2.
else:
lcs = pickle.load(bz2.BZ2File(file_path, 'r'))
return lcs
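# Illustrative sketch (not part of the original API): load the bundled light curves
# and report how many there are; defined only, so nothing runs at import time.
def _example_usage():
    lcs = load_lightcurve_set()
    print('Loaded %d light curves.' % len(lcs))
    return lcs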
|
dwkim78REPO_NAMEpdtrendPATH_START.@pdtrend_extracted@pdtrend-master@pdtrend@[email protected]@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/sankey/hoverlabel/font/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="style", parent_name="sankey.hoverlabel.font", **kwargs
):
super(StyleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", ["normal", "italic"]),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@sankey@hoverlabel@font@[email protected]_END.py
|
{
"filename": "test_units.py",
"repo_name": "galsci/pysm",
"repo_path": "pysm_extracted/pysm-main/tests/test_units.py",
"type": "Python"
}
|
import healpy as hp
import numpy as np
import pytest
from pysm3 import units
from pysm3.models.template import read_map
@pytest.fixture
def setUp():
T_CMB = 100.0 * units.K_CMB
T_RJ = 100.0 * units.K_RJ
freqs = 100.0 * units.GHz
nside = 256
npix = hp.nside2npix(nside)
test_map_RJ = np.random.randn(npix) * units.K_RJ
test_map_CMB = np.random.randn(npix) * units.K_CMB
test_map_dimless = units.Quantity(np.random.randn(npix), "")
return T_CMB, T_RJ, freqs, test_map_RJ, test_map_CMB, test_map_dimless
def test_conversion(setUp):
""" Here we test that the numerical value of the conversion is correct.
The mathematical form is
..math::
I_\\nu = \\frac{2 \\nu^2 k T_{\\rm RJ}}{c^2} \\\\
I_\\nu = T_{\\rm CMB} B^\\prime_\\nu(T_{\\rm CMB, 0})
so, eliminating the flux in this equation:
..math::
T_{\\rm RJ} / T_{\\rm CMB} = \\frac{c^2}{2 \\nu^2 k_B}B^\\prime_\\nu(T_{\\rm CMB, 0})
Here we calculate the RHS of this equation and compare it to the
ratio of T_RJ and the result of its transformation to T_CMB.
"""
T_CMB, T_RJ, freqs, test_map_RJ, test_map_CMB, test_map_dimless = setUp
equiv = {"equivalencies": units.cmb_equivalencies(freqs)}
rj_from_cmb = T_CMB.to(units.K_RJ, **equiv)
cmb_from_rj = T_RJ.to(units.K_CMB, **equiv)
# check that the reverse transformation gives overall transformation of unity.
reverse1 = rj_from_cmb.to(units.K_CMB, **equiv)
reverse2 = cmb_from_rj.to(units.K_RJ, **equiv)
np.testing.assert_almost_equal(1.0, T_CMB / reverse1, decimal=6)
np.testing.assert_almost_equal(1.0, T_RJ / reverse2, decimal=6)
def test_fits_unit_functionality(setUp, tmp_path):
"""Test that the units can be written to the fits header. Check that
they can be read in again and assigned to the data in that fits file
correctly.
"""
T_CMB, T_RJ, freqs, test_map_RJ, test_map_CMB, test_map_dimless = setUp
hp.write_map(
tmp_path / "temp_fits_file_RJ.fits",
test_map_RJ.value,
column_units=test_map_RJ.unit.to_string("generic"),
)
hp.write_map(
tmp_path / "temp_fits_file_CMB.fits",
test_map_CMB.value,
column_units=test_map_CMB.unit.to_string("generic"),
)
hp.write_map(
tmp_path / "temp_fits_file_dimless.fits",
test_map_dimless.value,
column_units=test_map_dimless.unit.to_string("generic"),
)
hp.write_map(tmp_path / "temp_fits_file_no_unit_hdr.fits", test_map_dimless.value)
cmb_in = read_map(tmp_path / "temp_fits_file_CMB.fits", 256)
rj_in = read_map(tmp_path / "temp_fits_file_RJ.fits", 256)
dimless_in = read_map(tmp_path / "temp_fits_file_dimless.fits", 256)
no_unit_hdr = read_map(tmp_path / "temp_fits_file_no_unit_hdr.fits", 256)
assert cmb_in.unit == units.K_CMB
assert rj_in.unit == units.K_RJ
assert dimless_in.unit == units.dimensionless_unscaled
assert no_unit_hdr.unit == units.dimensionless_unscaled
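# Illustrative sketch (not one of the original tests): convert a brightness
# temperature from K_RJ to K_CMB at 100 GHz using the same equivalencies that the
# tests above exercise.
def _example_unit_conversion():
    freqs = 100.0 * units.GHz
    t_rj = 100.0 * units.K_RJ
    return t_rj.to(units.K_CMB, equivalencies=units.cmb_equivalencies(freqs))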
|
galsciREPO_NAMEpysmPATH_START.@pysm_extracted@pysm-main@tests@[email protected]_END.py
|
{
"filename": "settings.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/io/fits/hdu/compressed/settings.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Quantization dithering method constants; these are right out of fitsio.h
NO_DITHER = -1
SUBTRACTIVE_DITHER_1 = 1
SUBTRACTIVE_DITHER_2 = 2
QUANTIZE_METHOD_NAMES = {
NO_DITHER: "NO_DITHER",
SUBTRACTIVE_DITHER_1: "SUBTRACTIVE_DITHER_1",
SUBTRACTIVE_DITHER_2: "SUBTRACTIVE_DITHER_2",
}
DITHER_SEED_CLOCK = 0
DITHER_SEED_CHECKSUM = -1
COMPRESSION_TYPES = (
"NOCOMPRESS",
"RICE_1",
"GZIP_1",
"GZIP_2",
"PLIO_1",
"HCOMPRESS_1",
)
# Default compression parameter values
DEFAULT_COMPRESSION_TYPE = "RICE_1"
DEFAULT_QUANTIZE_LEVEL = 16.0
DEFAULT_QUANTIZE_METHOD = NO_DITHER
DEFAULT_DITHER_SEED = DITHER_SEED_CLOCK
DEFAULT_HCOMP_SCALE = 0
DEFAULT_HCOMP_SMOOTH = 0
DEFAULT_BLOCK_SIZE = 32
DEFAULT_BYTE_PIX = 4
CMTYPE_ALIASES = {"RICE_ONE": "RICE_1"}
COMPRESSION_KEYWORDS = {
"ZIMAGE",
"ZCMPTYPE",
"ZBITPIX",
"ZNAXIS",
"ZMASKCMP",
"ZSIMPLE",
"ZTENSION",
"ZEXTEND",
}
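# Illustrative sketch (not part of the original module): look up the human-readable
# name of a quantization dithering method, falling back for unknown codes.
def _example_quantize_method_name(method=DEFAULT_QUANTIZE_METHOD):
    return QUANTIZE_METHOD_NAMES.get(method, "UNKNOWN")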
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@io@fits@hdu@[email protected]@.PATH_END.py
|
{
"filename": "_weight.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattercarpet/legendgrouptitle/font/_weight.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self,
plotly_name="weight",
parent_name="scattercarpet.legendgrouptitle.font",
**kwargs,
):
super(WeightValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
extras=kwargs.pop("extras", ["normal", "bold"]),
max=kwargs.pop("max", 1000),
min=kwargs.pop("min", 1),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattercarpet@legendgrouptitle@font@[email protected]_END.py
|
{
"filename": "orderl5.py",
"repo_name": "jrenaud90/TidalPy",
"repo_path": "TidalPy_extracted/TidalPy-main/TidalPy/tides/inclination_funcs/orderl5.py",
"type": "Python"
}
|
""" Inclination functions (squared) for tidal order-l = 5. These are exact (no truncation on I)
"""
from typing import TYPE_CHECKING
import numpy as np
from . import InclinOutput
from ...utilities.performance.numba import njit
if TYPE_CHECKING:
from ...utilities.types import FloatArray
@njit(cacheable=True, parallel=True)
def calc_inclination_off(inclination: 'FloatArray') -> 'InclinOutput':
"""Calculate F^2_lmp (assuming I=0) for l = 5"""
# Inclination Functions Calculated for l = 5, Inclination == off.
ones_ = np.ones_like(inclination)
inclination_results = {
(1, 2): 3.515625 * ones_,
(3, 1): 2756.25 * ones_,
(5, 0): 893025. * ones_,
}
return inclination_results
@njit(cacheable=True, parallel=True)
def calc_inclination(inclination: 'FloatArray') -> 'InclinOutput':
"""Calculate F^2_lmp for l = 5"""
# Inclination Functions Calculated for l = 5.
# Optimizations
i = inclination
i_half = i / 2.
i_double = 2. * i
i_triple = 3. * i
sin_i = np.sin(i)
cos_i = np.cos(i)
sin_i_half = np.sin(i_half)
cos_i_half = np.cos(i_half)
cos_i_double = np.cos(i_double)
cos_i_triple = np.cos(i_triple)
inclination_results = {
(0, 0) : 62.015625*sin_i_half**10*cos_i_half**10,
(0, 1) : 1.5140533447265625*(0.8888888888888888888888889 - sin_i**2)**2*sin_i**6,
(0, 2) : 1406.25*(0.0015625*(cos_i + 1.0)**4*sin_i + 0.05*sin_i_half**9*cos_i_half - 0.5*sin_i_half**7*cos_i_half**3 + sin_i_half**5*cos_i_half**5 - 0.5*sin_i_half**3*cos_i_half**7)**2,
(0, 3) : 1406.25*(-0.0015625*(cos_i + 1.0)**4*sin_i - 0.05*sin_i_half**9*cos_i_half + 0.5*sin_i_half**7*cos_i_half**3 - sin_i_half**5*cos_i_half**5 + 0.5*sin_i_half**3*cos_i_half**7)**2,
(0, 4) : 1.5140533447265625*(sin_i**2 - 0.8888888888888888888888889)**2*sin_i**6,
(0, 5) : 62.015625*sin_i_half**10*cos_i_half**10,
(1, 0) : 1550.390625*sin_i_half**8*cos_i_half**12,
(1, 1) : 44.42274570465087890625*(cos_i + 1.0)**4*(-cos_i + 0.6461538461538461538461539*cos_i_double - 0.2307692307692307692307692*cos_i_triple + 0.5846153846153846153846154)**2,
(1, 2) : 696181.640625*(-0.4157303370786516853932584*sin_i_half**10 + sin_i_half**8 - 0.7865168539325842696629214*sin_i_half**6 + 0.2022471910112359550561798*sin_i_half**4 + 0.05617977528089887640449438*cos_i_half**10 - 0.05393258426966292134831461*cos_i_half**8)**2,
(1, 3) : 146306.25*(0.563725490196078431372549*sin_i_half**8 - sin_i_half**6 + 0.4411764705882352941176471*sin_i_half**4 + 0.4656862745098039215686275*cos_i_half**8 - 0.3921568627450980392156863*cos_i_half**6)**2*sin_i_half**4,
(1, 4) : 605.621337890625*(cos_i + 1.0)**2*(-sin_i**2 + 0.4*cos_i + 0.9333333333333333333333333)**2*sin_i_half**8,
(1, 5) : 1550.390625*sin_i_half**12*cos_i_half**8,
(2, 0) : 24806.25*sin_i_half**6*cos_i_half**14,
(2, 1) : 135056.25*(-0.004464285714285714285714286*(cos_i + 1.0)**4*sin_i - sin_i_half**5*cos_i_half**5 + sin_i_half**3*cos_i_half**7)**2,
(2, 2) : 620156.25*(0.002083333333333333333333333*(cos_i + 1.0)**4*sin_i - 0.3333333333333333333333333*sin_i_half**7*cos_i_half**3 + sin_i_half**5*cos_i_half**5 - 0.6*sin_i_half**3*cos_i_half**7)**2,
(2, 3) : 4192256.25*(-0.6410256410256410256410256*sin_i_half**6 + sin_i_half**4 - 0.3846153846153846153846154*sin_i_half**2 + 0.1282051282051282051282051*cos_i_half**6)**2*sin_i_half**6*cos_i_half**2,
(2, 4) : 1215506.25*(0.7142857142857142857142857*sin_i_half**4 - sin_i_half**2 + 0.3333333333333333333333333)**2*sin_i_half**10*cos_i_half**2,
(2, 5) : 24806.25*sin_i_half**14*cos_i_half**6,
(3, 0) : 223256.25*sin_i_half**4*cos_i_half**16,
(3, 1) : 7848.8525390625*(cos_i + 1.0)**6*(-0.8333333333333333333333333*cos_i**2 + cos_i - 0.2407407407407407407407407)**2,
(3, 2) : 25587.50152587890625*(cos_i + 1.0)**4*(cos_i - 0.6461538461538461538461539*cos_i_double + 0.2307692307692307692307692*cos_i_triple - 0.5846153846153846153846154)**2,
(3, 3) : 348837.890625*(cos_i + 1.0)**2*(-sin_i**2 + 0.4*cos_i + 0.9333333333333333333333333)**2*sin_i_half**8,
(3, 4) : 579501.5625*(0.7758620689655172413793103*sin_i**2 - 0.9310344827586206896551724*cos_i - 1)**2*sin_i_half**12,
(3, 5) : 223256.25*sin_i_half**16*cos_i_half**4,
(4, 0) : 872.0947265625*(cos_i + 1.0)**8*sin_i**2,
(4, 1) : 14288400.0*(-0.0078125*(cos_i + 1.0)**4*sin_i + sin_i_half**3*cos_i_half**7)**2,
(4, 2) : 22325625.0*(0.2 - cos_i)**2*sin_i_half**6*cos_i_half**10,
(4, 3) : 22325625.0*(-cos_i - 0.2)**2*sin_i_half**10*cos_i_half**6,
(4, 4) : 5581406.25*(cos_i + 0.6)**2*sin_i_half**14*cos_i_half**2,
(4, 5) : 893025.0*sin_i_half**18*cos_i_half**2,
(5, 0) : 893025.0*cos_i_half**20,
(5, 1) : 22325625.0*sin_i_half**4*cos_i_half**16,
(5, 2) : 89302500.0*sin_i_half**8*cos_i_half**12,
(5, 3) : 89302500.0*sin_i_half**12*cos_i_half**8,
(5, 4) : 22325625.0*sin_i_half**16*cos_i_half**4,
(5, 5) : 893025.0*sin_i_half**20
}
return inclination_results
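# Illustrative sketch (not part of the original module): evaluate the l = 5
# inclination functions on a small array of inclinations (radians assumed, matching
# the np.sin/np.cos usage above). Defined only; nothing runs at import time.
def _example_evaluate(inclinations=None):
    if inclinations is None:
        inclinations = np.asarray([0.0, 0.1, 0.5])
    results = calc_inclination(inclinations)
    return results[(5, 0)]  # one of the (m, p) entries of F^2_lmp for l = 5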
|
jrenaud90REPO_NAMETidalPyPATH_START.@TidalPy_extracted@TidalPy-main@TidalPy@tides@[email protected]@.PATH_END.py
|
{
"filename": "read_elower_grid.py",
"repo_name": "HajimeKawahara/exojax",
"repo_path": "exojax_extracted/exojax-master/documents/analysis/read_elower_grid.py",
"type": "Python"
}
|
# %%
from exojax.spec.lbderror import optimal_params
Tl_in = 500.0 #K
Tu_in = 1200.0 #K
diffmode = 2
dE, Tl, Tu = optimal_params(Tl_in, Tu_in, diffmode)
print(dE, Tl, Tu)
#750.0 1153.6267095763965 554.1714566743503
|
HajimeKawaharaREPO_NAMEexojaxPATH_START.@exojax_extracted@exojax-master@documents@analysis@[email protected]_END.py
|
{
"filename": "_tickfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/layout/ternary/baxis/_tickfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Tickfont(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.ternary.baxis"
_path_str = "layout.ternary.baxis.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
"Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
"Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# lineposition
# ------------
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
# shadow
# ------
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# style
# -----
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
# textcase
# --------
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
# variant
# -------
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
# weight
# ------
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
        The 'weight' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.ternary
.baxis.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.ternary.baxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.baxis.Tickfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("lineposition", None)
_v = lineposition if lineposition is not None else _v
if _v is not None:
self["lineposition"] = _v
_v = arg.pop("shadow", None)
_v = shadow if shadow is not None else _v
if _v is not None:
self["shadow"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("style", None)
_v = style if style is not None else _v
if _v is not None:
self["style"] = _v
_v = arg.pop("textcase", None)
_v = textcase if textcase is not None else _v
if _v is not None:
self["textcase"] = _v
_v = arg.pop("variant", None)
_v = variant if variant is not None else _v
if _v is not None:
self["variant"] = _v
_v = arg.pop("weight", None)
_v = weight if weight is not None else _v
if _v is not None:
self["weight"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
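# --- Editorial usage sketch (not part of the generated plotly module) ---
# The property docstrings above describe the accepted value types. Assuming the
# usual public import path, this class is reached as
# plotly.graph_objects.layout.ternary.baxis.Tickfont, so a tick font for the
# ternary b-axis could be configured along these lines:
#     import plotly.graph_objects as go
#     fig = go.Figure()
#     fig.update_layout(
#         ternary_baxis_tickfont=dict(
#             family="Arial, sans-serif", size=14, color="crimson",
#             style="italic", weight="bold",
#         )
#     )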
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@layout@ternary@baxis@[email protected]_END.py
|
{
"filename": "master_script.py",
"repo_name": "bbercovici/SBGAT",
"repo_path": "SBGAT_extracted/SBGAT-master/Examples/PGMUncertaintyMCPolesConstantMass/master_script.py",
"type": "Python"
}
|
import os
import json
import numpy as np
import platform
import sys
import itertools
import time
import socket
def generate_all_cases_dictionnary_list(base_dictionnary,all_cases_dictionnary,base_location,sim_name):
if len(all_cases_dictionnary) > 0:
keys, values = zip(*all_cases_dictionnary.items())
dictionnary_list = [dict(zip(keys, v)) for v in itertools.product(*values)]
all_cases_dictionnary_list = [{**dictionnary_list[e],**base_dictionnary} for e in range(len(dictionnary_list))]
else:
all_cases_dictionnary_list = [base_dictionnary]
dictionnary_list = [base_dictionnary]
for e in range(len(dictionnary_list)):
all_cases_dictionnary_list[e]["INPUT_DIR"] = base_location + "input/" + sim_name + "_" + str(e) + "/"
all_cases_dictionnary_list[e]["OUTPUT_DIR"] = base_location + "output/" + sim_name + "_" + str(e) + "/"
return all_cases_dictionnary_list
# Replace the paths assigned to 'base_location' below with an existing directory under which the input/ and output/
# sub-directories will be created and populated
if (socket.gethostname() == "fortuna"):
base_location = "/orc_raid/bebe0705/PGMUncertaintyMCPolesConstantMass/"
else:
base_location = "../"
# SIM_PREFIX will be added to the name of every folder to be put in input/ and output/
SIM_PREFIX = "PGMUncertaintyMCPolesConstantMass"
# Dictionary storing simulation inputs to be kept constant
base_dictionnary = {
"DENSITY" : 4500,
"UNIT_IN_METERS" : False,
"STEP_SIZE" : 10e3,
"PATH_SHAPE" : "../../../resources/shape_models/psyche.obj",
"PROJECTION_AXIS" : 0,
"UNCERTAINTY_TYPE" : "normal",
"N_MONTE_CARLO" : 5000,
"CORRELATION_DISTANCE" : 75e3,
"ERROR_STANDARD_DEV" : 10e3,
"COV_REGION_CENTERS" : [0,1147]
}
# Dictionary storing simulation inputs to be looped over
# for instance, one could have
# all_cases_dictionnary = {
# "INPUT_THAT_MUST_BE_CHANGED_1" : [1,2,3]
# "INPUT_THAT_MUST_BE_CHANGED_2" : [True,False]
# }
# which means that a total of six (two times three) simulations will be run
# and saved in input/ and output/, with the names of the subfolders prefixed by SIM_PREFIX
all_cases_dictionnary = {
"HOLD_MASS_CONSTANT" : [True]
}
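# (editorial sketch) generate_all_cases_dictionnary_list expands the dictionary above
# with itertools.product. For example, a hypothetical
#     all_cases_dictionnary = {"A": [1, 2], "B": [True, False]}
# would produce four cases {"A": 1, "B": True}, {"A": 1, "B": False},
# {"A": 2, "B": True} and {"A": 2, "B": False}, each merged with base_dictionnary and
# given its own INPUT_DIR/OUTPUT_DIR suffix _0 ... _3.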
# There shouldn't be any reason to modify the following
all_data = generate_all_cases_dictionnary_list(base_dictionnary,
all_cases_dictionnary,base_location,SIM_PREFIX)
os.system("cmake .. && make")
for data in all_data:
print("\t Case " + data["INPUT_DIR"].split("/")[-2])
print("\t - Making directory")
os.system("mkdir " + data["INPUT_DIR"])
os.system("mkdir " + data["OUTPUT_DIR"])
print("\t - Copying input file in build/")
with open('input_file.json', 'w') as outfile:
json.dump(data, outfile)
print("\t - Saving input file in input/ and output/")
with open(data["INPUT_DIR"] + 'input_file.json', 'w') as outfile:
json.dump(data, outfile)
with open(data["OUTPUT_DIR"] + 'input_file.json', 'w') as outfile:
json.dump(data, outfile)
print("\t - Running case " + data["INPUT_DIR"].split("/")[-2])
os.system("> " + data["OUTPUT_DIR"] + "log.txt")
os.system("./PGMUncertaintyMCPolesConstantMass 2>&1 | tee " + data["OUTPUT_DIR"] + "log.txt" )
|
bbercoviciREPO_NAMESBGATPATH_START.@SBGAT_extracted@SBGAT-master@Examples@PGMUncertaintyMCPolesConstantMass@[email protected]_END.py
|
{
"filename": "test_set_active_attribute.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/integrationtests/mayavi/test_set_active_attribute.py",
"type": "Python"
}
|
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import abspath
from io import BytesIO
import copy
# Local imports.
from mayavi.core.common import get_output
from common import TestCase, get_example_data
class TestSetActiveAttribute(TestCase):
def check(self):
"""Check if the visualization is OK. Note that this is not an
image based check, which is very convenient.
"""
e = self.script.engine
scene = e.current_scene
src = scene.children[0]
assert src.point_scalars_name == 'u'
c = src.children[1]
sc = get_output(c.outputs[0]).point_data.scalars
assert sc.name == 'u'
# It is an iso-contour!
assert sc.range[0] == sc.range[1]
aa = c.children[0].children[0]
assert aa.point_scalars_name == 't'
sc = get_output(aa.outputs[0]).point_data.scalars
assert sc.name == 't'
assert abs(sc.range[0] - 308) < 1.0
assert abs(sc.range[1] - 631) < 1.0
s = aa.children[0].children[0]
def test(self):
self.main()
def do(self):
"""Test for the SetActiveAttribute filter.
"""
from mayavi.sources.api import VTKXMLFileReader
from mayavi.filters.contour import Contour
from mayavi.filters.api import PolyDataNormals
from mayavi.filters.set_active_attribute import SetActiveAttribute
from mayavi.modules.api import Surface, Outline
mayavi = script = self.script
scene = self.new_scene()
r = VTKXMLFileReader()
r.initialize(get_example_data('fire_ug.vtu'))
mayavi.add_source(r)
r.point_scalars_name = 'u'
o = Outline()
mayavi.add_module(o)
c = Contour()
mayavi.add_filter(c)
n = PolyDataNormals()
mayavi.add_filter(n)
aa = SetActiveAttribute()
mayavi.add_filter(aa)
aa.point_scalars_name = 't'
s = Surface()
mayavi.add_module(s)
scene.scene.isometric_view()
# Check if things are OK.
self.check()
############################################################
# Test if saving a visualization and restoring it works.
# Save visualization.
f = BytesIO()
f.name = abspath('test.mv2') # We simulate a file.
mayavi.save_visualization(f)
f.seek(0) # So we can read this saved data.
# Remove existing scene.
engine = mayavi.engine
engine.close_scene(s)
# Load visualization
mayavi.load_visualization(f)
s = engine.current_scene
# Now do the check.
s.scene.isometric_view()
self.check()
############################################################
# Test if the Mayavi2 visualization can be deep-copied.
# Pop the source object.
source = s.children.pop()
# Add it back to see if that works without error.
s.children.append(source)
# Now do the check.
self.check()
# Now deepcopy the source and replace the existing one with
# the copy. This basically simulates cutting/copying the
# object from the UI via the right-click menu on the tree
# view, and pasting the copy back.
source1 = copy.deepcopy(source)
s.children[0] = source1
# Now do the check.
s.scene.isometric_view()
self.check()
# If we have come this far, we are golden!
if __name__ == '__main__':
t = TestSetActiveAttribute()
t.test()
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@integrationtests@mayavi@[email protected]_END.py
|
{
"filename": "conf.py",
"repo_name": "halomod/hmf",
"repo_path": "hmf_extracted/hmf-main/docs/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# hmf documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 2 10:40:08 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import time
from importlib_metadata import version as _version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.ifconfig",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.mathjax",
"sphinx.ext.autosectionlabel",
"numpydoc",
"nbsphinx",
"IPython.sphinxext.ipython_console_highlighting",
"sphinx.ext.intersphinx",
]
numpydoc_show_class_members = False
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hmf'
copyright = u'%s, Steven Murray' % (time.localtime()[0])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
"templates",
"examples/.ipynb_checkpoints",
]
# The short X.Y version.
version = _version('hmf')
# The full version, including alpha/beta/rc tags.
release = _version('hmf')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'hmfdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'hmf.tex', u'hmf Documentation',
u'Steven Murray', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hmf', u'hmf Documentation',
[u'Steven Murray'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'hmf', u'hmf Documentation',
u'Steven Murray', 'hmf', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"astropy": ("https://docs.astropy.org/en/stable/", None),
}
mathjax_path = "http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
|
halomodREPO_NAMEhmfPATH_START.@hmf_extracted@hmf-main@[email protected]@.PATH_END.py
|
{
"filename": "timesample.py",
"repo_name": "CMB-S4/spt3g_software",
"repo_path": "spt3g_software_extracted/spt3g_software-master/core/tests/timesample.py",
"type": "Python"
}
|
#!/usr/bin/env python
from spt3g import core
import unittest
import numpy as np
import copy
SEC = core.G3Units.sec
class TestTimesampleVector(unittest.TestCase):
def test_from_list(self):
t0 = core.G3Time('2019-01-01T12:30:00')
vectime = core.G3VectorTime([t0, t0 + 10*SEC])
assert(vectime[0].time == t0.time)
assert(vectime[1].time == t0.time + 10*SEC)
def test_from_numpy_array(self):
t0 = core.G3Time('2019-01-01T12:30:00')
timestamps = np.linspace(t0.time, t0.time + 1e7*SEC, 3000)
vectime = core.G3VectorTime(timestamps)
assert(vectime[0] == t0)
assert(vectime[-1] == core.G3Time(timestamps[-1]))
assert(len(vectime) == len(timestamps))
def test_to_numpy_array(self):
t0 = core.G3Time('2019-01-01T12:30:00')
timestamps = np.linspace(t0.time, t0.time + 1e7*SEC, 3000, dtype='int64')
vectime = core.G3VectorTime(timestamps)
assert((np.asarray(vectime) == timestamps).all())
def test_reinit_from_numpy_array(self):
t0 = core.G3Time('2019-01-01T12:30:00')
timestamps = np.linspace(t0.time, t0.time + 1e7*SEC, 3000, dtype='int64')
vectime = core.G3VectorTime(timestamps)
revectime = core.G3VectorTime(np.asarray(vectime))
assert((np.asarray(revectime) == timestamps).all())
def test_copy_constructor(self):
t0 = core.G3VectorTime(np.array([100000000, 200000000]))
t1 = core.G3VectorTime(t0)
assert((np.asarray(t0) == np.asarray(t1)).all())
def get_test_block(length, keys=['a', 'b', 'c', 'd'],
offset=0, ordered=True):
type_cycle = [(core.G3VectorDouble, float),
(core.G3VectorInt, int),
(core.G3VectorString, str),
(core.G3VectorBool, bool)]
t0 = core.G3Time('2019-01-01T12:30:00') + offset*SEC
m = core.G3TimesampleMap()
times = np.arange(length)
if not ordered:
np.random.shuffle(times)
m.times = core.G3VectorTime(t0 + times*SEC)
for i, k in enumerate(keys):
y = (np.random.uniform(size=length) * 100).astype(int)
constructor, cast_func = type_cycle[i % len(type_cycle)]
vect = constructor(list(map(cast_func, y)))
m[k] = vect
return m
class TestTimesampleMap(unittest.TestCase):
def test_00_internal_checks(self):
# Valid block.
m = get_test_block(100)
m.check()
def test_10_safety(self):
m0 = get_test_block(100)
m1 = get_test_block(101)
# Try to add an incompatible element.
with self.assertRaises(ValueError):
m0.times = m1.times
with self.assertRaises(ValueError):
m0['a'] = m1['d']
# But we should be able to change times in an empty vector.
m0 = get_test_block(100, [])
m1 = get_test_block(101)
m0.times = m1.times
m0['x'] = m1['a']
def test_20_concat(self):
# Test concatenation.
key_list = ['w', 'x', 'y', 'z']
m0 = get_test_block(100, key_list)
m1 = get_test_block(200, key_list, offset=100)
m01 = m0.concatenate(m1)
self.assertTrue(np.all(
np.hstack([np.array(m0.times), np.array(m1.times)]) == np.array(m01.times)))
for k in key_list:
self.assertTrue(np.all(
np.hstack([np.array(m0[k]), np.array(m1[k])]) == np.array(m01[k])))
for fail_vec in [
get_test_block(200, key_list + ['extra'], 100),
get_test_block(200, key_list[:-1], 100),
]:
with self.assertRaises(ValueError):
m0.concatenate(fail_vec)
def test_30_serialization(self):
m0 = get_test_block(100)
m1 = get_test_block(200, offset=100)
m2 = m0.concatenate(m1)
m0.check()
m1.check()
m2.check()
f = core.G3Frame()
f['irreg0'] = m0
f['irreg1'] = m1
core.G3Writer('test.g3').Process(f)
f = core.G3Reader('test.g3').Process(None)[0]
f['irreg0'].check()
f['irreg1'].check()
f['irreg0'].concatenate(f['irreg1'])['b']
def test_40_sort(self):
m0 = get_test_block(100, ordered=False)
m1 = copy.deepcopy(m0)
m0.sort()
idx = np.argsort(m1.times)
self.assertTrue((np.asarray(m1.times)[idx] == np.asarray(m0.times)).all())
for k in m0.keys():
self.assertTrue((np.asarray(m1[k])[idx] == np.asarray(m0[k])).all())
if __name__ == '__main__':
unittest.main()
|
CMB-S4REPO_NAMEspt3g_softwarePATH_START.@spt3g_software_extracted@spt3g_software-master@core@[email protected]@.PATH_END.py
|
{
"filename": "_text.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/xaxis/title/_text.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="text", parent_name="layout.xaxis.title", **kwargs):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "ticks"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@xaxis@title@[email protected]_END.py
|
{
"filename": "test_ontotext_graphdb_qa.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/chains/test_ontotext_graphdb_qa.py",
"type": "Python"
}
|
from unittest.mock import MagicMock, Mock
import pytest
from langchain.chains import LLMChain
from langchain_community.chains.graph_qa.ontotext_graphdb import OntotextGraphDBQAChain
from langchain_community.graphs import OntotextGraphDBGraph
"""
cd libs/langchain/tests/integration_tests/chains/docker-compose-ontotext-graphdb
./start.sh
"""
@pytest.mark.requires("langchain_openai", "rdflib")
@pytest.mark.parametrize("max_fix_retries", [-2, -1, 0, 1, 2])
def test_valid_sparql(max_fix_retries: int) -> None:
from langchain_openai import ChatOpenAI
question = "What is Luke Skywalker's home planet?"
answer = "Tatooine"
graph = OntotextGraphDBGraph(
query_endpoint="http://localhost:7200/repositories/starwars",
query_ontology="CONSTRUCT {?s ?p ?o} "
"FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
)
chain = OntotextGraphDBQAChain.from_llm(
Mock(ChatOpenAI),
graph=graph,
max_fix_retries=max_fix_retries,
)
chain.sparql_generation_chain = Mock(LLMChain)
chain.sparql_fix_chain = Mock(LLMChain)
chain.qa_chain = Mock(LLMChain)
chain.sparql_generation_chain.output_key = "text"
chain.sparql_generation_chain.invoke = MagicMock(
return_value={
"text": "SELECT * {?s ?p ?o} LIMIT 1",
"prompt": question,
"schema": "",
}
)
chain.sparql_fix_chain.output_key = "text"
chain.sparql_fix_chain.invoke = MagicMock()
chain.qa_chain.output_key = "text"
chain.qa_chain.invoke = MagicMock(
return_value={
"text": answer,
"prompt": question,
"context": [],
}
)
result = chain.invoke({chain.input_key: question})
assert chain.sparql_generation_chain.invoke.call_count == 1
assert chain.sparql_fix_chain.invoke.call_count == 0
assert chain.qa_chain.invoke.call_count == 1
assert result == {chain.output_key: answer, chain.input_key: question}
@pytest.mark.requires("langchain_openai", "rdflib")
@pytest.mark.parametrize("max_fix_retries", [-2, -1, 0])
def test_invalid_sparql_non_positive_max_fix_retries(
max_fix_retries: int,
) -> None:
from langchain_openai import ChatOpenAI
question = "What is Luke Skywalker's home planet?"
graph = OntotextGraphDBGraph(
query_endpoint="http://localhost:7200/repositories/starwars",
query_ontology="CONSTRUCT {?s ?p ?o} "
"FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
)
chain = OntotextGraphDBQAChain.from_llm(
Mock(ChatOpenAI),
graph=graph,
max_fix_retries=max_fix_retries,
)
chain.sparql_generation_chain = Mock(LLMChain)
chain.sparql_fix_chain = Mock(LLMChain)
chain.qa_chain = Mock(LLMChain)
chain.sparql_generation_chain.output_key = "text"
chain.sparql_generation_chain.invoke = MagicMock(
return_value={
"text": "```sparql SELECT * {?s ?p ?o} LIMIT 1```",
"prompt": question,
"schema": "",
}
)
chain.sparql_fix_chain.output_key = "text"
chain.sparql_fix_chain.invoke = MagicMock()
chain.qa_chain.output_key = "text"
chain.qa_chain.invoke = MagicMock()
with pytest.raises(ValueError) as e:
chain.invoke({chain.input_key: question})
assert str(e.value) == "The generated SPARQL query is invalid."
assert chain.sparql_generation_chain.invoke.call_count == 1
assert chain.sparql_fix_chain.invoke.call_count == 0
assert chain.qa_chain.invoke.call_count == 0
@pytest.mark.requires("langchain_openai", "rdflib")
@pytest.mark.parametrize("max_fix_retries", [1, 2, 3])
def test_valid_sparql_after_first_retry(max_fix_retries: int) -> None:
from langchain_openai import ChatOpenAI
question = "What is Luke Skywalker's home planet?"
answer = "Tatooine"
generated_invalid_sparql = "```sparql SELECT * {?s ?p ?o} LIMIT 1```"
graph = OntotextGraphDBGraph(
query_endpoint="http://localhost:7200/repositories/starwars",
query_ontology="CONSTRUCT {?s ?p ?o} "
"FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
)
chain = OntotextGraphDBQAChain.from_llm(
Mock(ChatOpenAI),
graph=graph,
max_fix_retries=max_fix_retries,
)
chain.sparql_generation_chain = Mock(LLMChain)
chain.sparql_fix_chain = Mock(LLMChain)
chain.qa_chain = Mock(LLMChain)
chain.sparql_generation_chain.output_key = "text"
chain.sparql_generation_chain.invoke = MagicMock(
return_value={
"text": generated_invalid_sparql,
"prompt": question,
"schema": "",
}
)
chain.sparql_fix_chain.output_key = "text"
chain.sparql_fix_chain.invoke = MagicMock(
return_value={
"text": "SELECT * {?s ?p ?o} LIMIT 1",
"error_message": "pyparsing.exceptions.ParseException: "
"Expected {SelectQuery | ConstructQuery | DescribeQuery | AskQuery}, "
"found '`' (at char 0), (line:1, col:1)",
"generated_sparql": generated_invalid_sparql,
"schema": "",
}
)
chain.qa_chain.output_key = "text"
chain.qa_chain.invoke = MagicMock(
return_value={
"text": answer,
"prompt": question,
"context": [],
}
)
result = chain.invoke({chain.input_key: question})
assert chain.sparql_generation_chain.invoke.call_count == 1
assert chain.sparql_fix_chain.invoke.call_count == 1
assert chain.qa_chain.invoke.call_count == 1
assert result == {chain.output_key: answer, chain.input_key: question}
@pytest.mark.requires("langchain_openai", "rdflib")
@pytest.mark.parametrize("max_fix_retries", [1, 2, 3])
def test_invalid_sparql_server_response_400(max_fix_retries: int) -> None:
from langchain_openai import ChatOpenAI
question = "Who is the oldest character?"
generated_invalid_sparql = (
"PREFIX : <https://swapi.co/vocabulary/> "
"PREFIX owl: <http://www.w3.org/2002/07/owl#> "
"PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
"PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> "
"SELECT ?character (MAX(?lifespan) AS ?maxLifespan) "
"WHERE {"
" ?species a :Species ;"
" :character ?character ;"
" :averageLifespan ?lifespan ."
" FILTER(xsd:integer(?lifespan))"
"} "
"ORDER BY DESC(?maxLifespan) "
"LIMIT 1"
)
graph = OntotextGraphDBGraph(
query_endpoint="http://localhost:7200/repositories/starwars",
query_ontology="CONSTRUCT {?s ?p ?o} "
"FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
)
chain = OntotextGraphDBQAChain.from_llm(
Mock(ChatOpenAI),
graph=graph,
max_fix_retries=max_fix_retries,
)
chain.sparql_generation_chain = Mock(LLMChain)
chain.sparql_fix_chain = Mock(LLMChain)
chain.qa_chain = Mock(LLMChain)
chain.sparql_generation_chain.output_key = "text"
chain.sparql_generation_chain.invoke = MagicMock(
return_value={
"text": generated_invalid_sparql,
"prompt": question,
"schema": "",
}
)
chain.sparql_fix_chain.output_key = "text"
chain.sparql_fix_chain.invoke = MagicMock()
chain.qa_chain.output_key = "text"
chain.qa_chain.invoke = MagicMock()
with pytest.raises(ValueError) as e:
chain.invoke({chain.input_key: question})
assert str(e.value) == "Failed to execute the generated SPARQL query."
assert chain.sparql_generation_chain.invoke.call_count == 1
assert chain.sparql_fix_chain.invoke.call_count == 0
assert chain.qa_chain.invoke.call_count == 0
@pytest.mark.requires("langchain_openai", "rdflib")
@pytest.mark.parametrize("max_fix_retries", [1, 2, 3])
def test_invalid_sparql_after_all_retries(max_fix_retries: int) -> None:
from langchain_openai import ChatOpenAI
question = "What is Luke Skywalker's home planet?"
generated_invalid_sparql = "```sparql SELECT * {?s ?p ?o} LIMIT 1```"
graph = OntotextGraphDBGraph(
query_endpoint="http://localhost:7200/repositories/starwars",
query_ontology="CONSTRUCT {?s ?p ?o} "
"FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
)
chain = OntotextGraphDBQAChain.from_llm(
Mock(ChatOpenAI),
graph=graph,
max_fix_retries=max_fix_retries,
)
chain.sparql_generation_chain = Mock(LLMChain)
chain.sparql_fix_chain = Mock(LLMChain)
chain.qa_chain = Mock(LLMChain)
chain.sparql_generation_chain.output_key = "text"
chain.sparql_generation_chain.invoke = MagicMock(
return_value={
"text": generated_invalid_sparql,
"prompt": question,
"schema": "",
}
)
chain.sparql_fix_chain.output_key = "text"
chain.sparql_fix_chain.invoke = MagicMock(
return_value={
"text": generated_invalid_sparql,
"error_message": "pyparsing.exceptions.ParseException: "
"Expected {SelectQuery | ConstructQuery | DescribeQuery | AskQuery}, "
"found '`' (at char 0), (line:1, col:1)",
"generated_sparql": generated_invalid_sparql,
"schema": "",
}
)
chain.qa_chain.output_key = "text"
chain.qa_chain.invoke = MagicMock()
with pytest.raises(ValueError) as e:
chain.invoke({chain.input_key: question})
assert str(e.value) == "The generated SPARQL query is invalid."
assert chain.sparql_generation_chain.invoke.call_count == 1
assert chain.sparql_fix_chain.invoke.call_count == max_fix_retries
assert chain.qa_chain.invoke.call_count == 0
@pytest.mark.requires("langchain_openai", "rdflib")
@pytest.mark.parametrize(
"max_fix_retries,number_of_invalid_responses",
[(1, 0), (2, 0), (2, 1), (10, 6)],
)
def test_valid_sparql_after_some_retries(
max_fix_retries: int, number_of_invalid_responses: int
) -> None:
from langchain_openai import ChatOpenAI
question = "What is Luke Skywalker's home planet?"
answer = "Tatooine"
generated_invalid_sparql = "```sparql SELECT * {?s ?p ?o} LIMIT 1```"
generated_valid_sparql_query = "SELECT * {?s ?p ?o} LIMIT 1"
graph = OntotextGraphDBGraph(
query_endpoint="http://localhost:7200/repositories/starwars",
query_ontology="CONSTRUCT {?s ?p ?o} "
"FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
)
chain = OntotextGraphDBQAChain.from_llm(
Mock(ChatOpenAI),
graph=graph,
max_fix_retries=max_fix_retries,
)
chain.sparql_generation_chain = Mock(LLMChain)
chain.sparql_fix_chain = Mock(LLMChain)
chain.qa_chain = Mock(LLMChain)
chain.sparql_generation_chain.output_key = "text"
chain.sparql_generation_chain.invoke = MagicMock(
return_value={
"text": generated_invalid_sparql,
"prompt": question,
"schema": "",
}
)
chain.sparql_fix_chain.output_key = "text"
chain.sparql_fix_chain.invoke = Mock()
chain.sparql_fix_chain.invoke.side_effect = [
{
"text": generated_invalid_sparql,
"error_message": "pyparsing.exceptions.ParseException: "
"Expected {SelectQuery | ConstructQuery | DescribeQuery | AskQuery}, "
"found '`' (at char 0), (line:1, col:1)",
"generated_sparql": generated_invalid_sparql,
"schema": "",
}
] * number_of_invalid_responses + [
{
"text": generated_valid_sparql_query,
"error_message": "pyparsing.exceptions.ParseException: "
"Expected {SelectQuery | ConstructQuery | DescribeQuery | AskQuery}, "
"found '`' (at char 0), (line:1, col:1)",
"generated_sparql": generated_invalid_sparql,
"schema": "",
}
]
chain.qa_chain.output_key = "text"
chain.qa_chain.invoke = MagicMock(
return_value={
"text": answer,
"prompt": question,
"context": [],
}
)
result = chain.invoke({chain.input_key: question})
assert chain.sparql_generation_chain.invoke.call_count == 1
assert chain.sparql_fix_chain.invoke.call_count == number_of_invalid_responses + 1
assert chain.qa_chain.invoke.call_count == 1
assert result == {chain.output_key: answer, chain.input_key: question}
@pytest.mark.requires("langchain_openai", "rdflib")
@pytest.mark.parametrize(
"model_name,question",
[
("gpt-3.5-turbo-1106", "What is the average height of the Wookiees?"),
("gpt-3.5-turbo-1106", "What is the climate on Tatooine?"),
("gpt-3.5-turbo-1106", "What is Luke Skywalker's home planet?"),
("gpt-4-1106-preview", "What is the average height of the Wookiees?"),
("gpt-4-1106-preview", "What is the climate on Tatooine?"),
("gpt-4-1106-preview", "What is Luke Skywalker's home planet?"),
],
)
def test_chain(model_name: str, question: str) -> None:
from langchain_openai import ChatOpenAI
graph = OntotextGraphDBGraph(
query_endpoint="http://localhost:7200/repositories/starwars",
query_ontology="CONSTRUCT {?s ?p ?o} "
"FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
)
chain = OntotextGraphDBQAChain.from_llm(
ChatOpenAI(temperature=0, model_name=model_name), # type: ignore[call-arg]
graph=graph,
verbose=True, # type: ignore[call-arg]
)
try:
chain.invoke({chain.input_key: question})
except ValueError:
pass
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@chains@[email protected]_END.py
|
{
"filename": "_domain.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/treemap/_domain.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DomainValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="domain", parent_name="treemap", **kwargs):
super(DomainValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Domain"),
data_docs=kwargs.pop(
"data_docs",
"""
column
If there is a layout grid, use the domain for
this column in the grid for this treemap trace
.
row
If there is a layout grid, use the domain for
this row in the grid for this treemap trace .
x
Sets the horizontal domain of this treemap
trace (in plot fraction).
y
Sets the vertical domain of this treemap trace
(in plot fraction).
""",
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@treemap@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "brinckmann/montepython_public",
"repo_path": "montepython_public_extracted/montepython_public-master/montepython/likelihoods/core_m5_lowl/__init__.py",
"type": "Python"
}
|
# there is no specific likelihood code for this experiment, because it
# falls in the category of CMB experiments described in the "mock CMB"
# format. The class below inherits the properties of a general class
# "Likelihood_mock_cmb", which knows how to deal with all experiments in
# "mock CMB" format.
from montepython.likelihood_class import Likelihood_mock_cmb
class core_m5_lowl(Likelihood_mock_cmb):
pass
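# (editorial note, hedged) No Python logic is needed here: Likelihood_mock_cmb is
# expected to read all experiment-specific settings (noise spectra, sky fraction,
# multipole range, fiducial model) from the .data file shipped with this likelihood.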
|
brinckmannREPO_NAMEmontepython_publicPATH_START.@montepython_public_extracted@montepython_public-master@montepython@likelihoods@core_m5_lowl@[email protected]_END.py
|
{
"filename": "plot_waterfalls.py",
"repo_name": "CHIME-Pulsar-Timing/CHIME-Pulsar_automated_filterbank",
"repo_path": "CHIME-Pulsar_automated_filterbank_extracted/CHIME-Pulsar_automated_filterbank-main/plot_waterfalls.py",
"type": "Python"
}
|
import sys
from presto import waterfaller
from presto.filterbank import FilterbankFile
import csv
# let's first parse the single pulse file
fil_file = sys.argv[1]
sp_file = sys.argv[3:]
fil = fil_file[:-4] if fil_file.endswith('.fil') else fil_file  # note: str.strip('.fil') removes characters, not the suffix
snr_default = float(sys.argv[2])
#this is code to select time ranges to make waterfalls
search_file = fil+'_search.csv'
import os.path as path
search = path.exists(search_file)
if search:
with open(search_file) as file:
line = file.read()
l=line.split(',')
else:
l=[0,999999999]
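# (editorial note) `l` holds alternating window boundaries [start0, end0, start1, end1, ...]
# in seconds; only candidates whose times fall inside one of these windows are plotted.
# The *.singlepulse rows are assumed to follow the usual PRESTO layout, i.e.
# column 0 = DM, column 1 = S/N and column 2 = time (s), which is how new_row is indexed below.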
for file in sp_file:
times=[]
with open(file) as csvfile:
r=csv.reader(csvfile,delimiter=' ')
for i,row in enumerate(r):
if i>0:
new_row = []
for item in row:
if item:
new_row.append(item)
if float(new_row[1])>snr_default:
times.append(float(new_row[2])-0.25)
dm=float(new_row[0])
for time in times:
for i in range(0,len(l),2):
if (time>float(l[i])) & (time<float(l[i+1])):
maskfn=fil+'_rfifind.mask'
raw = FilterbankFile(fil_file)
data,nbinsextra,nbins,start=waterfaller.waterfall(raw,time,0.5,dm=dm,nsub=256,downsamp=16,mask=True,maskfn=maskfn)
waterfaller.plot_waterfall(data,start,0.5,integrate_ts=True,integrate_spec=True,cmap_str='binary',save_path=fil)
|
CHIME-Pulsar-TimingREPO_NAMECHIME-Pulsar_automated_filterbankPATH_START.@CHIME-Pulsar_automated_filterbank_extracted@CHIME-Pulsar_automated_filterbank-main@[email protected]_END.py
|
{
"filename": "shaped_expander.py",
"repo_name": "CU-NESS/pylinex",
"repo_path": "pylinex_extracted/pylinex-master/examples/expander/shaped_expander.py",
"type": "Python"
}
|
"""
File: examples/expander/shaped_expander.py
Author: Keith Tauscher
Date: 10 Sep 2017
Description: Example showing how to use the ShapedExpander class which allows
Expanders to deal with non-flat arrays.
"""
import os
import numpy as np
from pylinex import NullExpander, ShapedExpander, load_expander_from_hdf5_file
expander = ShapedExpander(NullExpander(), (2, 6), (3, 4))
input_array = np.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]])
expected_output = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
assert np.all(expander(input_array) == expected_output)
file_name = 'test_shaped_expander_TEMP.hdf5'
expander.save(file_name)
try:
assert expander == load_expander_from_hdf5_file(file_name)
except:
os.remove(file_name)
raise
os.remove(file_name)
|
CU-NESSREPO_NAMEpylinexPATH_START.@pylinex_extracted@pylinex-master@examples@expander@[email protected]_END.py
|
{
"filename": "clik_cldf_ls.py",
"repo_name": "CosmoLike/cocoa",
"repo_path": "cocoa_extracted/cocoa-main/Cocoa/external_modules/code/planck/code/spt_clik/src/python/tools/clik_cldf_ls.py",
"type": "Python"
}
|
#! PYTHONEXE
import sys
sys.path = ["REPLACEPATH"]+sys.path
import numpy as nm
import clik.cldf as cldf
import os.path as osp
import os
def main(argv):
base = (argv[1])
try:
f = cldf.File(base)
kk = list(f.keys())
except IOError as e:
print("Can't cldf_ls %s"%argv[1])
return
kk.sort()
res = []
for k in kk:
if isinstance(f[k],cldf.File):
res +=[k+"/"]
else:
res +=[k]
rows, columns = os.popen('stty size', 'r').read().split()
col = int(columns)
sz = max([len(k)+1 for k in res])
sz = max(20,sz)
cc = col/sz
cnt = 0
txt = ""
for k in res:
txt += k.ljust(sz)
cnt +=1
if cnt == cc:
cnt = 0
txt +="\n"
print(txt)
if __name__=="__main__":
main(sys.argv)
|
CosmoLikeREPO_NAMEcocoaPATH_START.@cocoa_extracted@cocoa-main@Cocoa@external_modules@code@planck@code@spt_clik@src@python@tools@[email protected]_END.py
|
{
"filename": "global_average_pooling2d.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/pooling/global_average_pooling2d.py",
"type": "Python"
}
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_global_pooling import BaseGlobalPooling
@keras_export(
[
"keras.layers.GlobalAveragePooling2D",
"keras.layers.GlobalAvgPool2D",
]
)
class GlobalAveragePooling2D(BaseGlobalPooling):
"""Global average pooling operation for 2D data.
Args:
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, height, weight)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
        keepdims: A boolean, whether to keep the spatial dimensions or not.
            If `keepdims` is `False` (default), the rank of the tensor is
            reduced for spatial dimensions. If `keepdims` is `True`, the
            spatial dimensions are retained with length 1.
The behavior is the same as for `tf.reduce_mean` or `np.mean`.
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, height, width, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, height, width)`
Output shape:
- If `keepdims=False`:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims=True`:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, 1, 1, channels)`
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, 1, 1)`
Example:
>>> x = np.random.rand(2, 4, 5, 3)
>>> y = keras.layers.GlobalAveragePooling2D()(x)
>>> y.shape
(2, 3)
"""
def __init__(self, data_format=None, keepdims=False, **kwargs):
super().__init__(
pool_dimensions=2,
data_format=data_format,
keepdims=keepdims,
**kwargs,
)
def call(self, inputs):
if self.data_format == "channels_last":
return ops.mean(inputs, axis=[1, 2], keepdims=self.keepdims)
return ops.mean(inputs, axis=[2, 3], keepdims=self.keepdims)
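# --- Editorial usage sketch (not part of the Keras source) ---
# Mirroring the docstring example above, the keepdims / channels_first behaviour
# described under "Output shape" would look like:
#     import numpy as np
#     from keras import layers
#     x = np.random.rand(2, 3, 4, 5)  # (batch, channels, height, width)
#     y = layers.GlobalAveragePooling2D(data_format="channels_first", keepdims=True)(x)
#     # y.shape == (2, 3, 1, 1)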
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@pooling@[email protected]_END.py
|
{
"filename": "mnist_classifier_fromscratch.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/examples/mnist_classifier_fromscratch.py",
"type": "Python"
}
|
# Copyright 2018 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic MNIST example using Numpy and JAX.
The primary aim here is simplicity and minimal dependencies.
"""
import time
import numpy.random as npr
from jax import jit, grad
from jax.scipy.special import logsumexp
import jax.numpy as jnp
from examples import datasets
def init_random_params(scale, layer_sizes, rng=npr.RandomState(0)):
return [(scale * rng.randn(m, n), scale * rng.randn(n))
for m, n, in zip(layer_sizes[:-1], layer_sizes[1:])]
def predict(params, inputs):
activations = inputs
for w, b in params[:-1]:
outputs = jnp.dot(activations, w) + b
activations = jnp.tanh(outputs)
final_w, final_b = params[-1]
logits = jnp.dot(activations, final_w) + final_b
return logits - logsumexp(logits, axis=1, keepdims=True)
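# (editorial note) Subtracting logsumexp turns the logits into row-wise log-probabilities,
# so loss() below is the mean negative log-likelihood of the one-hot targets, i.e. the
# usual cross-entropy objective.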
def loss(params, batch):
inputs, targets = batch
preds = predict(params, inputs)
return -jnp.mean(jnp.sum(preds * targets, axis=1))
def accuracy(params, batch):
inputs, targets = batch
target_class = jnp.argmax(targets, axis=1)
predicted_class = jnp.argmax(predict(params, inputs), axis=1)
return jnp.mean(predicted_class == target_class)
if __name__ == "__main__":
layer_sizes = [784, 1024, 1024, 10]
param_scale = 0.1
step_size = 0.001
num_epochs = 10
batch_size = 128
train_images, train_labels, test_images, test_labels = datasets.mnist()
num_train = train_images.shape[0]
num_complete_batches, leftover = divmod(num_train, batch_size)
num_batches = num_complete_batches + bool(leftover)
def data_stream():
rng = npr.RandomState(0)
while True:
perm = rng.permutation(num_train)
for i in range(num_batches):
batch_idx = perm[i * batch_size:(i + 1) * batch_size]
yield train_images[batch_idx], train_labels[batch_idx]
batches = data_stream()
@jit
def update(params, batch):
grads = grad(loss)(params, batch)
return [(w - step_size * dw, b - step_size * db)
for (w, b), (dw, db) in zip(params, grads)]
params = init_random_params(param_scale, layer_sizes)
for epoch in range(num_epochs):
start_time = time.time()
for _ in range(num_batches):
params = update(params, next(batches))
epoch_time = time.time() - start_time
train_acc = accuracy(params, (train_images, train_labels))
test_acc = accuracy(params, (test_images, test_labels))
print(f"Epoch {epoch} in {epoch_time:0.2f} sec")
print(f"Training set accuracy {train_acc}")
print(f"Test set accuracy {test_acc}")
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@examples@[email protected]_END.py
|
{
"filename": "HotJupiterPhaseCurve.ipynb",
"repo_name": "rodluger/starry",
"repo_path": "starry_extracted/starry-master/notebooks/HotJupiterPhaseCurve.ipynb",
"type": "Jupyter Notebook"
}
|
# Hot jupiter phase curve example
```python
%matplotlib inline
```
```python
%run notebook_setup.py
```
In this notebook, we'll run through a brief example of how to model a full hot jupiter light curve -- including the transit, secondary eclipse, and phase curve -- using the machinery of the `exoplanet` package.
Let's begin with our custom imports. Note that we want to run `starry` in `lazy` mode (the default), since we need to be able to compute analytic derivatives of the model for use in `pymc3`.
```python
import starry
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import pymc3_ext as pmx
import exoplanet
starry.config.quiet = True
np.random.seed(1)
```
## Generating a dataset
Let's generate some synthetic data. First we create a star...
```python
A = starry.Primary(starry.Map(ydeg=0, udeg=2, amp=1.0), m=1.0, r=1.0, prot=1.0)
A.map[1] = 0.4
A.map[2] = 0.2
```
... and now we instantiate the planet...
```python
# These are the parameters we're going to try to infer
log_amp_true = -3.0
offset_true = 30.0
b = starry.Secondary(
starry.Map(ydeg=1, udeg=0, amp=10 ** log_amp_true, inc=90.0, obl=0.0),
m=0.0,
r=0.1,
inc=90.0,
prot=1.0,
porb=1.0,
)
b.map[1, 0] = 0.5
b.theta0 = 180.0 + offset_true
```
Most of the parameters should be self-explanatory (check the docs for details). For the planet, we give it a simple dipole map by setting only the $Y_{1,0}$ coefficient. We then set the `theta0` parameter to be $180^\circ$ plus an offset, which we set to be $30^\circ$. The parameter `theta0` is the rotational phase of the map at the reference time `t0`, which in this case is the time of transit. For a tidally-locked close-in planet, we usually want the bright side of the map to be facing the star at that point, which we accomplish by setting `theta0=180`. The offset captures the misalignment between the hot spot of the planet and the sub-stellar point, as is seen in the hot jupiter [HD 189733b](https://ui.adsabs.harvard.edu/abs/2012ApJ...747L..20M/abstract). In this notebook, we'll attempt to solve for this value.
Next, we instantiate the system:
```python
sys = starry.System(A, b)
```
We can now generate a synthetic light curve, and add some noise:
```python
t = np.linspace(-0.3, 1.3, 1000)
flux_true = sys.flux(t).eval()
ferr = 1e-4
flux = flux_true + ferr * np.random.randn(len(t))
plt.figure(figsize=(12, 5))
plt.plot(t, flux, "k.", alpha=0.3, ms=3)
plt.plot(t, flux_true)
plt.xlabel("Time [days]", fontsize=24)
plt.ylabel("Flux [normalized]", fontsize=24);
```
By eye we can tell there's an offset, since the peak in the phase curve does not coincide with the secondary eclipse.
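As a quick editorial sanity check (not part of the original notebook), we can read the offset straight off the noiseless model: with `porb = prot = 1` day, one day of time corresponds to $360^\circ$ of phase, so the phase-curve peak should sit roughly `offset_true / 360` days away from the secondary eclipse at $t = 0.5$ (the sign of the shift follows `starry`'s rotation convention).
```python
# Locate the out-of-eclipse maximum of the noiseless model.
mask = (t > 0.2) & (t < 0.8) & (np.abs(t - 0.5) > 0.06)  # exclude the eclipse itself
t_peak = t[mask][np.argmax(flux_true[mask])]
print("phase-curve peak at t = {:.3f} d, i.e. {:.1f} deg from eclipse".format(
    t_peak, np.abs(t_peak - 0.5) * 360
))
```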
## Fitting the data
We're going to fit this light curve using `exoplanet` and `pymc3`. Let's begin fresh and define a new star, planet, and system, this time *within a pymc3 model context*:
```python
with pm.Model() as model:
# These are the variables we're solving for;
# here we're placing wide Gaussian priors on them.
offset = pm.Normal("offset", 0.0, 50.0, testval=0.11)
log_amp = pm.Normal("log_amp", -4.0, 2.0, testval=-3.91)
# Instantiate the star; all its parameters are assumed
# to be known exactly
A = starry.Primary(
starry.Map(ydeg=0, udeg=2, amp=1.0, inc=90.0, obl=0.0), m=1.0, r=1.0, prot=1.0
)
A.map[1] = 0.4
A.map[2] = 0.2
# Instantiate the planet. Everything is fixed except for
# its luminosity and the hot spot offset.
b = starry.Secondary(
starry.Map(ydeg=1, udeg=0, amp=10 ** log_amp, inc=90.0, obl=0.0),
m=0.0,
r=0.1,
prot=1.0,
porb=1.0,
)
b.map[1, 0] = 0.5
b.theta0 = 180.0 + offset
# Instantiate the system as before
sys = starry.System(A, b)
# Our model for the flux
flux_model = pm.Deterministic("flux_model", sys.flux(t))
# This is how we tell `pymc3` about our observations;
    # we are assuming they are normally distributed about
# the true model. This line effectively defines our
# likelihood function.
pm.Normal("obs", flux_model, sd=ferr, observed=flux)
```
Great! The first thing we usually do is run this model through an optimizer (which is usually fast, since `starry` computes derivatives):
```python
with model:
map_soln = pmx.optimize()
```
Here's what our best model looks like:
```python
plt.figure(figsize=(12, 5))
plt.plot(t, flux, "k.", alpha=0.3, ms=3)
plt.plot(t, map_soln["flux_model"])
plt.xlabel("Time [days]", fontsize=24)
plt.ylabel("Flux [normalized]", fontsize=24);
```
And here are the best-fit values of the two parameters:
```python
print("offset:", map_soln["offset"])
print("log_amp:", map_soln["log_amp"])
```
Not bad! If we just cared about finding the best solution, we'd be done, but we actually want posteriors over the model parameters. For this, we're going to do sampling with `pymc3`:
```python
with model:
trace = pmx.sample(
tune=250,
draws=500,
start=map_soln,
chains=4,
cores=1,
target_accept=0.9,
)
```
And we're done! It's usually a good idea to look at a summary of the sampling procedure:
```python
pm.summary(trace, var_names=["log_amp", "offset"])
```
The `mc_errors` are relatively small, the `Rhat` convergence criterion is close to 1, and the number of effective samples `n_eff` is over 1000, all of which are good. We should probably run the sampler a bit longer, but this should be good enough for demonstration purposes. Let's plot our posterior distributions:
```python
import corner
samples = pm.trace_to_dataframe(trace, varnames=["log_amp", "offset"])
corner.corner(
np.array(samples),
truths=[log_amp_true, offset_true],
labels=[r"$\log\,\mathrm{amplitude}$", r"$\mathrm{offset}$"],
);
```
Looks great! The blue lines indicate the true values.
|
rodlugerREPO_NAMEstarryPATH_START.@starry_extracted@starry-master@[email protected]@.PATH_END.py
|
{
"filename": "test_pairTask.py",
"repo_name": "lsst-ts/ts_wep",
"repo_path": "ts_wep_extracted/ts_wep-main/tests/task/test_pairTask.py",
"type": "Python"
}
|
# This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import unittest
import lsst.geom
import lsst.pex.config as pexConfig
from lsst.afw.coord import Observatory
from lsst.afw.image import VisitInfo
from lsst.daf.base import DateTime
from lsst.ts.wep.task import (
ExposurePairer,
ExposurePairerConfig,
GroupPairer,
GroupPairerConfig,
)
class TestPairTask(unittest.TestCase):
def _createVisitInfoDict(
self,
ra_deg_start: float = 0.0,
dec_deg_start: float = 0.0,
time_mjd_start: float = 60432.0,
rot_deg_start: float = 0.0,
radec_sep_deg: float = 0.0,
time_sep_sec: float = 0.0,
rot_sep_deg: float = 0.0,
intra_id: int = 0,
extra_id: int = 1,
) -> typing.Dict[int, VisitInfo]:
intra_boresight = lsst.geom.SpherePoint(
ra_deg_start, dec_deg_start, lsst.geom.degrees
)
intra_mjd = DateTime(time_mjd_start)
intra_focus_z = -1.5
intra_rtp = lsst.geom.Angle(rot_deg_start, lsst.geom.degrees)
intra_visit_info = VisitInfo(
date=intra_mjd,
boresightRaDec=intra_boresight,
boresightRotAngle=intra_rtp,
focusZ=intra_focus_z,
instrumentLabel="LSSTCam",
observatory=Observatory(
lsst.geom.Angle(-30.2446, lsst.geom.degrees),
lsst.geom.Angle(-70.7494, lsst.geom.degrees),
2663,
),
era=lsst.geom.Angle(1.7487, lsst.geom.radians),
)
extra_boresight = lsst.geom.SpherePoint(
ra_deg_start, dec_deg_start + radec_sep_deg, lsst.geom.degrees
)
extra_mjd = DateTime(time_mjd_start + time_sep_sec / 86400)
extra_focus_z = 1.5
extra_rtp = lsst.geom.Angle(rot_deg_start + rot_sep_deg, lsst.geom.degrees)
extra_visit_info = VisitInfo(
date=extra_mjd,
boresightRaDec=extra_boresight,
boresightRotAngle=extra_rtp,
focusZ=extra_focus_z,
instrumentLabel="LSSTCam",
observatory=Observatory(
lsst.geom.Angle(-30.2446, lsst.geom.degrees),
lsst.geom.Angle(-70.7494, lsst.geom.degrees),
2663,
),
era=lsst.geom.Angle(1.7487, lsst.geom.radians),
)
return {intra_id: intra_visit_info, extra_id: extra_visit_info}
def testExposurePairer(self) -> None:
task_config = ExposurePairerConfig()
task = ExposurePairer(config=task_config)
visit_info_dict = self._createVisitInfoDict()
task_output = task.run(visit_info_dict)
self.assertEqual(len(task_output), 1)
self.assertEqual(task_output[0].intra, 0)
self.assertEqual(task_output[0].extra, 1)
def testExposurePairerRaDecThresh(self) -> None:
task_config = ExposurePairerConfig(pointingThreshold=2.0 * 3600)
task = ExposurePairer(config=task_config)
visit_info_dict = self._createVisitInfoDict(radec_sep_deg=1.0)
visit_info_dict_same_radec = self._createVisitInfoDict(ra_deg_start=25.0)
task_output = task.run(visit_info_dict)
self.assertEqual(len(task_output), 1)
task_config = ExposurePairerConfig(pointingThreshold=0.9 * 3600)
task = ExposurePairer(config=task_config)
task_output = task.run(visit_info_dict)
self.assertEqual(len(task_output), 0)
visit_info_dict[2] = visit_info_dict_same_radec[0]
visit_info_dict[3] = visit_info_dict_same_radec[1]
task_output = task.run(visit_info_dict)
self.assertEqual(len(task_output), 1)
self.assertEqual(task_output[0].intra, 2)
self.assertEqual(task_output[0].extra, 3)
def testExposurePairerTimeThresh(self) -> None:
task_config = ExposurePairerConfig(timeThreshold=65)
task = ExposurePairer(config=task_config)
visit_info_dict = self._createVisitInfoDict(time_sep_sec=61)
visit_info_dict_same_time = self._createVisitInfoDict(time_mjd_start=60432.1)
task_output = task.run(visit_info_dict)
self.assertEqual(len(task_output), 1)
task_config = ExposurePairerConfig(timeThreshold=60)
task = ExposurePairer(config=task_config)
task_output = task.run(visit_info_dict)
self.assertEqual(len(task_output), 0)
visit_info_dict[2] = visit_info_dict_same_time[0]
visit_info_dict[3] = visit_info_dict_same_time[1]
task_output = task.run(visit_info_dict)
self.assertEqual(len(task_output), 1)
self.assertEqual(task_output[0].intra, 2)
self.assertEqual(task_output[0].extra, 3)
def testExposurePairerRotationThresh(self) -> None:
task_config = ExposurePairerConfig(rotationThreshold=2.0)
task = ExposurePairer(config=task_config)
visit_info_dict = self._createVisitInfoDict(rot_sep_deg=1.0)
visit_info_dict_same_rot = self._createVisitInfoDict(rot_deg_start=25.0)
task_output = task.run(visit_info_dict)
self.assertEqual(len(task_output), 1)
task_config = ExposurePairerConfig(rotationThreshold=0.9)
task = ExposurePairer(config=task_config)
task_output = task.run(visit_info_dict)
self.assertEqual(len(task_output), 0)
visit_info_dict[2] = visit_info_dict_same_rot[0]
visit_info_dict[3] = visit_info_dict_same_rot[1]
task_output = task.run(visit_info_dict)
self.assertEqual(len(task_output), 1)
self.assertEqual(task_output[0].intra, 2)
self.assertEqual(task_output[0].extra, 3)
task_config = ExposurePairerConfig(ignoreThresholds=True)
task = ExposurePairer(config=task_config)
task_output = task.run(visit_info_dict)
self.assertEqual(len(task_output), 2)
def testExposurePairerForceUniquePairs(self) -> None:
task_config = ExposurePairerConfig()
task = ExposurePairer(config=task_config)
visit_info_dict = self._createVisitInfoDict()
visit_info_dict_multiple_intra = {}
visit_info_dict_multiple_intra[0] = visit_info_dict[0]
visit_info_dict_multiple_intra[1] = visit_info_dict[1]
visit_info_dict_multiple_intra[2] = visit_info_dict[1]
task_output = task.run(visit_info_dict_multiple_intra)
self.assertEqual(len(task_output), 1)
self.assertEqual(task_output[0].intra, 0)
self.assertEqual(task_output[0].extra, 1)
task_config = ExposurePairerConfig(forceUniquePairs=False)
task = ExposurePairer(config=task_config)
task_output = task.run(visit_info_dict_multiple_intra)
self.assertEqual(len(task_output), 2)
self.assertEqual(task_output[0].intra, 0)
self.assertEqual(task_output[0].extra, 1)
self.assertEqual(task_output[1].intra, 0)
self.assertEqual(task_output[1].extra, 2)
def testGroupPairerWrongNumberInGroup(self) -> None:
task_config = GroupPairerConfig()
task = GroupPairer(config=task_config)
# Test 4,3,3,2 VisitInfos. Should succeed if len is 2 or 3
visit_info_dict = self._createVisitInfoDict()
visit_info_dict.update(self._createVisitInfoDict(intra_id=2, extra_id=3))
while visit_info_dict:
if len(visit_info_dict) in (2, 3):
with self.assertNoLogs(logger=task.log.name, level="WARNING"):
task.run(visit_info_dict)
else:
with self.assertLogs(logger=task.log.name, level="WARNING"):
task.run(visit_info_dict)
visit_info_dict.popitem()
def testGroupPairerIgnoreThresholds(self) -> None:
task_config = GroupPairerConfig(ignoreThresholds=False)
with self.assertRaises(pexConfig.FieldValidationError):
GroupPairer(config=task_config)
|
lsst-tsREPO_NAMEts_wepPATH_START.@ts_wep_extracted@ts_wep-main@tests@task@[email protected]_END.py
|
{
"filename": "line_search.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/scipy/optimize/line_search.py",
"type": "Python"
}
|
# Copyright 2020 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import NamedTuple
from functools import partial
from jax._src.numpy.util import promote_dtypes_inexact
import jax.numpy as jnp
import jax
from jax import lax
_dot = partial(jnp.dot, precision=lax.Precision.HIGHEST)
def _cubicmin(a, fa, fpa, b, fb, c, fc):
dtype = jnp.result_type(a, fa, fpa, b, fb, c, fc)
C = fpa
db = b - a
dc = c - a
denom = (db * dc) ** 2 * (db - dc)
d1 = jnp.array([[dc ** 2, -db ** 2],
[-dc ** 3, db ** 3]], dtype=dtype)
d2 = jnp.array([fb - fa - C * db, fc - fa - C * dc], dtype=dtype)
A, B = _dot(d1, d2) / denom
radical = B * B - 3. * A * C
xmin = a + (-B + jnp.sqrt(radical)) / (3. * A)
return xmin
def _quadmin(a, fa, fpa, b, fb):
D = fa
C = fpa
db = b - a
B = (fb - D - C * db) / (db ** 2)
xmin = a - C / (2. * B)
return xmin
def _binary_replace(replace_bit, original_dict, new_dict, keys=None):
if keys is None:
keys = new_dict.keys()
return {key: jnp.where(replace_bit, new_dict[key], original_dict[key])
for key in keys}
class _ZoomState(NamedTuple):
done: bool | jax.Array
failed: bool | jax.Array
j: int | jax.Array
a_lo: float | jax.Array
phi_lo: float | jax.Array
dphi_lo: float | jax.Array
a_hi: float | jax.Array
phi_hi: float | jax.Array
dphi_hi: float | jax.Array
a_rec: float | jax.Array
phi_rec: float | jax.Array
a_star: float | jax.Array
phi_star: float | jax.Array
dphi_star: float | jax.Array
g_star: float | jax.Array
nfev: int | jax.Array
ngev: int | jax.Array
def _zoom(restricted_func_and_grad, wolfe_one, wolfe_two, a_lo, phi_lo,
dphi_lo, a_hi, phi_hi, dphi_hi, g_0, pass_through):
"""
Implementation of zoom. Algorithm 3.6 from Wright and Nocedal, 'Numerical
Optimization', 1999, pg. 59-61. Tries cubic, quadratic, and bisection methods
of zooming.
"""
state = _ZoomState(
done=False,
failed=False,
j=0,
a_lo=a_lo,
phi_lo=phi_lo,
dphi_lo=dphi_lo,
a_hi=a_hi,
phi_hi=phi_hi,
dphi_hi=dphi_hi,
a_rec=(a_lo + a_hi) / 2.,
phi_rec=(phi_lo + phi_hi) / 2.,
a_star=1.,
phi_star=phi_lo,
dphi_star=dphi_lo,
g_star=g_0,
nfev=0,
ngev=0,
)
delta1 = 0.2
delta2 = 0.1
def body(state):
# Body of zoom algorithm. We use boolean arithmetic to avoid using jax.cond
# so that it works on GPU/TPU.
dalpha = (state.a_hi - state.a_lo)
a = jnp.minimum(state.a_hi, state.a_lo)
b = jnp.maximum(state.a_hi, state.a_lo)
cchk = delta1 * dalpha
qchk = delta2 * dalpha
# This will cause the line search to stop, and since the Wolfe conditions
# are not satisfied the minimization should stop too.
threshold = jnp.where((jnp.finfo(dalpha.dtype).bits < 64), 1e-5, 1e-10)
state = state._replace(failed=state.failed | (dalpha <= threshold))
# Cubmin is sometimes nan, though in this case the bounds check will fail.
a_j_cubic = _cubicmin(state.a_lo, state.phi_lo, state.dphi_lo, state.a_hi,
state.phi_hi, state.a_rec, state.phi_rec)
use_cubic = (state.j > 0) & (a_j_cubic > a + cchk) & (a_j_cubic < b - cchk)
a_j_quad = _quadmin(state.a_lo, state.phi_lo, state.dphi_lo, state.a_hi, state.phi_hi)
use_quad = (~use_cubic) & (a_j_quad > a + qchk) & (a_j_quad < b - qchk)
a_j_bisection = (state.a_lo + state.a_hi) / 2.
use_bisection = (~use_cubic) & (~use_quad)
a_j = jnp.where(use_cubic, a_j_cubic, state.a_rec)
a_j = jnp.where(use_quad, a_j_quad, a_j)
a_j = jnp.where(use_bisection, a_j_bisection, a_j)
# TODO(jakevdp): should we use some sort of fixed-point approach here instead?
phi_j, dphi_j, g_j = restricted_func_and_grad(a_j)
phi_j = phi_j.astype(state.phi_lo.dtype)
dphi_j = dphi_j.astype(state.dphi_lo.dtype)
g_j = g_j.astype(state.g_star.dtype)
state = state._replace(nfev=state.nfev + 1,
ngev=state.ngev + 1)
hi_to_j = wolfe_one(a_j, phi_j) | (phi_j >= state.phi_lo)
star_to_j = wolfe_two(dphi_j) & (~hi_to_j)
hi_to_lo = (dphi_j * (state.a_hi - state.a_lo) >= 0.) & (~hi_to_j) & (~star_to_j)
lo_to_j = (~hi_to_j) & (~star_to_j)
state = state._replace(
**_binary_replace(
hi_to_j,
state._asdict(),
dict(
a_hi=a_j,
phi_hi=phi_j,
dphi_hi=dphi_j,
a_rec=state.a_hi,
phi_rec=state.phi_hi,
),
),
)
# for termination
state = state._replace(
done=star_to_j | state.done,
**_binary_replace(
star_to_j,
state._asdict(),
dict(
a_star=a_j,
phi_star=phi_j,
dphi_star=dphi_j,
g_star=g_j,
)
),
)
state = state._replace(
**_binary_replace(
hi_to_lo,
state._asdict(),
dict(
a_hi=state.a_lo,
phi_hi=state.phi_lo,
dphi_hi=state.dphi_lo,
a_rec=state.a_hi,
phi_rec=state.phi_hi,
),
),
)
state = state._replace(
**_binary_replace(
lo_to_j,
state._asdict(),
dict(
a_lo=a_j,
phi_lo=phi_j,
dphi_lo=dphi_j,
a_rec=state.a_lo,
phi_rec=state.phi_lo,
),
),
)
state = state._replace(j=state.j + 1)
# Choose higher cutoff for maxiter than Scipy as Jax takes longer to find
# the same value - possibly floating point issues?
state = state._replace(failed= state.failed | (state.j >= 30))
return state
state = lax.while_loop(lambda state: (~state.done) & (~pass_through) & (~state.failed),
body,
state)
return state
class _LineSearchState(NamedTuple):
done: bool | jax.Array
failed: bool | jax.Array
i: int | jax.Array
a_i1: float | jax.Array
phi_i1: float | jax.Array
dphi_i1: float | jax.Array
nfev: int | jax.Array
ngev: int | jax.Array
a_star: float | jax.Array
phi_star: float | jax.Array
dphi_star: float | jax.Array
g_star: jax.Array
class _LineSearchResults(NamedTuple):
"""Results of line search.
Parameters:
    failed: True if the line search failed to find a step size satisfying
      the strong Wolfe conditions
    nit: integer number of iterations
    nfev: integer number of functions evaluations
    ngev: integer number of gradients evaluations
    k: integer number of iterations
    a_k: final step size
f_k: final function value
g_k: final gradient value
status: integer end status
"""
failed: bool | jax.Array
nit: int | jax.Array
nfev: int | jax.Array
ngev: int | jax.Array
k: int | jax.Array
a_k: int | jax.Array
f_k: jax.Array
g_k: jax.Array
status: bool | jax.Array
def line_search(f, xk, pk, old_fval=None, old_old_fval=None, gfk=None, c1=1e-4,
c2=0.9, maxiter=20):
"""Inexact line search that satisfies strong Wolfe conditions.
Algorithm 3.5 from Wright and Nocedal, 'Numerical Optimization', 1999, pg. 59-61
Args:
fun: function of the form f(x) where x is a flat ndarray and returns a real
scalar. The function should be composed of operations with vjp defined.
x0: initial guess.
pk: direction to search in. Assumes the direction is a descent direction.
old_fval, gfk: initial value of value_and_gradient as position.
old_old_fval: unused argument, only for scipy API compliance.
maxiter: maximum number of iterations to search
c1, c2: Wolfe criteria constant, see ref.
Returns: LineSearchResults
"""
xk, pk = promote_dtypes_inexact(xk, pk)
def restricted_func_and_grad(t):
t = jnp.array(t, dtype=pk.dtype)
phi, g = jax.value_and_grad(f)(xk + t * pk)
dphi = jnp.real(_dot(g, pk))
return phi, dphi, g
if old_fval is None or gfk is None:
phi_0, dphi_0, gfk = restricted_func_and_grad(0)
else:
phi_0 = old_fval
dphi_0 = jnp.real(_dot(gfk, pk))
if old_old_fval is not None:
candidate_start_value = 1.01 * 2 * (phi_0 - old_old_fval) / dphi_0
start_value = jnp.where(candidate_start_value > 1, 1.0, candidate_start_value)
else:
start_value = 1
def wolfe_one(a_i, phi_i):
# actually negation of W1
return phi_i > phi_0 + c1 * a_i * dphi_0
def wolfe_two(dphi_i):
return jnp.abs(dphi_i) <= -c2 * dphi_0
state = _LineSearchState(
done=False,
failed=False,
# algorithm begins at 1 as per Wright and Nocedal, however Scipy has a
# bug and starts at 0. See https://github.com/scipy/scipy/issues/12157
i=1,
a_i1=0.,
phi_i1=phi_0,
dphi_i1=dphi_0,
nfev=1 if (old_fval is None or gfk is None) else 0,
ngev=1 if (old_fval is None or gfk is None) else 0,
a_star=0.,
phi_star=phi_0,
dphi_star=dphi_0,
g_star=gfk,
)
def body(state):
# no amax in this version, we just double as in scipy.
# unlike original algorithm we do our next choice at the start of this loop
a_i = jnp.where(state.i == 1, start_value, state.a_i1 * 2.)
phi_i, dphi_i, g_i = restricted_func_and_grad(a_i)
state = state._replace(nfev=state.nfev + 1,
ngev=state.ngev + 1)
star_to_zoom1 = wolfe_one(a_i, phi_i) | ((phi_i >= state.phi_i1) & (state.i > 1))
star_to_i = wolfe_two(dphi_i) & (~star_to_zoom1)
star_to_zoom2 = (dphi_i >= 0.) & (~star_to_zoom1) & (~star_to_i)
zoom1 = _zoom(restricted_func_and_grad,
wolfe_one,
wolfe_two,
state.a_i1,
state.phi_i1,
state.dphi_i1,
a_i,
phi_i,
dphi_i,
gfk,
~star_to_zoom1)
state = state._replace(nfev=state.nfev + zoom1.nfev,
ngev=state.ngev + zoom1.ngev)
zoom2 = _zoom(restricted_func_and_grad,
wolfe_one,
wolfe_two,
a_i,
phi_i,
dphi_i,
state.a_i1,
state.phi_i1,
state.dphi_i1,
gfk,
~star_to_zoom2)
state = state._replace(nfev=state.nfev + zoom2.nfev,
ngev=state.ngev + zoom2.ngev)
state = state._replace(
done=star_to_zoom1 | state.done,
failed=(star_to_zoom1 & zoom1.failed) | state.failed,
**_binary_replace(
star_to_zoom1,
state._asdict(),
zoom1._asdict(),
keys=['a_star', 'phi_star', 'dphi_star', 'g_star'],
),
)
state = state._replace(
done=star_to_i | state.done,
**_binary_replace(
star_to_i,
state._asdict(),
dict(
a_star=a_i,
phi_star=phi_i,
dphi_star=dphi_i,
g_star=g_i,
),
),
)
state = state._replace(
done=star_to_zoom2 | state.done,
failed=(star_to_zoom2 & zoom2.failed) | state.failed,
**_binary_replace(
star_to_zoom2,
state._asdict(),
zoom2._asdict(),
keys=['a_star', 'phi_star', 'dphi_star', 'g_star'],
),
)
state = state._replace(i=state.i + 1, a_i1=a_i, phi_i1=phi_i, dphi_i1=dphi_i)
return state
state = lax.while_loop(lambda state: (~state.done) & (state.i <= maxiter) & (~state.failed),
body,
state)
status = jnp.where(
state.failed,
jnp.array(1), # zoom failed
jnp.where(
state.i > maxiter,
jnp.array(3), # maxiter reached
jnp.array(0), # passed (should be)
),
)
# Step sizes which are too small causes the optimizer to get stuck with a
# direction of zero in <64 bit mode - avoid with a floor on minimum step size.
alpha_k = jnp.asarray(state.a_star)
alpha_k = jnp.where((jnp.finfo(alpha_k.dtype).bits != 64)
& (jnp.abs(alpha_k) < 1e-8),
jnp.sign(alpha_k) * 1e-8,
alpha_k)
results = _LineSearchResults(
failed=state.failed | (~state.done),
nit=state.i - 1, # because iterations started at 1
nfev=state.nfev,
ngev=state.ngev,
k=state.i,
a_k=alpha_k,
f_k=state.phi_star,
g_k=state.g_star,
status=status,
)
return results
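
# Illustrative usage sketch (added for exposition; not part of the original
# module). It minimizes f(x) = <x, x> along the steepest-descent direction
# from x0 and prints the step size found by `line_search`. Only the public
# `line_search` defined above is assumed.
if __name__ == "__main__":
  def _demo_f(x):
    return jnp.dot(x, x)

  _x0 = jnp.array([1.0, -2.0])
  _p0 = -jax.grad(_demo_f)(_x0)  # steepest descent, guaranteed descent direction
  _res = line_search(_demo_f, _x0, _p0)
  print("step size a_k:", _res.a_k)
  print("new function value f_k:", _res.f_k)
  print("line search failed:", bool(_res.failed))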
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@scipy@optimize@[email protected]_END.py
|
{
"filename": "EVLACal.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/Obit/python/EVLACal.py",
"type": "Python"
}
|
"""
"""
from __future__ import absolute_import
from __future__ import print_function
import UV, UVDesc, Image, ImageDesc, FArray, ObitTask, AIPSTask, AIPSDir, OErr, History
import InfoList, Table, OSystem, OASDM
from AIPS import AIPS
from FITS import FITS
from AIPSDir import AIPSdisks, nAIPS
from OTObit import Acat, AMcat, getname, zap, imhead, tabdest, tput
from Obit import Version
from PipeUtil import *
import os, os.path, re, shutil, pickle, math, copy, pprint, string
import six.moves.urllib.request, six.moves.urllib.parse, six.moves.urllib.error, six.moves.urllib.request, six.moves.urllib.error, six.moves.urllib.parse
import sys #, commands
import datetime
import xml.dom.minidom
from six.moves import range
manifest = { 'project' : [], # list of project output files
'source' : {} } # dict of source output files
def EVLAInitContParms():
"""
Initialize EVLA continuum pipeline parameters
Creates and initializes the parameter dictionary. Returns python dict with
parameters.
"""
################################################################
parms = {}
# General data parameters
parms["check"] = False # Only check script, don't execute tasks
parms["debug"] = False # run tasks debug
parms["Compress"] = False # Use compressed UV data?
parms["doSwPwr"] = False # Make EVLA Switched power corr?
parms["calInt"] = None # Calibration table interval (min)
parms["seq"] = 1 # Sequence number for AIPS files
parms["doLoadArchive"] = True # Sequence number for AIPS filesLoad AIPS data from archive?
# Hanning
parms["doHann"] = None # Hanning needed for RFI?
parms["doDescm"] = True # Descimate Hanning?
# SY Calibration, only PwrDif used in calibration
parms["doSYCal"] = False # Calibration from SysPower (AIPS SY)?
parms["SYSWUse"] = None # SW substitutions for SY solutions, None= as defined
parms["SYcalInt"] = 6.0/60. # SY derives SN table interval
parms["SYsmoFunc"] = "MWF" # SY smoothing function 'MWF', "BOX", "GAUS"
parms["SYsmoTime"] = 10./3600 # smooth time in hrs
parms["SYclipSmo"] = 300./3600 # smooth time for clip in hrs
parms["SYclipParm"] = 5. # clip level (sigma)
parms["doSYEdit"] = True # Edit/flag on the basis of SY solutions
parms["SYEditFG"] = 2 # FG table to add flags to, <=0 -> no FG entries
parms["SYSigma"] = 10. # Multiple of median RMS about median gain to clip/flag
# Parallactic angle correction
parms["doPACor"] = True # Make parallactic angle correction
# Special editing list
parms["doEditList"] = False # Edit using editList?
parms["editFG"] = 2 # Table to apply edit list to
editList = [ \
# "timer":("0/06:09:0.0","0/06:13:0.0"),"Ant":[ 8,0],"IFs":[2,2],"Chans":[1,0],"Stokes":'1110',"Reason":"bad data"},
]
parms["editList"] = editList
# Do median flagging
parms["doMedn"] = True # Median editing?
parms["mednSigma"] = 10.0 # Median sigma clipping level
parms["mednTimeWind"] = 1.0 # Median window width in min for median flagging
parms["mednAvgTime"] = 10.0/60. # Median Averaging time in min
parms["mednAvgFreq"] = 0 # Median 1=>avg chAvg chans, 2=>avg all chan, 3=> avg chan and IFs
parms["mednChAvg"] = 1 # Median number of channels to average
parms["mednTarg"] = " " # Median Flagging target, blank = all, source or list
# Editing
parms["doClearTab"] = True # Clear cal/edit tables
parms["doClearGain"] = True # Clear SN and CL tables >1
parms["doClearFlag"] = True # Clear FG tables > 1
parms["doClearBP"] = True # Clear BP tables?
parms["doCopyFG"] = True # Copy FG 1 to FG 2quack
parms["doQuack"] = True # Quack data?
parms["quackBegDrop"] = 0.1 # Time to drop from start of each scan in min
parms["quackEndDrop"] = 0.0 # Time to drop from end of each scan in min
parms["quackReason"] = "Quack" # Reason string
parms["doShad"] = None # Shadow flagging (config dependent)
parms["shadBl"] = 25.0 # Minimum shadowing baseline (m)
parms["doFD1"] = True # Do initial frequency domain flagging
parms["FD1widMW"] = None # Width of the initial FD median window
parms["FD1maxRes"] = 5.0 # Clipping level in sigma
parms["FD1TimeAvg"] = 1.0 # time averaging in min. for initial FD flagging
parms["doMedn"] = True # Median editing?
parms["mednSigma"] = 5.0 # Median sigma clipping level
parms["timeWind"] = 1.0 # Median window width in min for median flagging
parms["avgTime"] = 10.0/60. # Averaging time in min
parms["avgFreq"] = 0 # 1=>avg chAvg chans, 2=>avg all chan, 3=> avg chan and IFs
parms["chAvg"] = 1 # number of channels to average
parms["doRMSAvg"] = True # Edit calibrators by RMSAvg?
parms["RMSAvg"] = 3.0 # AutoFlag Max RMS/Avg for time domain RMS filtering
parms["RMSTimeAvg"] = 1.0 # AutoFlag time averaging in min.
parms["doAutoFlag"] = True # Autoflag editing after first pass calibration?
parms["doAutoFlag2"] = True # Autoflag editing after final (2nd) calibration?
parms["IClip"] = None # AutoFlag Stokes I clipping
parms["VClip"] = [2.0,0.05] # AutoFlag Stokes V clipping
parms["XClip"] = [5.0,0.05] # AutoFlag cross-pol clipping
parms["timeAvg"] = 0.33 # AutoFlag time averaging in min.
parms["doAFFD"] = True # do AutoFlag frequency domain flag
parms["FDwidMW"] = 31 # Width of the median window
parms["FDmaxRMS"] = None # Channel RMS limits (Jy)
parms["FDmaxRes"] = None # Max. residual flux in sigma
parms["FDmaxResBL"] = None # Max. baseline residual
parms["FDbaseSel"] = None # Channels for baseline fit
parms["FDmaxAmp"] = None # Maximum average amplitude (Jy)
parms["FDmaxV"] = parms["VClip"][0] # Maximum average VPol amp (Jy)
parms["minAmp"] = 1.0e-5 # Minimum allowable amplitude
parms["BChDrop"] = None # number of channels to drop from start of each spectrum
# NB: based on original number of channels, halved for Hanning
parms["EChDrop"] = None # number of channels to drop from end of each spectrum
# NB: based on original number of channels, halved for Hanning
parms["doSrvrEdt"] = True # Survivor editing
parms["minSrvrOK"] = [0.1,0.1] # Minimum prior editing survival fraction
parms["ampSigma"] = None # Multiple of median RMS about median gain to clip/flag
# Delay calibration
parms["doDelayCal"] = True # Determine/apply delays from contCals
parms["delaySolInt"] = None # delay solution interval (min)
parms["delaySmoo"] = None # Delay smoothing time (hr)
parms["doTwo"] = True # Use two baseline combinations in delay cal
parms["delayZeroPhs"] = False # Zero phase in Delay solutions?
parms["delayBChan"] = None # first channel to use in delay solutions
parms["delayEChan"] = None # highest channel to use in delay solutions
parms["delayDoSelf"] = False # If True only apply solutions to the same source
# Bandpass Calibration?
parms["doBPCal"] = True # Determine Bandpass calibration
parms["bpBChan1"] = 1 # Low freq. channel, initial cal
parms["bpEChan1"] = 0 # Highest freq channel, initial cal, 0=>all
parms["bpDoCenter1"] = None # Fraction of channels in 1st, overrides bpBChan1, bpEChan1
parms["bpBChan2"] = 1 # Low freq. channel for BP cal
parms["bpEChan2"] = 0 # Highest freq channel for BP cal, 0=>all
parms["bpChWid2"] = None # Number of channels in running mean BP soln
parms["bpChWid2"] = 1 # Number of channels in running mean BP soln
parms["bpdoAuto"] = False # Use autocorrelations rather than cross?
parms["bpsolMode"] = 'A&P' # Band pass type 'A&P', 'P', 'P!A'
parms["bpsolint1"] = None # BPass phase correction solution in min
parms["bpsolint2"] = 10.0 # BPass bandpass solution in min
parms["bpUVRange"] = [0.0,0.0] # uv range for bandpass cal
parms["specIndex"] = -0.7 # Spectral index of BP Cal
parms["doSpecPlot"] = True # Plot the amp. and phase across the spectrum
# Amp/phase calibration parameters
parms["refAnt"] = 0 # Reference antenna
parms["refAnts"] = [0] # List of Reference antenna for fringe fitting
parms["solInt"] = None # solution interval (min)
parms["ampScalar"]= False # Ampscalar solutions?
parms["solSmo"] = 0.0 # Smoothing interval for Amps (min)
parms["gainUVRange"] = [0.0,0.0] # Range of baseline used in kilowavelengths, zeros=all
# Apply calibration and average?
parms["doCalAvg"] = True # calibrate and average cont. calibrator data
parms["avgClass"] = "UVAvg" # AIPS class of calibrated/averaged uv data
parms["CalAvgTime"] = None # Time for averaging calibrated uv data (min)
parms["CABIF"] = 1 # First IF to copy
parms["CAEIF"] = 0 # Highest IF to copy
parms["CABChan"] = 1 # First Channel to copy
parms["CAEChan"] = 0 # Highest Channel to copy
parms["CAchAvg"] = 1 # No channel average
parms["CAavgFreq"] = 1 # No channel average
# Right-Left delay calibration
parms["doRLDelay"] = False # Determine/apply R-L delays
parms["RLDCal"] = [(None,None,None)] # Array of triplets of (name, R-L phase (deg at 1 GHz),
# RM (rad/m**2)) for calibrators
parms["rlBChan"] = 1 # First (1-rel) channel number
parms["rlEChan"] = 0 # Highest channel number. 0=> high in data.
parms["rlUVRange"] = [0.0,0.0] # Range of baseline used in kilowavelengths, zeros=all
parms["rlCalCode"] = ' ' # Calibrator code
parms["rlDoCal"] = 2 # Apply calibration table? positive=>calibrate
parms["rlgainUse"] = 0 # CL/SN table to apply, 0=>highest
parms["rltimerange"]= [0.0,1000.0] # time range of data (days)
parms["rlDoBand"] = 1 # If > 0 apply bandpass calibration
parms["rlBPVer"] = 0 # BP table to apply, 0=>highest
parms["rlflagVer"] = 1 # FG table version to apply
parms["rlrefAnt"] = 0 # R-L, delay calibrator , defaults to refAnt
parms["rlnumIFs"] = 1 # Number of IFs per solution
# Instrumental polarization cal?
parms["doPolCal"] = False # Determine instrumental polarization from PCInsCals?
parms["PCInsCals"] = [] # instrumental poln calibrators, name or list of names
parms["PCFixPoln"] = False # if True, don't solve for source polarization in ins. cal
parms["PCCalPoln"] = None # List of calibrator poln, list of (PPol, RLPhase, RM) in
# order given in PCInsCals, PPol<0 => fit
parms["PCAvgIF"] = False # if True, average IFs in ins. cal.
parms["PCSolInt"] = 2. # instrumental solution interval (min), 0=> scan average(?)
parms["PCRefAnt"] = -1 # Pol cal Reference antenna, -1=> absolute
parms["PCSolType"] = " " # solution type, " ", "LM "
parms["doPol"] = False # Apply polarization cal in subsequent calibration?
parms["PDVer"] = 1 # Apply PD table in subsequent polarization cal?
parms["PCChInc"] = 5 # Channel increment in instrumental polarization
parms["PCChWid"] = 5 # Channel averaging in instrumental polarization
parms['doFitRL'] = False # Fit R-L (or X-Y) gain phase
parms['doFitOri'] = True # Fit (linear feed) orientations?
# Right-Left phase (EVPA) calibration, uses same values as Right-Left delay calibration
parms["doRLCal"] = False # Set RL phases from RLCal - RLDCal or RLPCal
parms["RLPCal"] = None # RL Calibrator source name, in None no IF based cal.
parms["RLPhase"] = 0.0 # R-L phase of RLPCal (deg) at 1 GHz
parms["RLRM"] = 0.0 # R-L calibrator RM (NYI)
parms["rlChWid"] = 3 # Number of channels in running mean RL BP soln
parms["rlsolint1"] = 10./60 # First solution interval (min), 0=> scan average
parms["rlsolint2"] = 10.0 # Second solution interval (min)
parms["rlCleanRad"] = None # CLEAN radius about center or None=autoWin
parms["rlFOV"] = 0.05 # Field of view radius (deg) needed to image RLPCal
# Imaging targets
parms["doImage"] = True # Image targets
parms["targets"] = [] # List of target sources
parms["outIClass"] = "IClean" # Output target final image class
parms["Stokes"] = "I" # Stokes to image
parms["timeRange"] = [0.0,0.0] # time range (days) to image
parms["Robust"] = 0.0 # Weighting robust parameter
parms["FOV"] = None # Field of view radius in deg.
parms["Niter"] = 500 # Max number of clean iterations
parms["minFlux"] = 0.0 # Minimum CLEAN flux density
parms["UVRange"] = [0.,0.] # Imaging UV range in kLambda, 0s => all
parms["minSNR"] = 4.0 # Minimum Allowed SNR
parms["solPMode"] = "P" # Phase solution for phase self cal
parms["solPType"] = " " # Solution type for phase self cal
parms["solAMode"] = "A&P" # Delay solution for A&P self cal
parms["solAType"] = " " # Solution type for A&P self cal
parms["avgPol"] = True # Average poln in self cal?
parms["avgIF"] = False # Average IF in self cal?
parms["maxPSCLoop"] = 1 # Max. number of phase self cal loops
parms["minFluxPSC"] = 0.05 # Min flux density peak for phase self cal
parms["solPInt"] = None # phase self cal solution interval (min)
parms["maxASCLoop"] = 1 # Max. number of Amp+phase self cal loops
parms["minFluxASC"] = 0.5 # Min flux density peak for amp+phase self cal
parms["solAInt"] = None # amp+phase self cal solution interval (min)
parms["nTaper"] = 0 # Number of additional imaging multiresolution tapers
parms["Tapers"] = [20.0,0.0] # List of tapers in pixels
parms["do3D"] = False # Make ref. pixel tangent to celest. sphere for each facet
parms["noNeg"] = False # F=Allow negative components in self cal model
parms["BLFact"] = 1.01 # Baseline dependent time averaging
parms["BLchAvg"] = True # Baseline dependent frequency averaging
parms["doMB"] = True # Use wideband imaging?
parms["MBnorder"] = 1 # order on wideband imaging
parms["MBmaxFBW"] = 0.05 # max. MB fractional bandwidth
parms["PBCor"] = True # Pri. beam corr on final image
parms["antSize"] = 24.5 # ant. diameter (m) for PBCor
parms["CleanRad"] = None # CLEAN radius (pix?) about center or None=autoWin
parms["Beam"] = [0.,0.,0.] # Clean restoring beam (asec, asec, deg)
parms["doOutlier"] = None # Default outliers
parms["OutlierDist"] = None # Outlier max distance (deg)
parms["OutlierFlux"] = None # Outlier min flux (Jy)
# Final
parms["doReport"] = True # Generate source report?
parms["outDisk"] = 0 # FITS disk number for output (0=cwd)
parms["doSaveUV"] = True # Save uv data
parms["doSaveImg"] = True # Save images
parms["doSaveTab"] = True # Save Tables
parms["doCleanup"] = True # Destroy AIPS files
# diagnostics
parms["plotSource"] = 'None' # Name of source for spectral plot
parms["plotTime"] = [0.,1000.] # timerange for spectral plot
parms["doRawSpecPlot"] = False # Plot diagnostic raw spectra?
parms["doSpecPlot"] = False # Plot diagnostic spectra?
parms["doSNPlot"] = True # Plot SN tables etc
parms["doBPPlot"] = True # Plot BP tables etc
parms["doDiagPlots"] = True # Plot single source diagnostics
parms["doKntrPlots"] = True # Contour plots
parms["prtLv"] = 2 # Amount of task print diagnostics
parms["doMetadata"] = True # Save source and project metadata
parms["doHTML"] = True # Output HTML report
# Default selection
parms["selChBW"] = -1.0 # Selected channel bandwidth, All
    # second pass defaults
parms["doMedn2"] = None
parms["doFD2"] = None
parms["doBPCal2"] = None
parms["doDelayCal2"] = None
parms["doAmpPhaseCal2"] = None
parms["doAutoFlag2"] = None
parms["doSrvrEdt2"] = None
return parms
# end EVLAInitContParms
def EVLAInitContFQParms(parms):
"""
Initialize EVLA continuum pipeline frequency dependent parameters
Values set if None on input
* parms = Project parameters, modified on output
"""
################################################################
freq = parms["VLAFreq"]
cfg = parms["VLACfg"]
nchan = parms["selChan"]
doHann = parms["doHann"]
# halve the number of channels if Hanning
# Delay channels
parms["delayBChan"] = max(2, 0.05*nchan) # first channel to use in delay solutions
parms["delayEChan"] = min(nchan-2, nchan-0.05*nchan) # highest channel to use in delay solutions
# Amp cal channels
if parms["doAmpPhaseCal"]==None:
parms["doAmpPhaseCal"] = True # Amplitude/phase calibration
parms["ampBChan"] = max(2, 0.05*nchan) # first channel to use in A&P solutions
parms["ampEChan"] = min(nchan-2, nchan-0.05*nchan) # highest channel to use in A&P solutions
parms["doAmpEdit"] = True # Edit/flag on the basis of amplitude solutions
parms["ampEditFG"] = 2 # FG table to add flags to, <=0 -> no FG entries
# Ipol clipping levels
if parms["IClip"]==None:
if freq<1.0e9:
parms["IClip"] = [20000.,0.1] # Allow Cygnus A
else:
parms["IClip"] = [200.,0.1] # Covers most real sources
# end IPol clipping
if (parms["FDmaxAmp"]==None):
parms["FDmaxAmp"] = parms["IClip"][0] # Maximum average amplitude (Jy)
# Drop end channels, more for low frequencies
if freq<8.0e9:
if parms["BChDrop"]==None:
ch = int (max(2, 6.*(nchan/64.)+0.5))
ch = min (32, ch)
parms["BChDrop"] = ch # number of channels to drop from start of each spectrum
if parms["EChDrop"]==None:
ch = int (max(2, 4.*(nchan/64.)+0.5))
ch = min (24, ch)
parms["EChDrop"] = ch # number of channels to drop from start of each spectrum
else:
if parms["BChDrop"]==None:
parms["BChDrop"] = 3 # drop no channels
if parms["EChDrop"]==None:
parms["EChDrop"] = 2 # drop no channels
# Set spectral baseline for FD flagging ignoring end channels
if parms["FDbaseSel"]==None:
ch1 = parms["BChDrop"]
ch2 = nchan - parms["EChDrop"]
parms["FDbaseSel"] = [ch1, ch2, 1, 0]
# FD flagging
# number of channels for FD median window
if (parms["FD1widMW"]==None):
parms["FD1widMW"] = MIN (127, MAX(((int(nchan//2))-1), 3))
if parms["FDmaxRMS"]==None:
if cfg[0:1]=='A' or cfg[0:1]=='B' or freq>8.0e9:
parms["FDmaxRMS"] = [5.0,.1] # Channel RMS limits (Jy)
else:
parms["FDmaxRMS"] = [6.0,.1] # Channel RMS limits (Jy)
if parms["FDmaxRes"]==None:
if cfg[0:1]=='A' or cfg[0:1]=='B' or freq>8.0e9:
parms["FDmaxRes"] = 6.0 # Max. residual flux in sigma
else:
parms["FDmaxRes"] = 5.0 # Max. residual flux in sigma
if parms["FDmaxResBL"]==None:
if cfg[0:1]=='A' or cfg[0:1]=='B' or freq>4.0e9:
parms["FDmaxResBL"] = 6.0 # Max. baseline residual
else:
parms["FDmaxResBL"] = 5.0 # Max. baseline residual
# Averaging time by configuration
if cfg[0:1]=='A':
if parms["calInt"]==None:
parms["calInt"] = 0.30 # Calibration table interval (min)
if parms["CalAvgTime"]==None:
parms["CalAvgTime"] = 1.0/60.0 # Time for averaging calibrated uv data (min)
if parms["doShad"]==None:
parms["doShad"] = False # Shadow flagging (config dependent)
if parms["doHann"]==None:
parms["doHann"] = False # Hanning needed for RFI?
if parms["delaySolInt"]==None:
parms["delaySolInt"] = 10.0/60. # delay solution interval (min)
if parms["solInt"]==None:
parms["solInt"] = 30.0/60. # solution interval (min)
elif cfg[0:1]=='B':
if parms["calInt"]==None:
parms["calInt"] = 0.45 # Calibration table interval (min)
if parms["CalAvgTime"]==None:
parms["CalAvgTime"] = 3.0/60.0 # Time for averaging calibrated uv data (min)
if parms["doShad"]==None:
parms["doShad"] = False # Shadow flagging (config dependent)
if parms["doHann"]==None:
parms["doHann"] = freq<8.0e9 # Hanning needed for RFI?
if parms["delaySolInt"]==None:
parms["delaySolInt"] = 10.0/60 # delay solution interval (min)
if parms["solInt"]==None:
parms["solInt"] = 30.0/60. # solution interval (min)
elif cfg[0:1]=='C':
if parms["calInt"]==None:
parms["calInt"] = 1.0 # Calibration table interval (min)
if parms["CalAvgTime"]==None:
parms["CalAvgTime"] = 10.0/60.0 # Time for averaging calibrated uv data (min)
if parms["doShad"]==None:
parms["doShad"] = True # Shadow flagging (config dependent)
if parms["doHann"]==None:
parms["doHann"] = freq<8.0e9 # Hanning needed for RFI?
if parms["delaySolInt"]==None:
parms["delaySolInt"] = 20.0/60. # delay solution interval (min)
if parms["solInt"]==None:
parms["solInt"] = 30.0/60. # solution interval (min)
elif cfg[0:1]=='D':
if parms["calInt"]==None:
parms["calInt"] = 2.0 # Calibration table interval (min)
if parms["CalAvgTime"]==None:
parms["CalAvgTime"] = 20.0/60.0 # Time for averaging calibrated uv data (min)
if parms["doShad"]==None:
parms["doShad"] = True # Shadow flagging (config dependent)
if parms["doHann"]==None:
parms["doHann"] = freq<8.0e9 # Hanning needed for RFI?
if parms["delaySolInt"]==None:
parms["delaySolInt"] = 30.0/60. # delay solution interval (min)
if parms["solInt"]==None:
parms["solInt"] = 30.0/60. # solution interval (min)
# Frequency dependent values
FWHM = (45.0 /(freq*1.0e-9) ) / 60. # FWHM in deg
if parms["band"]==None:
parms["band"] = EVLAGetBandLetter(freq)
if freq<1.0e9: # Below L band
if parms["delaySmoo"]==None:
parms["delaySmoo"] = 0.25 # Delay smoothing time (hr)
if parms["bpsolint1"]==None:
parms["bpsolint1"] = 10.0/60.0 # BPass phase correction solution in min
if parms["FOV"]==None:
parms["FOV"] = 0.5*FWHM # Field of view radius in deg.
if parms["solPInt"]==None:
parms["solPInt"] = 0.10 # phase self cal solution interval (min)
if parms["solAInt"]==None:
parms["solAInt"] = 3.0 # amp+phase self cal solution interval (min)
if parms["bpChWid2"] == None:
parms["bpChWid2"] = 5 # Number of channels in running mean BP soln
if parms["ampSigma"]==None:
parms["ampSigma"] = 10.0 # Multiple of median RMS about median gain to clip/flag
elif freq<2.0e9: # L band
if parms["delaySmoo"]==None:
parms["delaySmoo"] = 0.25 # Delay smoothing time (hr)
if parms["bpsolint1"]==None:
parms["bpsolint1"] = 15.0/60.0 # BPass phase correction solution in min
if parms["FOV"]==None:
parms["FOV"] = 0.5*FWHM # Field of view radius in deg.
if parms["solPInt"]==None:
parms["solPInt"] = 0.25 # phase self cal solution interval (min)
if parms["solAInt"]==None:
parms["solAInt"] = 3.0 # amp+phase self cal solution interval (min)
if parms["bpChWid2"] == None:
parms["bpChWid2"] = 3 # Number of channels in running mean BP soln
if parms["ampSigma"]==None:
parms["ampSigma"] = 10.0 # Multiple of median RMS about median gain to clip/flag
elif freq<3.0e9: # S band
if parms["delaySmoo"]==None:
parms["delaySmoo"] = 0.25 # Delay smoothing time (hr)
if parms["bpsolint1"]==None:
parms["bpsolint1"] = 10.0/60.0 # BPass phase correction solution in min
if parms["FOV"]==None:
parms["FOV"] = 0.5*FWHM # Field of view radius in deg.
if parms["solPInt"]==None:
parms["solPInt"] = 0.25 # phase self cal solution interval (min)
if parms["solAInt"]==None:
parms["solAInt"] = 3.0 # amp+phase self cal solution interval (min)
if parms["bpChWid2"] == None:
parms["bpChWid2"] = 3 # Number of channels in running mean BP soln
if parms["ampSigma"]==None:
parms["ampSigma"] = 10.0 # Multiple of median RMS about median gain to clip/flag
elif freq<8.0e9: # C band
if parms["delaySmoo"]==None:
parms["delaySmoo"] = 0.25 # Delay smoothing time (hr)
if parms["bpsolint1"]==None:
parms["bpsolint1"] = 10.0/60.0 # BPass phase correction solution in min
if parms["FOV"]==None:
parms["FOV"] = 0.5*FWHM # Field of view radius in deg.
if parms["solPInt"]==None:
parms["solPInt"] = 0.25 # phase self cal solution interval (min)
if parms["solAInt"]==None:
parms["solAInt"] = 3.0 # amp+phase self cal solution interval (min)
if parms["bpChWid2"] == None:
parms["bpChWid2"] = 3 # Number of channels in running mean BP soln
if parms["ampSigma"]==None:
parms["ampSigma"] = 15.0 # Multiple of median RMS about median gain to clip/flag
elif freq<10.0e9: # X band
if parms["delaySmoo"]==None:
parms["delaySmoo"] = 0.25 # Delay smoothing time (hr)
if parms["bpsolint1"]==None:
parms["bpsolint1"] = 10.0/60.0 # BPass phase correction solution in min
if parms["FOV"]==None:
parms["FOV"] = 0.5*FWHM # Field of view radius in deg.
if parms["solPInt"]==None:
parms["solPInt"] = 0.25 # phase self cal solution interval (min)
if parms["solAInt"]==None:
parms["solAInt"] = 3.0 # amp+phase self cal solution interval (min)
if parms["bpChWid2"] == None:
parms["bpChWid2"] = 3 # Number of channels in running mean BP soln
if parms["ampSigma"]==None:
parms["ampSigma"] = 20.0 # Multiple of median RMS about median gain to clip/flag
elif freq<18.0e9: # Ku band
if parms["delaySmoo"]==None:
parms["delaySmoo"] = 0.25 # Delay smoothing time (hr)
if parms["bpsolint1"]==None:
parms["bpsolint1"] = 10.0/60.0 # BPass phase correction solution in min
if parms["FOV"]==None:
parms["FOV"] = 0.5*FWHM # Field of view radius in deg.
if parms["solPInt"]==None:
parms["solPInt"] = 0.25 # phase self cal solution interval (min)
if parms["solAInt"]==None:
parms["solAInt"] = 3.0 # amp+phase self cal solution interval (min)
if parms["bpChWid2"] == None:
parms["bpChWid2"] = 3 # Number of channels in running mean BP soln
if parms["ampSigma"]==None:
parms["ampSigma"] = 20.0 # Multiple of median RMS about median gain to clip/flag
elif freq<26.0e9: # K band
if parms["delaySmoo"]==None:
parms["delaySmoo"] = 0.25 # Delay smoothing time (hr)
if parms["bpsolint1"]==None:
parms["bpsolint1"] = 10.0/60.0 # BPass phase correction solution in min
if parms["FOV"]==None:
parms["FOV"] = 0.25*FWHM # Field of view radius in deg.
if parms["solPInt"]==None:
parms["solPInt"] = 0.25 # phase self cal solution interval (min)
if parms["solAInt"]==None:
parms["solAInt"] = 3.0 # amp+phase self cal solution interval (min)
if parms["bpChWid2"] == None:
parms["bpChWid2"] = 3 # Number of channels in running mean BP soln
if parms["ampSigma"]==None:
parms["ampSigma"] = 20.0 # Multiple of median RMS about median gain to clip/flag
elif freq<38.0e9: # Ka band
if parms["delaySmoo"]==None:
parms["delaySmoo"] = 0.25 # Delay smoothing time (hr)
if parms["bpsolint1"]==None:
parms["bpsolint1"] = 10.0/60.0 # BPass phase correction solution in min
if parms["FOV"]==None:
parms["FOV"] = 0.25*FWHM # Field of view radius in deg.
if parms["solPInt"]==None:
parms["solPInt"] = 0.25 # phase self cal solution interval (min)
if parms["solAInt"]==None:
parms["solAInt"] = 3.0 # amp+phase self cal solution interval (min)
if parms["bpChWid2"] == None:
parms["bpChWid2"] = 3 # Number of channels in running mean BP soln
if parms["ampSigma"]==None:
parms["ampSigma"] = 20.0 # Multiple of median RMS about median gain to clip/flag
elif freq<50.0e9: # Q band
if parms["delaySmoo"]==None:
parms["delaySmoo"] = 0.5 # Delay smoothing time (hr)
if parms["bpsolint1"]==None:
parms["bpsolint1"] = 5.0/60.0 # BPass phase correction solution in min
if parms["FOV"]==None:
parms["FOV"] = 0.25*FWHM # Field of view radius in deg.
if parms["solPInt"]==None:
parms["solPInt"] = 0.10 # phase self cal solution interval (min)
if parms["solAInt"]==None:
parms["solAInt"] = 3.0 # amp+phase self cal solution interval (min)
if parms["bpChWid2"] == None:
parms["bpChWid2"] = 3 # Number of channels in running mean BP soln
if parms["ampSigma"]==None:
parms["ampSigma"] = 20.0 # Multiple of median RMS about median gain to clip/flag
# Should be fairly large
else: # Above Q band
if parms["delaySmoo"]==None:
parms["delaySmoo"] = 0.5 # Delay smoothing time (hr)
if parms["bpsolint1"]==None:
parms["bpsolint1"] = 5.0/60.0 # BPass phase correction solution in min
if parms["FOV"]==None:
parms["FOV"] = 0.25*FWHM # Field of view radius in deg.
if parms["solPInt"]==None:
parms["solPInt"] = 0.10 # phase self cal solution interval (min)
if parms["solAInt"]==None:
parms["solAInt"] = 3.0 # amp+phase self cal solution interval (min)
if parms["bpChWid2"] == None:
parms["bpChWid2"] = 3 # Number of channels in running mean BP soln
if parms["ampSigma"]==None:
parms["ampSigma"] = 20.0 # Multiple of median RMS about median gain to clip/flag
# end EVLAInitContFqParms
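
# Typical use of the two parameter routines above (sketch only, mirroring how
# the EVLA pipeline scripts call them; the frequency/configuration entries
# below are normally filled in from the archive metadata before the
# frequency-dependent defaults are derived):
#
#   parms = EVLAInitContParms()
#   parms["VLAFreq"]       = 1.5e9   # observing frequency (Hz)
#   parms["VLACfg"]        = "B"     # array configuration
#   parms["selChan"]       = 64      # number of channels selected
#   parms["band"]          = None    # derived from VLAFreq if left None
#   parms["doAmpPhaseCal"] = None    # defaulted to True if left None
#   EVLAInitContFQParms(parms)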
def EVLAClearCal(uv, err, doGain=True, doBP=False, doFlag=False, saveSY=False,
check=False, logfile=""):
"""
Clear previous calibration
Delete all SN tables, all CL but CL 1?
if saveSY and an SY table ver>1 exists save SN 1, CL 1&2
* uv = UV data object to clear
* err = Obit error/message stack
* doGain = If True, delete SN and CL tables
* doBP = If True, delete BP tables
* doFlag = If True, delete FG tables except FG=1
* saveSY = If True, save the potential results of SY calibration
* check = Only check script, don't execute tasks
"""
################################################################
# Only checking?
if check:
return
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
# Need to check for SY calibration?
ver = uv.GetHighVer("AIPS SY")
if (ver>1) and saveSY:
minSN = 1; minCL = 2;
else:
minSN = 0; minCL = 1;
# Gain tables
if doGain:
ver = uv.GetHighVer("AIPS SN")
while (ver>minSN):
mess = "Delete SN table %d" % (ver)
printMess(mess, logfile)
uv.ZapTable ('AIPS SN', ver, err)
ver = ver-1
ver = uv.GetHighVer("AIPS CL")
while (ver>minCL):
mess = "Delete CL table %d" % (ver)
printMess(mess, logfile)
uv.ZapTable ('AIPS CL', ver, err)
ver = ver-1
# Bandpass
if doBP:
mess = "Delete Delete all BP tables"
printMess(mess, logfile)
uv.ZapTable("AIPS BP",-1,err)
# Flag tables
if doFlag:
ver = uv.GetHighVer("AIPS FG")
while (ver>1):
mess = "Delete FG table %d" % (ver)
printMess(mess, logfile)
uv.ZapTable ('AIPS FG', ver, err)
ver = ver-1
OErr.printErrMsg(err, "EVLAClearCal: Error reseting calibration")
# end EVLAClearCal
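
# Example call (sketch): reset all derived calibration on a freshly loaded UV
# data set, keeping only FG 1 and, when an SY table is present, the SY-derived
# SN 1 and CL 1-2:
#
#   EVLAClearCal(uv, err, doGain=True, doBP=True, doFlag=True, saveSY=True,
#                logfile="pipeline.log")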
def EVLACopyFG(uv, err, logfile='', check=False, debug = False):
"""
Copy AIPS FG table from 1 to 2
Returns task error code, 0=OK, else failed
* uv = UV data object to copy
* err = Obit error/message stack
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
"""
################################################################
taco = ObitTask.ObitTask("TabCopy")
try:
taco.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv, taco)
taco.outName = taco.inName
taco.outClass = taco.inClass
taco.outDisk = taco.inDisk
taco.outSeq = taco.inSeq
taco.inTab = "AIPS FG"
taco.inVer = 1
taco.outVer = 2
taco.taskLog = logfile
if debug:
taco.debug = debug
taco.i
# Trap failure
try:
if not check:
taco.g
except Exception as exception:
print(exception)
mess = "Copy of FG table Failed retCode="+str(taco.retCode)
printMess(mess, logfile)
return 1
else:
pass
return 0
# end EVLACopyFG
def EVLACopyTable(inObj, outObj, inTab, err, inVer=1, outVer=0,
logfile='', check=False, debug = False):
"""
Copy AIPS Table
Returns task error code, 0=OK, else failed
* inObj = Input Object (UV or Image)
* outObj = Output object
* inTab = Table type, e.g. "AIPS AN"
* err = Obit error/message stack
* inVer = intput version
* outVer = output version
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
"""
################################################################
mess = "Copy "+inTab+" Table "+str(inVer)+" to "+str(outVer)
printMess(mess, logfile)
taco = ObitTask.ObitTask("TabCopy")
try:
taco.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(inObj, taco)
setoname(outObj, taco)
taco.inTab = inTab
taco.inVer = inVer
taco.outVer = outVer
taco.taskLog = logfile
if debug:
taco.debug = debug
taco.i
# Trap failure
try:
if not check:
taco.g
except Exception as exception:
print(exception)
mess = "Copy of "+inTab+" table Failed retCode="+str(taco.retCode)
printMess(mess, logfile)
return 1
else:
pass
return 0
# end EVLACopyTable
def EVLAUVLoad(filename, inDisk, Aname, Aclass, Adisk, Aseq, err, logfile=''):
"""
Read FITS uvtab file into AIPS
Returns task error code, 0=OK, else failed
Read a UVTAB FITS UV data file and write an AIPS data set
* filename = name of FITS file
* inDisk = FITS directory number
* Aname = AIPS name of file
* Aclass = AIPS class of file
* Aseq = AIPS sequence number of file, 0=> create new
    * Adisk = AIPS disk number of file
* err = Python Obit Error/message stack
* logfile = logfile for messages
returns AIPS UV data object
"""
################################################################
mess = "Load FITS uvtab file into AIPS"
printMess(mess, logfile)
#
# Checks
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
#
# Get input
inUV = UV.newPFUV("FITS UV DATA", filename, inDisk, True, err)
if err.isErr:
OErr.printErrMsg(err, "Error with FITS data")
# Get output, create new if seq=0
if Aseq<1:
OErr.printErr(err) # Print any outstanding messages
user = OSystem.PGetAIPSuser()
Aseq=AIPSDir.PHiSeq(Adisk,user,Aname,Aclass,"MA",err)
# If it already exists, increment seq
if AIPSDir.PTestCNO(Adisk,user,Aname,Aclass,"MA",Aseq,err)>0:
Aseq = Aseq+1
OErr.PClear(err) # Clear any message/error
mess = "Creating AIPS UV file "+Aname+"."+Aclass+"."+str(Aseq)+" on disk "+str(Adisk)
printMess(mess, logfile)
outUV = UV.newPAUV("AIPS UV DATA", Aname, Aclass, Adisk, Aseq, False, err)
if err.isErr:
OErr.printErrMsg(err, "Error creating AIPS data")
# Copy
UV.PCopy (inUV, outUV, err)
if err.isErr:
OErr.printErrMsg(err, "Error copying UV data to AIPS")
# Copy History
inHistory = History.History("inhistory", inUV.List, err)
outHistory = History.History("outhistory", outUV.List, err)
History.PCopyHeader(inHistory, outHistory, err)
# Add history
outHistory.Open(History.READWRITE, err)
outHistory.TimeStamp(" Start Obit uvlod",err)
outHistory.WriteRec(-1,"uvlod / FITS file "+filename+" disk "+str(inDisk),err)
outHistory.Close(err)
#
# Copy Tables
exclude=["AIPS HI", "AIPS AN", "AIPS FQ", "AIPS SL", "AIPS PL", "History"]
include=[]
UV.PCopyTables (inUV, outUV, exclude, include, err)
del inUV
return outUV # return new object
# end EVLAUVLoad
def EVLAUVLoadT(filename, disk, Aname, Aclass, Adisk, Aseq, err, logfile=" ", \
check=False, debug = False, Compress=False):
"""
Read FITS file into AIPS
Read input uvtab FITS file, write AIPS
Returns Obit uv object, None on failure
* Filename = input FITS uvtab format file
* disk = input FITS file disk number
* Aname = output AIPS file name
* Aclass = output AIPS file class
* Adisk = output AIPS file disk
* Aseq = output AIPS file sequence
* err = Obit error/message stack
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
* Compress = Write AIPS data in compressed form?
"""
################################################################
mess = "Load FITS uvtab file into AIPS"
printMess(mess, logfile)
#
uvc = ObitTask.ObitTask("UVCopy")
try:
uvc.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
uvc.DataType = "FITS"
uvc.inFile = filename
uvc.inDisk = disk
uvc.outDType = "AIPS"
uvc.outName = Aname
uvc.outClass = Aclass
uvc.outSeq = Aseq
uvc.outDisk = Adisk
uvc.Compress = Compress
uvc.taskLog = logfile
if debug:
uvc.i
uvc.debug = debug
# Trap failure
try:
if not check:
uvc.g
except Exception as exception:
print(exception)
mess = "UVData load Failed "
printMess(mess, logfile)
else:
pass
# Get output
if uvc.retCode==0:
outuv = UV.newPAUV("UVdata", Aname, Aclass, Adisk, Aseq, True, err)
else:
        outuv = None
return outuv
# end EVLAUVLoadT
def EVLAUVLoadArch(dataroot, Aname, Aclass, Adisk, Aseq, err, \
selConfig=-1, selBand="", selChan=0, selNIF=0, selChBW=-1.0, \
dropZero=True, calInt=0.5, doSwPwr=False, Compress=False, \
logfile = "", check=False, debug = False):
"""
Read EVLA archive into AIPS
Read EVLA archive file, write AIPS
Returns Obit uv object, None on failure
* dataroot = root of archive directory structure
* Aname = output AIPS file name
* Aclass = output AIPS file class
* Adisk = output AIPS file disk
* Aseq = output AIPS file sequence
* err = Obit error/message stack
* selBand = Selected band, def first
* selChan = Selected number of channels, def first
* selNIF = Selected number of IFs, def first
* selChBW = Selected channel BW (kHz)
* dropZero = If true drop records with all zeroes
* calInt = CL table interval
* doSwPwr = Make EVLA Switched power corr?
* Compress = Write AIPS data in compressed form?
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
"""
################################################################
mess = "Load archive file into AIPS"
printMess(mess, logfile)
#
outuv = None
bdf = ObitTask.ObitTask("BDFIn")
try:
bdf.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
bdf.DataRoot = dataroot
bdf.DataType = "AIPS"
bdf.outName = Aname[0:12]
bdf.outClass = Aclass[0:6]
bdf.outSeq = Aseq
bdf.outDisk = Adisk
bdf.selConfig= selConfig
bdf.selBand = selBand
bdf.selChan = selChan
bdf.selIF = selNIF
if ("selChBW" in bdf.__dict__):
bdf.selChBW = selChBW
bdf.dropZero = dropZero
bdf.calInt = calInt
bdf.doSwPwr = doSwPwr
bdf.Compress = Compress
bdf.taskLog = logfile
if debug:
bdf.i
bdf.debug = debug
# Trap failure
try:
if not check:
bdf.g
except Exception as exception:
print(exception)
mess = "UVData load Failed "
printMess(mess, logfile)
else:
pass
# Get output
if bdf.retCode==0:
outuv = UV.newPAUV("UVdata", Aname, Aclass, Adisk, Aseq, True, err)
else:
        outuv = None
# Dummy entry to ensure FG table 1
if not check:
UV.PFlag (outuv, err, timeRange=[1.0e20,1.0e21], Ants=[999,0], Reason="Dummy flag")
# Scan listing
listr = ObitTask.ObitTask("Lister")
try:
listr.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
setname(outuv,listr)
listr.taskLog = logfile
listr.prtFile = logfile
listr.doCrt = -1 # To log file
try:
if not check:
listr.g
except Exception as exception:
print(exception)
mess = "Lister Failed retCode="+str(listr.retCode)
printMess(mess, logfile)
else:
pass
return outuv
# end EVLAUVLoadArch
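
# Example call (sketch; the data root and AIPS names below are placeholders):
# load an EVLA SDM/BDF archive into AIPS, selecting an L-band, 64-channel
# setup. Returns an Obit UV object, or None on failure.
#
#   uv = EVLAUVLoadArch("/path/to/asdm_root", "MYPROJ", "UVDATA", 1, 1, err,
#                       selBand="L", selChan=64, calInt=0.5,
#                       logfile="pipeline.log")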
def EVLAHann(inUV, Aname, Aclass, Adisk, Aseq, err, doDescm=True, \
logfile='', check=False, debug=False):
""" Hanning smooth a file to AIPS
Returns task error code, 0=OK, else failed
inUV = UV data to smooth
Aname = AIPS name of file
Aclass = AIPS class of file
Aseq = AIPS sequence number of file, 0=> create new
Adisk = FITS directory number
    doDescm  = If True decimate (drop alternate) channels
err = Python Obit Error/message stack
check = Only check script, don't execute tasks
debug = Run tasks debug, show input
logfile = logfile for messages
returns AIPS UV data object, None on failure
"""
################################################################
mess = "Hanning smooth data"
printMess(mess, logfile)
#
hann=ObitTask.ObitTask("Hann")
try:
hann.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
setname(inUV,hann)
if check:
return inUV
hann.outName = Aname[0:12]
hann.outClass = Aclass[0:6]
hann.outSeq = Aseq
hann.outDisk = Adisk
hann.flagVer = -1
hann.doDescm = doDescm
hann.taskLog = logfile
hann.debug = debug
if debug:
hann.i
# Trap failure
try:
if not check:
hann.g
except Exception as exception:
print(exception)
mess = "Median flagging Failed retCode="+str(hann.retCode)
printMess(mess, logfile)
return None
else:
pass
# Get output
outUV = UV.newPAUV("AIPS UV DATA", Aname, Aclass, Adisk, Aseq, True, err)
if err.isErr:
mess = "Error Getting Hanning smoothed data"
printMess(mess, logfile)
return None
return outUV
# end EVLAHann
def EVLAImFITS(inImage, filename, outDisk, err, fract=None, quant=None, \
exclude=["AIPS HI","AIPS PL","AIPS SL"], include=["AIPS CC"],
headHi=False, logfile=""):
"""
Write AIPS image as FITS
Write a Image data set as a FITAB format file
History also copied
* inImage = Image data to copy
* filename = name of FITS file, any whitespace characters replaced with underscore
* outDisk = FITS directory number
* err = Python Obit Error/message stack
* fract = Fraction of RMS to quantize
* quant = quantization level in image units, has precedence over fract
None or <= 0 => use fract.
* exclude = List of table types NOT to copy
NB: "AIPS HI" isn't really a table and gets copied anyway
* include = List of table types to copy
* headHi = if True move history to header, else leave in History table
"""
################################################################
mess = "Write Image to FITS "+filename+" on disk "+str(outDisk)
printMess(mess, logfile)
#
# Checks
if not Image.PIsA(inImage):
raise TypeError("inImage MUST be a Python Obit Image")
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
#
# Deblank filename
    fn = re.sub(r'\s','_',filename)
# Set output
outImage = Image.newPFImage("FITS Image DATA", fn, outDisk, False, err)
if err.isErr:
OErr.printErrMsg(err, "Error creating FITS data")
# Check for valid pixels
if inImage.Desc.Dict["maxval"]<=inImage.Desc.Dict["minval"]:
fract=None; quant=None
# Copy
if fract or quant:
Image.PCopyQuantizeFITS (inImage, outImage, err, fract=fract, quant=quant)
else:
Image.PCopy (inImage, outImage, err)
if err.isErr:
OErr.printErrMsg(err, "Error copying Image data to FITS")
# Copy History
inHistory = History.History("inhistory", inImage.List, err)
outHistory = History.History("outhistory", outImage.List, err)
History.PCopy(inHistory, outHistory, err)
# Add this programs history
outHistory.Open(History.READWRITE, err)
outHistory.TimeStamp(" Start Obit imtab",err)
if fract:
outHistory.WriteRec(-1,"imtab / Quantized at "+str(fract)+" RMS",err)
outHistory.WriteRec(-1,"imtab / FITS file "+fn+", disk "+str(outDisk),err)
outHistory.Close(err)
# History in header?
if headHi:
# Copy back to header
inHistory = History.History("inhistory", outImage.List, err)
History.PCopy2Header (inHistory, outHistory, err)
# zap table
outHistory.Zap(err)
OErr.printErrMsg(err, "Error with history")
# Copy Tables
Image.PCopyTables (inImage, outImage, exclude, include, err)
del outImage
# end EVLAImFITS
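# Illustrative usage sketch for EVLAImFITS (hypothetical helper; the FITS file
# name, disk number and quantization fraction are example values only).
def _exampleEVLAImFITS(img, err, logfile=""):
    """Sketch: write an Obit image to FITS, quantized at 0.25 of the RMS."""
    EVLAImFITS(img, "MyImage.fits", 0, err, fract=0.25, headHi=True, logfile=logfile)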
def EVLAUVFITS(inUV, filename, outDisk, err, compress=False, \
exclude=["AIPS HI", "AIPS AN", "AIPS FQ", "AIPS SL", "AIPS PL"], \
include=[], headHi=False, logfile=""):
"""
Write UV data as FITS file
Write a UV data set as a FITAB format file
History written to header
* inUV = UV data to copy
* filename = name of FITS file, any whitespace characters replaced with underscore
* outDisk = FITS directory number
    * err      = Python Obit Error/message stack
    * compress = If True, write data in compressed form
* exclude = List of table types NOT to copy
NB: "AIPS HI" isn't really a table and gets copied anyway
* include = List of table types to copy (FQ, AN always done )
                 Exclude has precedence over include
* headHi = if True move history to header, else leave in History table
returns FITS UV data object
"""
################################################################
mess = "Write Data to FITS UV data "+filename+" on disk "+str(outDisk)
printMess(mess, logfile)
# Checks
if not UV.PIsA(inUV):
raise TypeError("inUV MUST be a Python Obit UV")
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
#
# Deblank filename
    fn = re.sub(r'\s','_',filename)
# Set output
outUV = UV.newPFUV("FITS UV DATA", fn, outDisk, False, err)
if err.isErr:
OErr.printErrMsg(err, "Error creating FITS data")
#Compressed?
if compress:
inInfo = UV.PGetList(outUV) #
dim = [1,1,1,1,1]
InfoList.PAlwaysPutBoolean (inInfo, "Compress", dim, [True])
# Copy
UV.PCopy (inUV, outUV, err)
if err.isErr:
OErr.printErrMsg(err, "Error copying UV data to FITS")
# History
inHistory = History.History("inhistory", outUV.List, err)
outHistory = History.History("outhistory", outUV.List, err)
# Add history
outHistory.Open(History.READWRITE, err)
outHistory.TimeStamp(" Start Obit uvtab",err)
outHistory.WriteRec(-1,"uvtab / FITS file "+fn+" disk "+str(outDisk),err)
outHistory.Close(err)
# History in header?
if headHi:
History.PCopy2Header (inHistory, outHistory, err)
OErr.printErrMsg(err, "Error with history")
# zap table
outHistory.Zap(err)
# Copy Tables
UV.PCopyTables (inUV, outUV, exclude, include, err)
return outUV # return new object
# end EVLAUVFITS
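# Illustrative usage sketch for EVLAUVFITS (hypothetical helper; file name and
# disk number are placeholders).
def _exampleEVLAUVFITS(uv, err, logfile=""):
    """Sketch: write UV data as a compressed FITAB file on FITS disk 0."""
    return EVLAUVFITS(uv, "MyData.uvtab", 0, err, compress=True, logfile=logfile)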
def EVLAUVFITSTab(inUV, filename, outDisk, err, \
exclude=["AIPS HI", "AIPS AN", "AIPS FQ", "AIPS SL", "AIPS PL"], \
include=[], logfile=""):
"""
Write Tables on UV data as FITS file
Write Tables from a UV data set (but no data) as a FITAB format file
History written to header
* inUV = UV data to copy
* filename = name of FITS file, any whitespace characters replaced with underscore
* outDisk = FITS directory number
* err = Python Obit Error/message stack
* exclude = List of table types NOT to copy
NB: "AIPS HI" isn't really a table and gets copied anyway
* include = List of table types to copy (FQ, AN always done )
                 Exclude has precedence over include
returns FITS UV data object
"""
################################################################
mess = "Write Tables to FITS UV data "+filename+" on disk "+str(outDisk)
printMess(mess, logfile)
# Checks
if not UV.PIsA(inUV):
raise TypeError("inUV MUST be a Python Obit UV")
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
#
# Deblank filename
    fn = re.sub(r'\s','_',filename)
# Set output
outUV = UV.newPFUV("FITS UV DATA", fn, outDisk, False, err)
if err.isErr:
OErr.printErrMsg(err, "Error creating FITS data")
# Clone
UV.PClone (inUV, outUV, err)
if err.isErr:
OErr.printErrMsg(err, "Error cloning UV data to FITS")
# Copy back to header
inHistory = History.History("inhistory", outUV.List, err)
outHistory = History.History("outhistory", outUV.List, err)
# Add history
outHistory.Open(History.READWRITE, err)
outHistory.TimeStamp(" Start Obit uvTabSave",err)
outHistory.WriteRec(-1,"uvTabSave / FITS file "+fn+" disk "+str(outDisk),err)
outHistory.Close(err)
History.PCopy2Header (inHistory, outHistory, err)
OErr.printErrMsg(err, "Error with history")
# zap table
outHistory.Zap(err)
# Copy Tables
UV.PCopyTables (inUV, outUV, exclude, include, err)
return outUV # return new object
# end EVLAUVFITSTab
def EVLADropChan(uv, BChDrop, EChDrop, err, flagVer=2, \
logfile='', check=False, debug=False):
"""
Drop end channels from each spectrum (IF)
Returns 0=OK, else failed
* uv = UV data object to copy
* BChDrop = number of channels to drop from the beginning of each IF
* EChDrop = number of channels to drop from the end of each IF
* flagVer = flag table version
* err = Obit error/message stack
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
"""
################################################################
if check:
return 0
if BChDrop>0:
UV.PFlag(uv,err,flagVer=flagVer, Chans=[1,max(1,BChDrop)], IFs=[1,0], \
Reason="End Channels")
if EChDrop>0:
d = uv.Desc.Dict
nchan = int(d["inaxes"][d["jlocf"]])
ch = nchan - EChDrop + 1
UV.PFlag(uv,err,flagVer=flagVer, Chans=[ch, nchan], IFs=[1,0], \
Reason="End Channels")
OErr.printErrMsg(err, "Error Flagging")
return 0
# end EVLADropChan
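# Illustrative usage sketch for EVLADropChan (hypothetical helper; the number
# of edge channels and the flag table are example values).
def _exampleEVLADropChan(uv, err, logfile=""):
    """Sketch: flag 3 channels at each end of every IF into FG table 2."""
    return EVLADropChan(uv, 3, 3, err, flagVer=2, logfile=logfile)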
def EVLAMedianFlag(uv, target, err, \
flagTab=2, flagSig=10.0, alpha=0.5, timeWind=2.0, \
doCalib=0, gainUse=0, doBand=0, BPVer=0, flagVer=-1, \
avgTime=0, avgFreq=0, chAvg=1, \
check=False, debug = False, \
nThreads=1, noScrat=[], logfile = ""):
"""
Does Median window flagging
Flag data based on deviations from a running median
See documentation for task MednFlag for details
Returns task error code, 0=OK, else failed
* uv = UV data object to flag
* target = Target source name or list of names, blank = all
* err = Obit error/message stack
* flagTab = Output Flagging table version
* flagSig = Flagging level (sigma)
* alpha = Smoothing parameter
* timeWind = Averaging window (min)
* doCalib = Apply calibration table
* gainUse = CL/SN table to apply
* doBand = If >0.5 apply bandpass cal.
* BPVer = Bandpass table version
* flagVer = Input Flagging table version
* avgTime = preaveraging time (min)
* avgFreq = 1=>avg chAvg chans, 2=>avg all chan, 3=> avg chan and IFs
* chAvg = number of channels to average
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
* nThreads = Number of threads to use
* noScrat = list of disks to avoid for scratch files
* logfile = Log file for task
"""
################################################################
mess = "Median Window flagging"
printMess(mess, logfile)
medn=ObitTask.ObitTask("MednFlag")
try:
medn.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
setname(uv,medn)
if type(target)==list:
medn.Sources=target
else:
medn.Sources=[target]
medn.flagTab = flagTab
medn.flagSig = flagSig
medn.alpha = alpha
medn.timeWind = timeWind
medn.doCalib = doCalib
medn.gainUse = gainUse
medn.doBand = doBand
medn.BPVer = BPVer
medn.avgTime = avgTime
medn.avgFreq = avgFreq
medn.chAvg = chAvg
medn.flagVer = flagVer
medn.nThreads = nThreads
medn.taskLog = logfile
medn.noScrat = noScrat
medn.debug = debug
#bombaroonie = BombsAwayWithCurtisLemay # DEBUG
if debug:
medn.i
# Trap failure
try:
if not check:
medn.g
except Exception as exception:
print(exception)
mess = "Median flagging Failed retCode="+str(medn.retCode)
printMess(mess, logfile)
return 1
else:
pass
return 0
# end EVLAMedianFlag
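# Illustrative usage sketch for EVLAMedianFlag (hypothetical helper; the
# flagging level, windows and thread count are example values).
def _exampleEVLAMedianFlag(uv, err, logfile=""):
    """Sketch: median window flag all sources at 8 sigma."""
    retCode = EVLAMedianFlag(uv, "    ", err, flagSig=8.0, timeWind=1.0,
                             avgTime=10.0/60.0, nThreads=2, logfile=logfile)
    if retCode!=0:
        printMess("Median window flagging failed", logfile)
    return retCode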
def EVLAQuack(uv, err, \
Stokes = " ", BIF=1, EIF=0, Sources=[" "], FreqID=0, \
subA=0, timeRange=[0.,0.], Antennas=[0], flagVer=2, \
check=False, debug = False, \
begDrop=0.0, endDrop=0.0, Reason="Quack", logfile = ""):
"""
Flags beginning and end of each scan
Trim start and end of each selected scan,
nothing done if begDrop=endDrop=0.0
See documentation for task Quack for details
Returns task error code, 0=OK, else failed
* uv = UV data object to flag
* err = Obit error/message stack
* Stokes = Limit flagging by Stokes
* BIF = Limit flagging to BIF-EIF
* EIF = Limit flagging
* Sources = Sources selected
* subA = Subarray number 0=>all
* FreqID = Freq. ID to flag. -1=>all
* timeRange= Time range to process
* Antennas = List of antennas to include
* flagVer = Flag table version, 0 => highest
* begDrop = Time (min) to drop from beginning
* endDrop = Time (min) to drop from end
* Reason = Reason (max 24 char.)
    * logfile = Log file for task
    * check   = Only check script, don't execute tasks
    * debug   = Run tasks debug, show input
"""
################################################################
# Anything to do?
if (begDrop<=0) and (endDrop<=0):
return 0
mess = "Quack data"
printMess(mess, logfile)
quack=ObitTask.ObitTask("Quack")
try:
quack.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv, quack)
quack.Stokes = Stokes
quack.BIF = BIF
quack.EIF = EIF
quack.Sources = Sources
quack.subA = subA
quack.FreqID = FreqID
quack.timeRange = timeRange
quack.Antennas = Antennas
quack.flagVer = flagVer
quack.begDrop = begDrop
quack.endDrop = endDrop
quack.Reason = Reason
quack.taskLog = logfile
if debug:
quack.i
quack.debug = debug
# Trap failure
try:
if not check:
quack.g
except Exception as exception:
print(exception)
mess = "Quack Failed retCode= "+str(quack.retCode)
printMess(mess, logfile)
return 1
else:
pass
return 0
# end EVLAQuack
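# Illustrative usage sketch for EVLAQuack (hypothetical helper; begDrop is in
# minutes, so 0.1 trims the first 6 seconds of every scan).
def _exampleEVLAQuack(uv, err, logfile=""):
    """Sketch: drop the start of each scan into FG table 2."""
    return EVLAQuack(uv, err, begDrop=0.1, flagVer=2, Reason="Quack beg",
                     logfile=logfile)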
def EVLAShadow(uv, err, shadBl=25.0, flagVer=2, \
check=False, debug=False, logfile = ""):
"""
Flags antennas shadowed by others
See documentation for task Obit/UVFlag for details
Returns task error code, 0=OK, else failed
* uv = UV data object to flag
* err = Obit error/message stack
    * shadBl   = Minimum shadowing baseline (m)
    * flagVer  = Output flag table version to receive the shadow flags
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
* logfile = Log file for task
"""
################################################################
mess = "Shadow flag data"
printMess(mess, logfile)
uvflg=ObitTask.ObitTask("UVFlag")
try:
uvflg.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv, uvflg)
uvflg.opCode = "SHAD"
uvflg.minShad = shadBl
uvflg.flagTab = flagVer
uvflg.Reason = "Shadowed"
uvflg.taskLog = logfile
if debug:
uvflg.prtLv = 4
uvflg.debug = debug
uvflg.i
# Trap failure
try:
if not check:
uvflg.g
except Exception as exception:
print(exception)
mess = "UVFlag Failed retCode= "+str(uvflg.retCode)
printMess(mess, logfile)
return 1
else:
pass
return 0
# end EVLAShadow
def EVLAAutoFlag(uv, target, err, \
doCalib=0, gainUse=0, doBand=0, BPVer=0, flagVer=-1, \
flagTab=2, VClip=[0.0,0.0], IClip=[0.0,0.0], XClip=[0.0,0.0], minAmp=0.0, \
RMSClip=[0.0,0.0], RMSAvg=0.0, maxBad=0.25 ,timeAvg=1.0, \
doFD=False, FDmaxAmp=0.0, FDmaxV=0.0, FDwidMW=5, FDmaxRMS=[0.0,0.0], \
FDmaxRes=6.0, FDmaxResBL=6.0, FDbaseSel=[0, 0, 0, 0], \
nThreads=1, check=False, debug=False, noScrat=[], logfile = ""):
"""
Does Automated flagging
Flag data based on any of a number of criteria
See documentation for task AutoFlag for details
Returns task error code, 0=OK, else failed
* uv = UV data object to flag
* target = Target source name or list of names, blank = all
* err = Obit error/message stack
* doCalib = Apply calibration table
* gainUse = CL/SN table to apply
* doBand = If >0.5 apply bandpass cal.
* BPVer = Bandpass table version
* flagVer = Input Flagging table version
* flagTab = Output Flagging table version
* VClip = If > 0.0 VPol clipping level
* IClip = If > 0.0 IPol clipping level
* XClip = If > 0.0 Cross-pol clipping level
* minAmp = Min flux for IClip flagging
* RMSClip = Abs and fractional clip levels for
Time domain RMS filtering
* RMSAvg = Max RMS/Avg for time domain RMS filtering
* maxBad = Maximum fraction of baselines for
correlator or antenna to be
flagged before all are flagged
* timeAvg = Flagging interval (min)
* doFD = do frequency domain editing?
* FDmaxAmp = Maximum average amplitude
* FDmaxV = Maximum average VPol amp
* FDwidMW = Width of the median window
* FDmaxRMS = Channel RMS limits
* FDmaxRes = Max. residual flux in sigma
* FDmaxResBL = Max. baseline residual
* FDbaseSel = Channels for baseline fit (start, end, increment, IF)
* nThreads = Number of threads to use
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
* noScrat = list of disks to avoid for scratch files
* logfile = Log file for task
"""
################################################################
# Test if full poln
d = uv.Desc.Dict
nstoke = int(d["inaxes"][d["jlocs"]])
if nstoke<4:
XClip = None # No X clip if not full poln
# Anything requested?
if (IClip==None or IClip[0]==0.) and (VClip==None or VClip[0]==0.) and \
(XClip==None or XClip[0]==0.) and (RMSClip==None or RMSClip[0]==0.) and \
       (doFD==False) and (minAmp==0.0) and (RMSAvg==0.0):
return 0
mess = "AutoFlag data"
printMess(mess, logfile)
af=ObitTask.ObitTask("AutoFlag")
try:
af.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv,af)
if type(target)==list:
af.Sources=target
else:
af.Sources=[target]
af.flagTab = flagTab
af.flagVer = flagVer
af.doCalib = doCalib
af.gainUse = gainUse
af.doBand = doBand
af.BPVer = BPVer
af.VClip = VClip
af.IClip = IClip
if ("XClip" in af.__dict__) and (XClip!=None):
af.XClip = XClip
af.minAmp = minAmp
af.RMSClip = RMSClip
af.RMSAvg = RMSAvg
af.maxBad = maxBad
af.timeAvg = timeAvg
af.doFD = doFD
af.FDmaxAmp = FDmaxAmp
af.FDmaxV = FDmaxV
af.FDwidMW = FDwidMW
af.FDmaxRMS = FDmaxRMS
af.FDmaxRes = FDmaxRes
af.FDmaxResBL = FDmaxResBL
af.FDbaseSel = FDbaseSel
af.nThreads = nThreads
af.noScrat = noScrat
af.taskLog = logfile
if debug:
af.i
af.debug = debug
# Trap failure
try:
if not check:
af.g
except Exception as exception:
print(exception)
mess = "AutoFlag Failed retCode="+str(af.retCode)
printMess(mess, logfile)
return 1
else:
pass
return 0
# end EVLAAutoFlag
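# Illustrative usage sketch for EVLAAutoFlag (hypothetical helper; clipping
# levels, averaging time and thread count are example values only).
def _exampleEVLAAutoFlag(uv, err, logfile=""):
    """Sketch: clip wild Stokes I amplitudes and do frequency domain editing."""
    retCode = EVLAAutoFlag(uv, "    ", err, flagTab=2, IClip=[500.0,0.1],
                           doFD=True, FDmaxAmp=500.0, FDmaxV=10.0,
                           timeAvg=0.5, nThreads=2, logfile=logfile)
    return retCode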
def EVLASrvrEdt(uv, err, minOK=[0.1,0.1], flagTab=2, target=None, \
doCalib=0, gainUse=0, doBand=0, BPVer=0, flagVer=-1, \
nThreads=1, check=False, debug=False, logfile = ""):
"""
Survivor editing
    See documentation for task Obit/SrvrEdt for details
Returns task error code, 0=OK, else failed
* uv = UV data object to flag
* err = Obit error/message stack
* minOK = Min OK fraction (per IF, record)
* flagTab = Output Flag table version, 0 => new highest
* target = Target source name or list of names, blank/None = all
max. 30 entries
* doCalib = Apply calibration table
* gainUse = CL/SN table to apply
* doBand = If >0.5 apply bandpass cal.
* BPVer = Bandpass table version
* flagVer = Input Flagging table version
* nThreads = Max allowed number of threads
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
* logfile = Log file for task
"""
################################################################
mess = "Machine gun the lifeboats!"
printMess(mess, logfile)
se=ObitTask.ObitTask("SrvrEdt")
try:
se.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv, se)
if target!=None:
if type(target)==list:
se.Sources=target
else:
se.Sources=[target]
se.flagVer = flagVer
se.doCalib = doCalib
se.gainUse = gainUse
se.doBand = doBand
se.BPVer = BPVer
se.minOK = minOK
se.flagTab = flagTab
se.nThreads = nThreads
se.taskLog = logfile
if debug:
se.debug = debug
se.i
# Trap failure
try:
if not check:
se.g
except Exception as exception:
print(exception)
mess = "UVFlag Failed retCode= "+str(se.retCode)
printMess(mess, logfile)
return 1
else:
pass
return 0
# end EVLASrvrEdt
def EVLAPACor(uv, err, CLver=0, FreqID=0,\
logfile='', check=False, debug=False):
"""
Make parallactic angle correction
    Applies corrections from CL table CLver (0=>highest) to a new CL table CLver+1
Returns task error code, 0=OK, else failed
* uv = UV data object
* err = Python Obit Error/message stack
    * CLver   = CL version to correct, 0=> highest
* FreqID = Frequency group identifier
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = show input
"""
################################################################
# Don't bother if not full polarization
d = uv.Desc.Dict
nstoke = int(d["inaxes"][d["jlocs"]])
if nstoke<4:
mess = "Skip Parallactic angle corrections - not full stokes"
printMess(mess, logfile)
return 0
# Don't bother if linear feeds
stok0 = d["crval"][d["jlocs"]]
if stok0<-4:
mess = "Skip Parallactic angle corrections - Linear feeds"
printMess(mess, logfile)
return 0
# Which CL?
iCLver = CLver
if iCLver<=0 and not check:
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
iCLver = uv.GetHighVer("AIPS CL")
    oCLver = iCLver+1  # New output
mess = "Parallactic angle corrections made to CL "+str(oCLver)
printMess(mess, logfile)
clcor = ObitTask.ObitTask("CLCor")
try:
clcor.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv,clcor)
clcor.corMode = "PANG"
clcor.calIn = iCLver
clcor.calOut = oCLver
clcor.CLCParm[0] = 1.0
clcor.FreqID = FreqID
clcor.taskLog = logfile
if debug:
clcor.i
clcor.debug = debug
# Trap failure
try:
if not check:
clcor.g
except Exception as exception:
print(exception)
mess = "CLCor Failed "
printMess(mess, logfile)
return 1
else:
pass
return 0
# end EVLAPACor
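# Illustrative usage sketch for EVLAPACor (hypothetical helper): apply
# parallactic angle corrections and log any failure.
def _exampleEVLAPACor(uv, err, logfile=""):
    """Sketch: parallactic angle correction, CL(highest) -> CL(highest+1)."""
    if EVLAPACor(uv, err, logfile=logfile)!=0:
        printMess("Parallactic angle correction failed", logfile)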
def EVLADelayCal(uv,DlyCals, err, solInt=0.5, smoTime=10.0, \
BChan=1, EChan=0, UVRange=[0.,0.], \
timeRange=[0.,0.], FreqID=1, doCalib=-1, gainUse=0, minSNR=5.0, \
refAnts=[0], doBand=-1, BPVer=0, flagVer=-1, doTwo=True, doZeroPhs=False, \
doSelf=False, doPlot=False, plotFile="./DelayCal.ps", \
nThreads=1, noScrat=[], logfile='', check=False, debug=False):
"""
Group delay calibration
Determine delay corrections from a list of calibrators
Solutions optionally smoothed to smoTime
Apply this SN table to the highest CL table writing a new CL table (Obit/CLCal)
Returns task error code, 0=OK, else failed
* uv = UV data object to calibrate
* DlyCals = List of delay calibrators possibly with model
* err = Python Obit Error/message stack
    * BChan      = First (1-rel) channel to include
* EChan = Highest channel to include
* timeRange = timerange of data to use
* UVRange = UV range (klambda) for solutions
* solInt = Calib solution interval (min)
* smoTime = Smoothing time applied to SN table (hr) if >0.0
* FreqID = Frequency group identifier
* minSNR = minimum acceptable SNR in Calib
* refAnts = List of reference antennas
* doCalib = Apply calibration table
* gainUse = CL/SN table to apply
* doBand = If >0.5 apply bandpass cal.
* BPVer = Bandpass table version
* flagVer = Input Flagging table version
* doTwo = If True, use one and two baseline combinations
                  for delay calibration, else only one baseline
    * doZeroPhs  = If True, zero phases of the delay solutions (SNCor 'ZPHS')
* doSelf = If True only apply solutions to the same source
* doPlot = If True make plots of SN gains
* plotFile = Name of postscript file for plots
* nThreads = Max. number of threads to use
* noScrat = list of disks to avoid for scratch files
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = show input
"""
################################################################
mess = "Determine parallel hand group delays"
printMess(mess, logfile)
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
# Set output (new) SN table
SNver = uv.GetHighVer("AIPS SN")+1
calib = ObitTask.ObitTask("Calib")
try:
calib.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
OK = False # Must have some work
calib.taskLog = logfile
if not check:
setname(uv,calib)
calib.flagVer = flagVer
calib.timeRange = timeRange
calib.UVRange = UVRange
calib.doCalib = doCalib
calib.gainUse = gainUse
calib.doBand = doBand
calib.BPVer = BPVer
calib.solMode = "DELA"
calib.solType = " "
calib.solInt = solInt
calib.minSNR = minSNR
calib.refAnts = refAnts
calib.solnVer = SNver
calib.noScrat = noScrat
calib.nThreads = nThreads
calib.doTwo = doTwo
# Loop over calibrators
for DlyCal in DlyCals:
print("DlyCal",DlyCal)
calib.Sources[0]= DlyCal["Source"]
calib.DataType2 = DlyCal["CalDataType"]
calib.in2File = DlyCal["CalFile"]
calib.in2Name = DlyCal["CalName"]
calib.in2Class = DlyCal["CalClass"]
calib.in2Seq = DlyCal["CalSeq"]
calib.in2Disk = DlyCal["CalDisk"]
calib.nfield = DlyCal["CalNfield"]
calib.CCVer = DlyCal["CalCCVer"]
calib.BComp = DlyCal["CalBComp"]
calib.EComp = DlyCal["CalEComp"]
calib.Cmethod = DlyCal["CalCmethod"]
calib.Cmodel = DlyCal["CalCmodel"]
calib.Flux = DlyCal["CalFlux"]
calib.Alpha = DlyCal["CalModelSI"]
calib.modelFlux = DlyCal["CalModelFlux"]
calib.modelPos = DlyCal["CalModelPos"]
calib.modelParm = DlyCal["CalModelParm"]
if debug:
calib.prtLv =6
calib.i
calib.debug = debug
# Trap failure
try:
mess = "Run Calib on "+calib.Sources[0]
printMess(mess, logfile)
if not check:
calib.g
except Exception as exception:
print(exception)
mess = "Calib Failed retCode= "+str(calib.retCode)+" Source "+calib.Sources[0]
printMess(mess, logfile)
#return None # Allow some to fail
else:
OK = True
# End loop over calibrators
# Something work?
if not OK:
printMess("All Delay calibration failed", logfile)
return 1
# Open/close UV to update header
uv.Open(UV.READONLY,err)
uv.Close(err)
if err.isErr:
OErr.printErr(err)
printMess("Update UV header failed", logfile)
return 1
SNver = uv.GetHighVer("AIPS SN")
# Zero phases?
if doZeroPhs:
sncor = ObitTask.ObitTask("SNCor")
try:
sncor.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv, sncor)
sncor.solnVer = SNver
sncor.corMode = 'ZPHS'
sncor.timeRange = timeRange
sncor.taskLog = logfile
sncor.debug = debug
if debug:
sncor.i
mess = "EVLADelayCal: SNCor: Zero phase in SN "+str(sncor.solnVer)
printMess(mess, logfile)
# Trap failure
try:
if not check:
sncor.g
except Exception as exception:
print(exception)
mess = "SNCor Failed retCode="+str(sncor.retCode)
printMess(mess, logfile)
return 1
else:
pass
# End SNCor
# Smooth if requested
if smoTime>0.0:
snsmo = ObitTask.ObitTask("SNSmo")
try:
snsmo.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv, snsmo)
snsmo.solnIn = SNver
snsmo.solnOut = SNver+1
snsmo.smoType = 'DELA'
snsmo.smoFunc = 'MWF '
snsmo.smoParm = [smoTime,smoTime,smoTime,smoTime,smoTime]
        # Clip at 0.5 nsec from median
snsmo.clipSmo = [5*smoTime,5*smoTime,5*smoTime,5*smoTime,5*smoTime]
snsmo.clipParm = [0.0, 0.0, 0.0, 0.5, 0.5]
snsmo.doBlank = True
snsmo.refAnt = refAnts[0]
snsmo.taskLog = logfile
snsmo.debug = debug
#snsmo.debug = True # DEBUG
#bombaroonie = BombsAwayWithCurtisLemay # DEBUG
if debug:
snsmo.i
mess = "EVLADelayCal: SNSmo SN "+str(snsmo.solnIn)+" to "+str(snsmo.solnOut)
printMess(mess, logfile)
# Trap failure
try:
if not check:
snsmo.g
except Exception as exception:
print(exception)
mess = "SNSmo Failed retCode="+str(snsmo.retCode)
printMess(mess, logfile)
return 1
else:
pass
# End SNSmo
# Open/close UV to update header
uv.Open(UV.READONLY,err)
uv.Close(err)
if err.isErr:
OErr.printErr(err)
mess = "Update UV header failed"
printMess(mess, logfile)
return 1
SNver = uv.GetHighVer("AIPS SN")
# Apply to CL table
retCode = EVLAApplyCal(uv, err, maxInter=1440.0, doSelf=doSelf, \
logfile=logfile, check=check,debug=debug)
if retCode!=0:
return retCode
# Plot fits? Tolerate failure.
if doPlot:
xretCode = EVLAPlotTab(uv, "SN", SNver, err, nplots=6, optype="DELA", \
logfile=logfile, check=check, debug=debug)
if xretCode==0:
xretCode = EVLAWritePlots (uv, 1, 0, plotFile, err, \
plotDesc="Group delay plots", \
logfile=logfile, check=check, debug=debug)
# end SN table plot
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
return 0
# end EVLADelayCal
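# Illustrative usage sketch for EVLADelayCal (hypothetical helper; DlyCals is
# assumed to be a list of calibrator dicts with the "Cal*" keys used in the
# loop above; solution/smoothing intervals are example values).
def _exampleEVLADelayCal(uv, DlyCals, err, refAnt, logfile=""):
    """Sketch: group delay calibration from a list of delay calibrators."""
    retCode = EVLADelayCal(uv, DlyCals, err, solInt=0.5, smoTime=0.25,
                           doCalib=2, gainUse=0, flagVer=2, refAnts=[refAnt],
                           nThreads=2, logfile=logfile)
    if retCode!=0:
        printMess("Delay calibration failed", logfile)
    return retCode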
def EVLASYCal(uv, err, SYVer=1, SYOut=0, calInt=0.1, applyOnly=False, \
smoTime=0.0833, smoFunc="MWF", SWUse=None, \
clipSmo=0.1, clipParm=5., doEdit=False, Sigma=20., editFG=-1,\
doPlot=False, plotFile="./DelayCal.ps", \
nThreads=1, logfile='', check=False, noScrat=[], debug=False):
"""
Gain calibration using Sys power (SY) table
Generate an SN table from operations on an SN Table
Apply SN table to the highest CL table writing a new CL table (Obit/CLCal)
Returns task error code, 0=OK, else failed
* uv = UV data object to calibrate
* err = Python Obit Error/message stack
* SYVer = Input SY table; 0=>highest
* SYOut = Output SY table; 0=>new
* calInt = Interval in output table (min)
* applyOnly = no SY clipping/smoothing, only apply SYOut
* smoTime = Smoothing time (hrs)
    * smoFunc   = Smoothing function: "MWF", "BOX", "GAUS"
* clipSmo = Smoothing time (hrs) for clipping
* clipParm = Clipping level about smoothed (sigma)
* doEdit = Edit/flag on the basis of amplitude solutions
* Sigma = Multiple of median RMS about median gain to clip/flag
Should be fairly large
* editFG = FG table to add flags to, <=0 -> no FG entries
* SWUse = if not None, a list of the ref SW per SW to use for
values in SY table
* doPlot = If True make plots of SN gains
* plotFile = Name of postscript file for plots
* nThreads = Number of threads for MWF smoothing
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* noScrat = list of disks to avoid for scratch files
* debug = show input
"""
################################################################
mess = "SysPower gain calibration"
printMess(mess, logfile)
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
# Set output (new) SN table
SNver = uv.GetHighVer("AIPS SN")+1
sygain = ObitTask.ObitTask("SYGain")
try:
sygain.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
OK = False # Must have some work
sygain.taskLog = logfile
if not check:
setname(uv,sygain)
sygain.SYVer = SYVer
sygain.SYOut = SYOut
sygain.solnOut = SNver
sygain.calInt = calInt
sygain.smoFunc = smoFunc
sygain.noScrat = noScrat
if applyOnly:
sygain.smoParm = [0.0]
sygain.clipSmo = [0.0]
sygain.clipParm = [0.0]
else:
sygain.smoParm = [smoTime]
sygain.clipSmo = [clipSmo]
sygain.clipParm = [clipParm]
sygain.nThreads = nThreads
if SWUse!=None:
sygain.SWUse = SWUse
if debug:
sygain.i
sygain.debug = debug
# Trap failure
try:
mess = "Run SYGain"
printMess(mess, logfile)
if not check:
sygain.g
except Exception as exception:
print(exception)
mess = "Sygain Failed retCode= "+str(sygain.retCode)+" Source "+sygain.Sources[0]
printMess(mess, logfile)
#return None # Allow some to fail
else:
OK = True
# Something work?
if not OK:
printMess("SYGain calibration failed", logfile)
return 1
# Open/close UV to update header
uv.Open(UV.READONLY,err)
uv.Close(err)
if err.isErr:
OErr.printErr(err)
printMess("Update UV header failed", logfile)
return 1
SNver = uv.GetHighVer("AIPS SN")
# Clip/flag by deviant amplitudes?
if doEdit:
EVLAEditSNAmp(uv, 0, err, sigma=Sigma, FGver=editFG, \
logfile=logfile, check=check, debug=debug)
OErr.printErrMsg(err, "Error clip/flag bad amplitudes")
# end edit
# Apply to CL table
retCode = EVLAApplyCal(uv, err, maxInter=1440.0, logfile=logfile, check=check,debug=debug)
if retCode!=0:
return retCode
# Plot fits? Tolerate failure.
if doPlot:
xretCode = EVLAPlotTab(uv, "SN", SNver, err, nplots=6, optype="AMP ", \
logfile=logfile, check=check, debug=debug)
if xretCode==0:
retCode = EVLAWritePlots (uv, 1, 0, plotFile, err, \
plotDesc="SysPower gain plots", \
logfile=logfile, check=check, debug=debug)
# end SN table plot
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
return 0
# end EVLASYCal
def EVLACalAP(uv, target, ACals, err, \
PCals=None, FQid=0, calFlux=None, timeRange = [0.0,1.0e20], UVRange=[0.,0.], \
doCalib=-1, gainUse=0, doBand=0, BPVer=0, flagVer=-1, \
BChan=1, EChan=1, avgPol=False, \
solnver=0, solInt=10.0/60.0, solSmo=0.0, nThreads=1, refAnt=0, ampScalar=False, \
doAmpEdit=False, ampSigma=20, flagFail=True, ampEditFG=-1,\
doPlot=False, plotFile="./APCal.ps", \
check=False, debug = False, noScrat=[], logfile = ""):
"""
Basic Amplitude and phase cal for EVLA data
Amplitude calibration can be based either on a point flux
density or a calibrator model.
An attempt is made to use the setjy.OPType="CALC" option.
Optional editing/flagging on the basis of deviant amplitudes.
Returns task error code, 0=OK, else failed
* uv = UV data object to calibrate
* target = Target source name or list of names to calibrate
* ACals = List of Amp calibrators possibly with model
Will use CalModelFlux and CalModelSI if given
* err = Obit error/message stack
* PCals = if given, List of phase calibrators possibly with model
* FQid = Frequency Id to process, 0=>any
    * BChan    = First (1-rel) channel to include
* EChan = Highest channel to include
* timeRange= timeRange for solutions
* UVRange = UV range (klambda) for solutions
* avgPol = Average polarizations before solving?
* doCalib = Apply calibration table, positive=>calibrate
* gainUse = CL/SN table to apply
* doBand = If >0.5 apply previous bandpass cal.
* BPVer = previous Bandpass table (BP) version
* flagVer = Flagging table to apply
* solnver = output SN table version (+1 if smooth), 0=>new
* solInt = solution interval (min)
* solSmo = if solSmo<solInt smooth amp solutions to solSmo
* nThreads = Number of threads to use
* refAnt = Reference antenna
* ampScalar= If true, scalar average data in calibration?
* doAmpEdit= Edit/flag on the basis of amplitude solutions
* ampSigma = Multiple of median RMS about median gain to clip/flag
Should be fairly large
* ampEditFG= FG table to add flags to, <=0 -> no FG entries
    * flagFail = If True, flag times of failed solutions in FG table ampEditFG
* doPlot = If True make plots of solutions
* plotFile = Name of postscript file for plots
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
* noScrat = list of disks to avoid for scratch files
* logfile = Log file for tasks
"""
################################################################
mess = "Amplitude and phase calibration"
printMess(mess, logfile)
solnVer2 = None
# Run SetJy
setjy = ObitTask.ObitTask("SetJy")
try:
setjy.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
setjy.taskLog = logfile
if not check:
setname(uv,setjy)
OK = False # Must have some work
# Loop over calibrators
OKAmpCals = [] # Calibrators SetJy is happy with
BadAmpCals = [] # Calibrators SetJy is unhappy with
for ACal in ACals:
setjy.Sources[0] = ACal["Source"]
if FQid:
setjy.FreqID=FQid
if ACal["CalModelFlux"]>0.0 and (not ACal["useSetJy"]): # Model given?
setjy.ZeroFlux[0] = ACal["CalModelFlux"]
if "Alpha" in setjy.__dict__:
setjy.Alpha = ACal["CalModelSI"]
else:
setjy.OPType="CALC"
setjy.ZeroFlux=[1.0,0.0,0.0,0.0]
setjy.Parms[1] = 4 # Perley and Butler 2012
if debug:
setjy.i
setjy.debug = debug
# Trap failure
try:
if not check:
setjy.g
except Exception as exception:
print(exception)
mess = "SetJy Failed retCode="+str(setjy.retCode)+" for "+setjy.Sources[0]
printMess(mess, logfile)
# return 1 # allow some failures
BadAmpCals.append(setjy.Sources[0])
else:
OK = True
OKAmpCals.append(setjy.Sources[0])
# end loop over calibrators
# Something work?
if not OK:
printMess("All Amplitude calibrators failed", logfile)
return 1
# output SN version
if solnver<=0:
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
solnVer = max(1,uv.GetHighVer("AIPS SN")+1)
else:
solnVer = solnver
# Phase cals and failed amp cals to 1.0
if PCals: # Any given?
callist = []
for PCal in PCals:
if PCal["Source"] not in OKAmpCals:
callist.append(PCal["Source"])
for cal in BadAmpCals:
if cal not in OKAmpCals:
callist.append(cal)
if len(callist)>0:
setjy.ZeroFlux=[1.0,0.0,0.0,0.0]
setjy.OPType="REJY"
#setjy.debug = True # DEBUG
for cal in callist:
setjy.Sources[0] = cal
if debug:
setjy.i
setjy.debug = debug
# Trap failure
try:
if not check:
setjy.g
except Exception as exception:
print(exception)
mess = "SetJy Failed retCode="+str(setjy.retCode)
printMess(mess, logfile)
return 1
else:
pass
# end if PCals
# Calib on Amp cals
calib = ObitTask.ObitTask("Calib")
try:
calib.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
calib.taskLog = logfile
if not check:
setname(uv,calib)
calib.flagVer = flagVer
calib.ampScalar= ampScalar
calib.doCalib = doCalib
calib.gainUse = gainUse
calib.doBand = doBand
calib.BPVer = BPVer
calib.solMode = "A&P"
calib.solType = "L1"
calib.nThreads = nThreads
calib.solInt = solInt
calib.refAnts = [refAnt]
calib.noScrat = noScrat
calib.solnVer = solnVer
calib.avgPol = avgPol
calib.UVRange = UVRange
calib.timeRange= timeRange
OK = False # Must have some work
OKCals2 = [] # List of ones that worked
# Loop over calibrators
for ACal in ACals:
calib.Sources[0]= ACal["Source"]
calib.DataType2 = ACal["CalDataType"]
calib.in2File = ACal["CalFile"]
calib.in2Name = ACal["CalName"]
calib.in2Class = ACal["CalClass"]
calib.in2Seq = ACal["CalSeq"]
calib.in2Disk = ACal["CalDisk"]
calib.nfield = ACal["CalNfield"]
calib.CCVer = ACal["CalCCVer"]
calib.BComp = ACal["CalBComp"]
calib.EComp = ACal["CalEComp"]
calib.Cmethod = ACal["CalCmethod"]
calib.Cmodel = ACal["CalCmodel"]
calib.Flux = ACal["CalFlux"]
calib.Alpha = ACal["CalModelSI"]
calib.modelFlux = ACal["CalModelFlux"]
calib.modelPos = ACal["CalModelPos"]
calib.modelParm = ACal["CalModelParm"]
if debug:
calib.i
calib.debug = debug
#calib.prtLv = 5
# Trap failure
try:
mess = "Run Calib on "+calib.Sources[0]
printMess(mess, logfile)
if not check:
calib.g
except Exception as exception:
print(exception)
mess = "Calib Failed retCode= "+str(calib.retCode)+" Source "+calib.Sources[0]
printMess(mess, logfile)
#return 1 # allow some failures
else:
OK = True
OKCals2.append(calib.Sources[0])
# end calibration loop
# Something work?
if not OK:
printMess("All amplitude calibrators failed", logfile)
return 1
# Setup CLCal for calibrators
clcal = ObitTask.ObitTask("CLCal")
try:
clcal.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
clcal.taskLog = logfile
ical = 0
if not check:
setname(uv,clcal)
for ACal in ACals:
clcal.calSour[ical] = ACal["Source"]
ical += 1
if not check:
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
clcal.calIn = uv.GetHighVer("AIPS CL")
else:
clcal.calIn = 1
clcal.calOut = clcal.calIn+1
clcal.interMode = "SELF"
clcal.FreqID = FQid
# Calib on phase reference if given
if PCals:
OK = False # Must have some work
# Loop over calibrators
for PCal in PCals:
mess = "Consider Calib for "+PCal["Source"]
printMess(mess, logfile)
# Ignore if already done in ACals
doIgnore = False
for ACal in ACals:
if ACal["Source"]==PCal["Source"]:
doIgnore = True
break
if doIgnore:
mess = PCal["Source"]+" in ACal list"
printMess(mess, logfile)
continue
calib.Sources[0]= PCal["Source"]
calib.DataType2 = PCal["CalDataType"]
calib.in2File = PCal["CalFile"]
calib.in2Name = PCal["CalName"]
calib.in2Class = PCal["CalClass"]
calib.in2Seq = PCal["CalSeq"]
calib.in2Disk = PCal["CalDisk"]
calib.nfield = PCal["CalNfield"]
calib.CCVer = PCal["CalCCVer"]
calib.BComp = PCal["CalBComp"]
calib.EComp = PCal["CalEComp"]
calib.Cmethod = PCal["CalCmethod"]
calib.Cmodel = PCal["CalCmodel"]
calib.Flux = PCal["CalFlux"]
calib.Alpha = PCal["CalModelSI"]
calib.modelFlux = PCal["CalModelFlux"]
calib.modelPos = PCal["CalModelPos"]
calib.modelParm = PCal["CalModelParm"]
if debug:
calib.i
calib.debug = debug
# Trap failure
try:
mess = "Run Calib on "+calib.Sources[0]
printMess(mess, logfile)
if not check:
calib.g
except Exception as exception:
print(exception)
mess = "Calib Failed retCode= "+str(calib.retCode)+" Source "+calib.Sources[0]
printMess(mess, logfile)
#return 1 # Allow some to fail
else:
OK = True
OKCals2.append(calib.Sources[0])
# end phase calibration loop
# Something work?
if not OK:
printMess("All phase calibrators failed", logfile)
return 1
# end if phase cals
solnVer2 = calib.solnVer
# Tell OKCals2 etc if debug
if debug:
mess = "OKCals2="+str(OKCals2)
printMess(mess, logfile)
mess = "OKAmpCals="+str(OKAmpCals)
printMess(mess, logfile)
mess = "BadAmpCals="+str(BadAmpCals)
# GetJy to set flux density scale
getjy = ObitTask.ObitTask("GetJy")
try:
getjy.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
getjy.taskLog = logfile
ical = 0; isou = 0
if not check:
setname(uv,getjy)
for ACal in OKAmpCals:
if ACal in OKCals2:
getjy.calSour[ical] = ACal
ical += 1
# Amplitude calibrators with no flux
for cal in BadAmpCals:
if (cal not in getjy.calSour) and (cal not in getjy.Sources) \
and (cal in OKCals2):
getjy.Sources[isou] = cal
isou += 1
# Phase calibrators
if PCals:
for PCal in PCals:
if (PCal["Source"] not in getjy.calSour) \
and (PCal["Source"] in OKCals2) \
and (PCal["Source"] not in getjy.Sources):
getjy.Sources[isou] = PCal["Source"]
isou += 1
getjy.solnVer = solnVer2
getjy.FreqID = FQid
if debug:
getjy.i
getjy.debug = debug
# Trap failure
try:
if not check:
getjy.g
except Exception as exception:
print(exception)
mess = "GetJy Failed retCode="+str(getjy.retCode)
printMess(mess, logfile)
return 1
else:
pass
# enter flagged solutions in FG table?
if flagFail:
EVLAFlagFailSN(uv, 0, err, FGver=ampEditFG, \
logfile=logfile, check=check, debug=debug)
OErr.printErrMsg(err, "Error flagging data with failed solutions")
# end flagFail
# Clip/flag by deviant amplitudes?
if doAmpEdit:
EVLAEditSNAmp(uv, 0, err, sigma=ampSigma, FGver=ampEditFG, \
logfile=logfile, check=check, debug=debug)
OErr.printErrMsg(err, "Error clip/flag bad amplitudes")
# end Amp edit
# Smoothing?
if solSmo>solInt:
solnVer = solnVer2 # input is old output
solnVer2 = solnVer+1 # new output= smoothed
snsmo=ObitTask.ObitTask("SNSmo")
try:
snsmo.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
snsmo.taskLog = logfile
if not check:
setname(uv,snsmo)
snsmo.solnIn = solnVer
snsmo.solnOut = solnVer2
snsmo.smoType = "AMPL"
snsmo.smoFunc = "MWF"
snsmo.refAnt = refAnt
snsmo.smoParm = [solSmo/60., solSmo/60.]
snsmo.clipSmo = [solSmo/60.0] # Clip wild amplitudes
snsmo.clipParm= [0.5]
mess = "Smooth SN "+str(snsmo.solnIn)+" to "+str(snsmo.solnOut)
printMess(mess, logfile)
if debug:
snsmo.i
snsmo.debug = debug
# run on all sources
# Trap failure
try:
if not check:
snsmo.g
except Exception as exception:
print(exception)
mess = "SNSmo Failed retCode="+str(snsmo.retCode)
printMess(mess, logfile)
return 1
else:
pass
# end smoothing
# Plot gain corrections?
if solnVer2==None:
solnVer2 = solnVer
if doPlot: # Tolerate failure
# Amplitude corrections
yretCode = EVLAPlotTab(uv, "SN", solnVer2, err, nplots=6, optype="AMP ", \
logfile=logfile, check=check, debug=debug)
# Phase corrections
xretCode = EVLAPlotTab(uv, "SN", solnVer2, err, nplots=6, optype="PHAS", \
logfile=logfile, check=check, debug=debug)
# R-L phase corrections
zretCode = EVLAPlotTab(uv, "SN", solnVer2, err, nplots=6, optype="PHAS", stokes="DIFF", \
logfile=logfile, check=check, debug=debug)
if (xretCode==0) and (yretCode==0) and (zretCode==0):
retCode = EVLAWritePlots (uv, 1, 0, plotFile, err, \
plotDesc="Amplitude and phase calibration plots", \
logfile=logfile, check=check, debug=debug)
# end SN table plot
# Set up for CLCal calibrators only - use phase & amp calibrators
if not check:
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
clcal.solnVer = uv.GetHighVer("AIPS SN")
else:
clcal.solnVer = 1
ical = 0
maxCal = len(clcal.calSour) # Maximum number of entries in clcal.calSour
clcal.interMode = "SELF"
if PCals:
for PCal in PCals:
            if ical>=maxCal:
                break
            clcal.calSour[ical] = PCal["Source"]
            clcal.Sources[ical] = PCal["Source"]
            ical += 1
for ACal in ACals:
if ACal["Source"] not in clcal.calSour and ical<maxCal:
clcal.calSour[ical] = ACal["Source"]
clcal.Sources[ical] = ACal["Source"]
ical += 1
# Apply to all
mess = "Apply calibration for calibrators"
printMess(mess, logfile)
mess = "Update CL "+str(clcal.calIn)+" with SN "+str(clcal.solnVer)+" to CL "+str(clcal.calOut)
printMess(mess, logfile)
if debug:
clcal.i
clcal.debug = debug
# Trap failure
try:
if not check:
clcal.g
except Exception as exception:
print(exception)
mess = "clcal Failed retCode="+str(clcal.retCode)
printMess(mess, logfile)
return 1
else:
pass
# Set up for CLCal on targets, use only Phase cals
clcal.interMode = "2PT"
for k in range(0,len(clcal.calSour)):
clcal.calSour[k] = ''
for k in range(0,len(clcal.Sources)):
clcal.Sources[k] = ''
    # Get list of calibrator sources
    clist = []          # list of calibrators
    for c in ACals:
        clist.append(c["Source"])
    if PCals:
        for c in PCals:
            clist.append(c["Source"])
if type(target)==list:
# Use souCode='-CAL' to get all non calibrators
clcal.souCode = '-CAL'
if len(clcal.Sources)>=len(target):
tlist = target # List of targets not in calibrator list
else:
tlist = [] # Everything
else:
tlist=[target]
itarg = 0
maxCal = len(clcal.Sources) # Maximum number of entries in clcal.Sources
for t in tlist:
        if itarg>=maxCal:
            break
        clcal.Sources[itarg] = t
        itarg += 1
ical = 0
maxCal = len(clcal.calSour) # Maximum number of entries in clcal.calSour
if PCals:
for PCal in PCals:
            if ical>=maxCal:
                break
            clcal.calSour[ical] = PCal["Source"]
            ical += 1
# Apply to targets
mess = "Apply calibration for targets"
printMess(mess, logfile)
mess = "Update CL "+str(clcal.calIn)+" with SN "+str(clcal.solnVer)+" to CL "+str(clcal.calOut)
printMess(mess, logfile)
if debug:
clcal.i
clcal.debug = debug
# Trap failure
try:
if not check:
clcal.g
except Exception as exception:
print(exception)
mess = "clcal Failed retCode="+str(clcal.retCode)
printMess(mess, logfile)
return 1
else:
pass
return 0
    # end EVLACalAP
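# Illustrative usage sketch for EVLACalAP (hypothetical helper; ACals/PCals are
# assumed to be lists of calibrator dicts with the "Cal*" keys used above, and
# the calibration/editing parameters are example values).
def _exampleEVLACalAP(uv, targets, ACals, PCals, err, refAnt, logfile=""):
    """Sketch: amplitude & phase calibration of targets against ACals/PCals."""
    retCode = EVLACalAP(uv, targets, ACals, err, PCals=PCals, doCalib=2,
                        doBand=1, BPVer=1, flagVer=2, solInt=0.5, refAnt=refAnt,
                        doAmpEdit=True, ampSigma=20.0, nThreads=2, logfile=logfile)
    return retCode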
def EVLABPCal(uv, BPCals, err, newBPVer=1, timerange=[0.,0.], UVRange=[0.,0.], \
doCalib=2, gainUse=0, doBand=0, BPVer=0, flagVer=-1, \
doCenter1=None, BChan1=1, EChan1=0, \
BChan2=1, EChan2=0, ChWid2=1, \
solInt1=0.0, solInt2=0.0, solMode="A&P", solType="L1",refAnt=0, ampScalar=False, \
doAuto=False, doPol=False, avgPol=False, avgIF=False, doAmpEdit=False, ampSigma=20, \
doPlot=False, plotFile="./BPSpec.ps", doBPPlot=False, plotBPFile="./BPTab.ps", \
check=False, debug = False, nThreads=1, noScrat=[], logfile = ""):
"""
    Bandpass calibration
    Do bandpass calibration, write BP table
Returns task error code, 0=OK, else failed
Calibration is done in two passes
1) First a wideband phase only calibration using channels
BChan1 to EChan1 or the central doCenter1 fraction of the band
using a solution interval of solInt1. This solution is applied
to all selected data and used in the second pass.
2) Second channels in the range BChan2 to EChan2 averaging blocks
of ChWid2 are calibrated using solType and solMode for solInt2 and
the results written as the output BP table.
3) Output BP table may be edited blanking solutions excessively
deviant from the antenna median.
    The Calibrator model may be given as either an Image with CC table,
a parameterized model or a point source with the flux density in
the SU table.
See BPass documentation for details
* uv = UV data object to calibrate
* BPCals = list of bandpass calibrators/models
* err = Obit error/message stack
* newBPVer = output BP table
* doCalib = Apply calibration table, positive=>calibrate
* gainUse = CL/SN table to apply
* doBand = If >0.5 apply previous bandpass cal.
* BPVer = previous Bandpass table (BP) version
* flagVer = Input Flagging table version
* timerange= timerange in days to use
* UVRange = UV range (klambda) for solutions
* doCenter1= If defined, the center fraction of the bandpass to use first pass
* BChan1 = Low freq. channel, initial cal
* EChan1 = Highest freq channel, initial cal
* BChan2 = Low freq. channel for BP cal
* EChan2 = Highest freq channel for BP cal
* ChWid2 = Number of channels in running mean BP soln,
* solInt1 = first solution interval (min), 0=> scan average
* solInt2 = second solution interval (min)
    * solMode  = solution mode 'A&P', 'P', 'P!A'
    * solType  = solution type ' ', 'L1'
* refAnt = Reference antenna
* ampScalar= If true, scalar average data in calibration
* doAuto = Use autocorrelation spectra? Else, crosscorrelation
* doPol = Apply polarization cal?
* avgPol = Avg. poln. in solutions?
* avgIF = Avg. IFs. in solutions?
* doAmpEdit= Edit/flag on the basis of amplitude solutions
* ampSigma = Multiple of median RMS about median gain to clip/flag
Should be fairly large
* doPlot = If True make plots of corrected data
* plotFile = Name of postscript file for plots
* doBPPlot = If True make plots of bandpass table
* plotBPFile = Name of postscript file for bandpass table plots
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
* nThreads = Number of threads to use
* noScrat = list of AIPS disks to avoid for scratch files
* logfile = Log file for task
"""
################################################################
mess = "Bandpass calibrate data"
printMess(mess, logfile)
bpass = ObitTask.ObitTask("BPass")
try:
bpass.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
OK = False # Must have some work
bpass.taskLog = logfile
if not check:
setname(uv,bpass)
bpass.doBand = doBand
bpass.BPVer = BPVer
bpass.BPSoln = newBPVer
bpass.doCalib = doCalib
bpass.gainUse = gainUse
bpass.flagVer = flagVer
bpass.doPol = doPol
bpass.solInt1 = solInt1
bpass.solInt2 = solInt2
bpass.solMode = solMode
bpass.solType = solType
bpass.refAnt = refAnt
bpass.timeRange = timerange
bpass.UVRange = UVRange
bpass.ChWid2 = ChWid2
bpass.doAuto = doAuto
bpass.avgPol = avgPol
bpass.avgIF = avgIF
bpass.ampScalar = ampScalar
bpass.noScrat = noScrat
bpass.nThreads = nThreads
# Channel selection
if not check:
d = uv.Desc.Dict
nchan = int(d["inaxes"][d["jlocf"]])
else:
nchan = 1
# Center fraction requested?
if doCenter1:
# Center doCenter1 fraction of channels for first cal
mchan = int(nchan*doCenter1)
bpass.BChan1 = max(1, int(nchan//2)-int(mchan//2))
bpass.EChan1 = min(nchan, int(nchan//2)+int(mchan//2))
else:
bpass.BChan1 = BChan1
bpass.EChan1 = EChan1
bpass.BChan2 = BChan2
bpass.EChan2 = EChan2
if bpass.EChan2<=0:
bpass.EChan2 = nchan
# Loop over calibrators
for BPCal in BPCals:
bpass.Sources[0]= BPCal["Source"]
bpass.DataType2 = BPCal["CalDataType"]
bpass.in2File = BPCal["CalFile"]
bpass.in2Name = BPCal["CalName"]
bpass.in2Class = BPCal["CalClass"]
bpass.in2Seq = BPCal["CalSeq"]
bpass.in2Disk = BPCal["CalDisk"]
bpass.nfield = BPCal["CalNfield"]
bpass.CCVer = BPCal["CalCCVer"]
bpass.BComp = BPCal["CalBComp"]
bpass.EComp = BPCal["CalEComp"]
bpass.Cmethod = BPCal["CalCmethod"]
bpass.Cmodel = BPCal["CalCmodel"]
bpass.Flux = BPCal["CalFlux"]
bpass.Alpha = BPCal["CalModelSI"]
bpass.modelFlux = BPCal["CalModelFlux"]
bpass.modelPos = BPCal["CalModelPos"]
bpass.modelParm = BPCal["CalModelParm"]
if debug:
bpass.i
bpass.debug = debug
# Trap failure
try:
mess = "Run BPass on "+bpass.Sources[0]
printMess(mess, logfile)
if not check:
bpass.g
pass
except Exception as exception:
print(exception)
mess = "BPass Failed retCode="+str(bpass.retCode)
printMess(mess, logfile)
#return 1
else:
OK = True
# End calibrator loop
# Something work?
if not OK:
printMess("All BPass calibration failed", logfile)
return 1
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
# Clip/flag by deviant amplitudes?
if doAmpEdit:
EVLAEditBPAmp(uv, newBPVer, err, sigma=ampSigma, \
logfile=logfile, check=check, debug=debug)
OErr.printErrMsg(err, "Error clip/flag bad amplitudes")
# end Amp edit
# Plot corrected data?
if doPlot:
scr = EVLASpecPlot( uv, bpass.Sources[0], timerange, refAnt, err, \
Stokes=["RR","LL"], doband=1, \
plotFile=plotFile, check=check, logfile=logfile )
if not UV.PIsA(scr):
return 0 # tolerate failure
retCode = EVLAWritePlots (scr, 1, 0, plotFile, err, \
plotDesc="Bandpass calibration plots", \
logfile=logfile, check=check, debug=debug)
if retCode!=0:
return retCode
scr.Zap(err)
# end data plots
# Plot BP table?
if doBPPlot:
retCode = EVLAPlotBPTab(uv, newBPVer, err, \
check=check, logfile=logfile )
if retCode!=0:
return 0 # tolerate failure
retCode = EVLAWritePlots (uv, 1, 0, plotBPFile, err, \
plotDesc="Bandpass table plots", \
logfile=logfile, check=check, debug=debug)
if retCode!=0:
return retCode
# end BP tableplots
return 0
# End EVLABPCal
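# Illustrative usage sketch for EVLABPCal (hypothetical helper; BPCals is
# assumed to be a list of calibrator dicts with the "Cal*" keys used above,
# and the pass-1/pass-2 settings are example values).
def _exampleEVLABPCal(uv, BPCals, err, refAnt, logfile=""):
    """Sketch: two-pass bandpass calibration using the central half of the band first."""
    retCode = EVLABPCal(uv, BPCals, err, newBPVer=1, doCalib=2, gainUse=0,
                        flagVer=2, doCenter1=0.5, solInt1=0.0, solInt2=10.0,
                        refAnt=refAnt, nThreads=2, logfile=logfile)
    return retCode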
def EVLASplit(uv, target, err, FQid=1, outClass=" ", logfile = "", \
check=False, debug = False):
"""
Write calibrated data
Returns task error code, 0=OK, else failed
    * uv         = UV data object to split
* target = Target source name source name or list of names
* err = Obit error/message stack
* FQid = Frequency Id to process
* logfile = Log file for task
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
"""
################################################################
split=ObitTask.ObitTask("Split")
try:
split.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
split.taskLog = logfile
if not check:
setname(uv,split)
if type(target)==list:
split.Sources=target
else:
split.Sources=[target]
split.doCalib = 2
split.gainUse = 0
split.flagVer = 1
split.FreqID = FQid
split.outClass = outClass
split.outDisk = split.inDisk
if debug:
split.i
split.debug = debug
# Trap failure
try:
if not check:
split.g
except Exception as exception:
print(exception)
mess = "split Failed retCode="+str(split.retCode)
printMess(mess, logfile)
return 1
else:
pass
return 0
# end EVLAsplit
def EVLACalAvg(uv, avgClass, avgSeq, CalAvgTime, err, \
FQid=0, \
flagVer=0, doCalib=2, gainUse=0, doBand=1, BPVer=0, doPol=False, \
BIF=1, EIF=0, BChan=1, EChan=0, \
avgFreq=0, chAvg=1, Compress=False, \
noScrat=[], nThreads=1, logfile = "", check=False, debug=False):
"""
Calibrate, select and/or average data to a multisource file
Returns task error code, 0=OK, else failed
Generates NX and initial dummy CL table if needed
    * uv         = UV data object to calibrate and average
* avgClass = Class name of averaged data
* avgSeq = Sequence number of averaged data
* CalAvgTime = Averaging time in sec
* err = Obit error/message stack
* FQid = Frequency Id to process, 0=>all
* doCalib = Apply calibration table, positive=>calibrate
* gainUse = CL/SN table to apply
* doBand = If >0.5 apply previous bandpass cal.
* BPVer = previous Bandpass table (BP) version
* doPol = Calibrate polarization?
* BIF = first IF to copy
* EIF = highest IF to copy
* BChan = first channel to copy
* EChan = highest channel to copy
* flagVer = Input Flagging table version
* avgFreq = If 0 < avgFreq <= 1 then average channels
* chAvg = Number of channels to average
* Compress = Write "Compressed" data?
* noScrat = list of AIPS disks to avoid for scratch files
* nThreads = Number of threads to use
* logfile = Log file for task
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
"""
################################################################
mess = "Average/calibrate data"
printMess(mess, logfile)
# sanity check for averaging in freq
if avgFreq==0:
chAvg = 1
splat=ObitTask.ObitTask("Splat")
try:
splat.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
splat.taskLog = logfile
if not check:
setname(uv,splat)
splat.doCalib = doCalib
splat.gainUse = gainUse
splat.doBand = doBand
splat.BPVer = BPVer
splat.doPol = doPol
splat.BIF = BIF
splat.EIF = EIF
splat.BChan = BChan
splat.EChan = EChan
splat.flagVer = flagVer
splat.FreqID = FQid
splat.timeAvg = CalAvgTime
splat.avgFreq = avgFreq
splat.chAvg = chAvg
splat.Compress = Compress
splat.outClass = avgClass
splat.outDisk = splat.inDisk
splat.outSeq = avgSeq
splat.noScrat = noScrat
splat.nThreads = nThreads
if debug:
splat.i
splat.debug = debug
# Trap failure
try:
if not check:
splat.g
pass
except Exception as exception:
print(exception)
mess = "Splat Failed retCode="+str(splat.retCode)
printMess(mess, logfile)
return 1
else:
pass
# end average
# Get calibrated/averaged data, index and make CL table 1 if doCalib>0
if not check:
mess = "Generate output CL table"
printMess(mess, logfile)
try:
uvc = UV.newPAUV("AIPS UV DATA", splat.inName, avgClass, splat.inDisk, avgSeq, True, err)
if err.isErr:
print("Error creating cal/avg AIPS data")
OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
# Dummy CL table
solint = splat.timeAvg * 2 # CL table interval twice averaging
# Open and close image to sync with disk
uvc.Open(UV.READONLY, err)
uvc.Close(err)
hiver = uvc.GetHighVer("AIPS CL")
if (doCalib>0) or (hiver<=0):
UV.PTableCLGetDummy(uvc, uvc, 0, err, solInt=solint)
pass
if err.isErr:
print("Error creating cal/avg AIPS data CL table")
OErr.printErrMsg(err, "Error creating cal/avg AIPS data CL table")
# Index - now in Splat
#UV.PUtilIndex (uvc, err)
# Zap any SY tables
z=uvc.ZapTable("AIPS SY",-1,err)
if err.isErr:
print("Error indexing cal/avg AIPS data")
OErr.printErrMsg(err, "Error indexing cal/avg AIPS data")
del uvc
except Exception as exception:
print(exception)
OErr.printErr(err)
mess = "Indexing or creating CL table failed"
printMess(mess, logfile)
return 1
else:
pass
return 0
# end EVLACalAvg
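# Illustrative usage sketch for EVLACalAvg (hypothetical helper; output
# class/seq, 10 s time averaging and 4-channel averaging are example values).
def _exampleEVLACalAvg(uv, err, logfile=""):
    """Sketch: apply calibration and average to a new multisource file."""
    retCode = EVLACalAvg(uv, "CalAvg", 1, 10.0, err, flagVer=2, doCalib=2,
                         gainUse=0, doBand=1, BPVer=1, avgFreq=1, chAvg=4,
                         Compress=True, nThreads=2, logfile=logfile)
    return retCode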
def EVLACalAvg2(uv, avgClass, avgSeq, CalAvgTime, err, FQid=0, \
flagVer=0, doCalib=2, gainUse=0, doBand=1, BPVer=0, doPol=False, \
BIF=1, EIF=0, BChan=1, EChan=0, chAvg=1, Compress=False, \
logfile = "", check=False, debug=False):
"""
Calibrate and average data to a multisource file
Returns task error code, 0=OK, else failed
Generates NX and initial dummy CL table
    * uv         = UV data object to calibrate and average
* avgClass = Class name of averaged data
* avgSeq = Sequence number of averaged data
* CalAvgTime = Averaging time in sec
* err = Obit error/message stack
* FQid = Frequency Id to process, 0=>all
* doPol = Calibrate polarization?
* doCalib = Apply calibration table, positive=>calibrate
* gainUse = CL/SN table to apply
* doBand = If >0.5 apply previous bandpass cal.
* BPVer = previous Bandpass table (BP) version
* BIF = first IF to copy
* EIF = highest IF to copy
* BChan = first channel to copy
* EChan = highest channel to copy
* flagVer = Input Flagging table version
* Compress = Write "Compressed" data?
* logfile = Log file for task
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
"""
################################################################
mess = "Average/calibrate calibrate data"
printMess(mess, logfile)
outuv = None
# Create output
if not check:
# Set calibration, editing and selection
info = uv.List
info.set("doCalSelect", True)
info.set("FreqID", FQid)
info.set("doPol", doPol)
info.set("doCalib", doCalib)
info.set("gainUse", gainUse)
info.set("doBand", doBand)
info.set("doPol", doPol)
info.set("BPVer", BPVer)
info.set("BIF", BIF)
info.set("EIF", EIF)
info.set("BChan", BChan)
info.set("EChan", EChan)
info.set("flagVer", flagVer)
info.set("Compress", Compress,)
#print "info", info.Dict # DEBUG
# Open and close to set
uv.Open(UV.READCAL, err)
outuv = UV.newPAUV("CalAvg", uv.Aname, avgClass, uv.Disk, avgSeq, False, err)
uv.Clone (outuv, err)
uv.Close(err)
#outuv.Header(err) # debug
if err.isErr:
print("Error creating cal/avg AIPS uv data")
OErr.printErrMsg(err, "Error creating cal/avg AIPS data")
# Average
if not check:
try:
mess = "Copy/average/calibrate data to "+\
outuv.Aname+" . "+outuv.Aclass+" . "+str(outuv.Disk)+ \
" . "+str(outuv.Aseq)+" cno: "+str(outuv.Acno)
printMess(mess, logfile)
info = outuv.List
info.set("Compress", Compress,)
UV.PUtilAvgT (uv, outuv, err, timeAvg=CalAvgTime/60.)
if err.isErr:
print("Error cal/avg AIPS uv data")
OErr.printErrMsg(err, "Error cal/avg AIPS data")
# Do History - previous already copied
if outuv:
del outuv
outuv = UV.newPAUV("CalAvg", uv.Aname, avgClass, uv.Disk, avgSeq, True, err)
#print "DEBUG Copy history"
inHistory = History.History("inhistory", uv.List, err)
outHistory = History.History("outhistory", outuv.List, err)
# Add history
#print "DEBUG Add history"
outHistory.Open(History.READWRITE, err)
outHistory.TimeStamp(" Start Obit CalAvg",err)
outHistory.WriteRec(-1,"CalAvg CalAvgTime = "+str(CalAvgTime),err)
outHistory.WriteRec(-1,"CalAvg inName = "+uv.Aname, err)
outHistory.WriteRec(-1,"CalAvg inClass = "+uv.Aclass, err)
outHistory.WriteRec(-1,"CalAvg inDisk = " +str(uv.Disk),err)
outHistory.WriteRec(-1,"CalAvg inSeq = " +str(uv.Aseq),err)
outHistory.WriteRec(-1,"CalAvg FreqID = "+str(FQid),err)
outHistory.WriteRec(-1,"CalAvg doPol = "+str(doPol),err)
outHistory.WriteRec(-1,"CalAvg doCalib = "+str(doCalib),err)
outHistory.WriteRec(-1,"CalAvg gainUse = "+str(gainUse),err)
outHistory.WriteRec(-1,"CalAvg doBand = "+str(doBand),err)
outHistory.WriteRec(-1,"CalAvg BPVer = "+str(BPVer),err)
outHistory.WriteRec(-1,"CalAvg BIF = "+str(BIF),err)
outHistory.WriteRec(-1,"CalAvg EIF = "+str(EIF),err)
outHistory.WriteRec(-1,"CalAvg BChan = "+str(BChan),err)
outHistory.WriteRec(-1,"CalAvg EChan = "+str(EChan),err)
outHistory.WriteRec(-1,"CalAvg flagVer = "+str(flagVer),err)
outHistory.WriteRec(-1,"CalAvg Compress = "+str(Compress),err)
outHistory.Close(err)
#print "DEBUG Copy history done"
if err.isErr:
print("Error cal/avg History")
OErr.printErrMsg(err, "Error cal/avg History")
# end copy+history
except Exception as exception:
print(exception)
OErr.printErr(err)
mess = "Calibrate and average uv data failed"
printMess(mess, logfile)
return 1
else:
pass
# Index and make CL table
if not check:
try:
# Dummy CL table
solint = 2 * CalAvgTime/60. # CL table interval twice averaging
UV.PTableCLGetDummy(outuv, outuv, 0, err, solInt=solint)
if err.isErr:
print("Error creating cal/avg AIPS data CL table")
OErr.printErrMsg(err, "Error creating cal/avg AIPS data CL table")
# Index
UV.PUtilIndex (outuv, err)
if err.isErr:
print("Error indexing cal/avg AIPS data")
OErr.printErrMsg(err, "Error indexing cal/avg AIPS data")
except Exception as exception:
print(exception)
OErr.printErr(err)
mess = "Indexing or creating CL table failed"
printMess(mess, logfile)
return 1
else:
pass
if outuv:
del outuv
return 0
# end EVLACalAvg2
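# Illustrative use of EVLACalAvg2 (sketch only; the class/seq, table versions
# and log file name are hypothetical, and uv/err are assumed to already exist
# in the calling script):
#   retCode = EVLACalAvg2(uv, "CalAvg", 1, 10.0, err, doCalib=2, gainUse=0,
#                         doBand=1, BPVer=0, flagVer=1, Compress=True,
#                         logfile="pipeline.log")
#   if retCode != 0:
#       raise RuntimeError("EVLACalAvg2 failed")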
def EVLASetImager (uv, target, outIclass="", nThreads=1, noScrat=[], logfile = "", check=False):
"""
Setup to run Imager or MFImage
return MFImage task interface object
* uv = UV data object to image
* target = Target source name or list of names
* outIclass= output class
* check     = Only check script, don't set file names from uv
* nThreads = Number of threads to use
* noScrat = list of disks to avoid for scratch files
* logfile = Log file for task
"""
################################################################
img = ObitTask.ObitTask("MFImage")
try:
img.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
img.taskLog = logfile
if not check:
setname(uv,img)
img.outDisk = img.inDisk
img.out2Disk = img.inDisk
if type(target)==list:
img.Sources=target
else:
img.Sources=[target]
img.outClass = outIclass
img.doCalib = 2
img.doBand = 1
img.UVTaper = [0.0, 0.0, 0.0]
img.UVRange = [0.0,0.0]
img.FOV = 0.05
img.autoWindow = True
img.BLFact = 1.01
img.BLchAvg = True
img.Niter = 5000
img.Gain = 0.10
img.maxPSCLoop = 3
img.minFluxPSC= 0.5
img.solPInt = 10.0/60.
img.solPType = "L1"
img.maxASCLoop= 1
img.minFluxASC= 1.5
img.solAInt = 1.0
img.minSNR = 3.0
img.avgPol = True
img.avgIF = True
img.nThreads = nThreads
img.noScrat = noScrat
return img
# end EVLASetImager
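# Illustrative use of EVLASetImager (sketch only; the target name and thread
# count are hypothetical, uv/err assumed to exist):
#   img = EVLASetImager(uv, "J1234+5678", outIclass="IClean", nThreads=4,
#                       noScrat=[0], logfile="pipeline.log")
#   img.FOV = 0.1    # adjust defaults before running
#   img.g            # run the MFImage task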
def EVLARLDelay(uv, err, \
RLDCal=None, BChan=1, EChan = 0,\
UVRange=[0.0,0.0], timerange = [0.0,1000.0], \
soucode=" ", doCalib=-1, gainUse=0, \
doBand=0, BPVer=0, flagVer=-1, \
refAnt=0, Antennas=[0], doPol=-1, PDVer=1, numIFs=1, \
nThreads=1, noScrat=[], logfile = "",check=False, debug = False):
"""
Determine R-L delay
Returns task error code, 0=OK, else failed
R-L Delay calibration creating and applying new AIPS SN table
to (new) highest numbered CL table on uv
* uv       = UV data object to calibrate
* err = Obit error/message stack
* RLDCal = An array of triplets with R-L calibrators:
(name, R-L phase (deg at 1 GHz), RM (rad/m**2))
NB: more than 1 may be a bad idea
* BChan = First (1-rel) channel number
* EChan = Highest channel number. 0=> high in data.
* UVRange = Range of baseline used in kilowavelengths
* soucode = Calibrator code
* doCalib = Apply calibration table, positive=>calibrate
* gainUse = CL/SN table to apply
* timerange= time range of data (days)
* doBand = If >0.5 apply previous bandpass cal.
* BPVer = previous Bandpass table (BP) version
* flagVer = Flagging table to apply
* refAnt = Reference antenna REQUIRED
* numIFs = number of IFs to use per solution
* Antennas = List of antennas to include
* doPol = Apply polarization cal?
* PDVer = PD polarization table to apply.
* noScrat = list of AIPS disks to avoid for scratch files
* nThreads = Number of threads to use in imaging
* logfile = Log file for task
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
"""
################################################################
# Don't bother if not full polarization
d = uv.Desc.Dict
nstoke = int(d["inaxes"][d["jlocs"]])
if nstoke<4:
mess = "Skip XPol delay corrections - not full stokes"
printMess(mess, logfile)
return 0
mess = "XPol delay calibration "
printMess(mess, logfile)
ncal = len(RLDCal) # How many calibrators?
OK = False # Must have some work
rldly=ObitTask.ObitTask("RLDly")
try:
rldly.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
rldly.taskLog = logfile
if not check:
setname(uv,rldly)
rldly.Antennas = Antennas
rldly.timeRange[0] = timerange[0]
rldly.timeRange[1] = timerange[1]
rldly.BChan = BChan
rldly.EChan = EChan
rldly.UVR_Full[0] = UVRange[0];
rldly.UVR_Full[1] = UVRange[1];
rldly.doCalib = doCalib
rldly.gainUse = gainUse
rldly.flagVer = flagVer
if ("numIFs" in rldly.__dict__):
rldly.numIFs = numIFs
rldly.doPol = doPol
if ("PDVer" in rldly.__dict__):
rldly.PDVer = PDVer
rldly.doBand = doBand
rldly.BPVer = BPVer
rldly.refAnt = refAnt
rldly.minSNR = 1 # Minimum SNR - this should be passed
rldly.prtLv = 1
rldly.nThreads = nThreads
# Loop over calibrators
for ical in range (0,ncal):
rldly.Sources[0]= RLDCal[ical][0]
rldly.RLPhase = RLDCal[ical][1]
rldly.RM = RLDCal[ical][2]
mess = "R-L delay calibration using "+rldly.Sources[0]
printMess(mess, logfile)
if debug:
print("timerange", rldly.timerang)
rldly.i
rldly.debug = True
# Trap failure
try:
if not check:
rldly.g
except Exception as exception:
print(exception)
mess = "rldly Failed retCode="+str(rldly.retCode)
printMess(mess, logfile)
#return 1
else:
OK = True
# end loop over calibrators
# Something work?
if not OK:
printMess("All RLDelay calibration failed", logfile)
return 1
# Get output SN table
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
lsnver = uv.GetHighVer("AIPS SN")
# Apply to CL table
retCode = EVLAApplyCal(uv, err, SNver=lsnver, CLin = gainUse, \
maxInter=14400.0, \
logfile=logfile, check=check,debug=debug)
if retCode!=0:
return retCode
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
# end R-L delay cal
return 0
# end EVLARLDelay
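# Illustrative use of EVLARLDelay (sketch only; the calibrator triplet,
# reference antenna and tables are hypothetical, uv/err assumed to exist):
#   RLDCal = [("3C286", 66.0, 0.0)]   # (name, R-L phase deg @ 1 GHz, RM)
#   retCode = EVLARLDelay(uv, err, RLDCal=RLDCal, doCalib=2, gainUse=0,
#                         refAnt=5, logfile="pipeline.log")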
def EVLAPolCal(uv, InsCals, err, InsCalPoln=None, \
doCalib=2, gainUse=0, doBand=1, BPVer=0, flagVer=-1, \
solType=" ", fixPoln=False, avgIF=False, \
solInt=0.0, refAnt=0, ChInc=1, ChWid=1, \
doFitRL=False, doFitOri=True,
check=False, debug = False, \
nThreads=1, noScrat=[], logfile = ""):
"""
Instrumental polarization calibration
Determines instrumental polarization terms using Obit task PCal
Returns task error code, 0=OK, else failed
* uv = UV data object to calibrate
* InsCals = Instrumental poln calibrators, name or list of names
If None no instrumental cal
* err = Obit error/message stack
* InsCalPoln if non None then the list of source parameters as
tuples in the order of calibrators in InsCals,
(PPol, RLPhase, RM)
PPol = fractional poln, <0 => fit
RLPhase = R-L phase difference in deg
RM = Rotation measure
* doCalib = Apply prior calibration table, positive=>calibrate
* gainUse = CL/SN table to apply
* doBand = >0 => apply bandpass calibration
* BPVer = AIPS BP table to apply
* flagVer = Input Flagging table version
* solType = solution type, " ", "LM "
* fixPoln = if True, don't solve for source polarization in ins. cal
assumed 0, ignored if InsCalPoln given
* avgIF = NYI if True, average IFs in ins. cal.
* solInt = instrumental solution interval (min)
* refAnt = Reference antenna
* ChInc = channel increment for solutions
* ChWid = number of channels to average for solution.
* doFitRL = Fit R-L (or X-Y) gain phase
* doFitOri = Fit (linear feed) orientations?
* nThreads = Number of threads to use in imaging
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
* noScrat = list of disks to avoid for scratch files
* logfile = Log file for task
"""
################################################################
# Don't bother if not full polarization
d = uv.Desc.Dict
nstoke = int(d["inaxes"][d["jlocs"]])
if nstoke<4:
mess = "Skip Instrumental polarization corrections - not full stokes"
printMess(mess, logfile)
return 0
mess = "Instrumental polarization calibration "
printMess(mess, logfile)
# Instrumental calibration
if InsCals!=None:
pcal = ObitTask.ObitTask("PCal")
try:
pcal.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv,pcal)
if type(InsCals)==list:
pcal.Sources = InsCals
pcal.doFitI[0] = True
else:
pcal.Sources = [InsCals]
i = 0
for s in InsCals:
pcal.doFitI[i] = True
i += 1
# Polarization fixed?
if InsCalPoln:
if type(InsCals)==list:
n = len(InsCals)
else:
n = 1
for i in range(0,n):
if InsCalPoln[i][0]>=0.0:
pcal.doFitPol[i] = False
pcal.PPol[i] = InsCalPoln[i][0]
pcal.RLPhase[i] = InsCalPoln[i][1]
pcal.RM[i] = InsCalPoln[i][2]
else:
pcal.doFitPol[i] = True
elif fixPoln:
if type(InsCals)==list:
i = 0
for s in InsCals:
pcal.doFitPol[i] = False
i += 1
else:
pcal.doFitPol[0] = False
pcal.doCalib = doCalib
pcal.gainUse = gainUse
pcal.doBand = doBand
pcal.BPVer = BPVer
pcal.flagVer = flagVer
pcal.solnType = solType
pcal.solInt = solInt
pcal.ChInc = ChInc
pcal.ChWid = ChWid
pcal.refAnt = refAnt
pcal.doFitRL = doFitRL
pcal.doFitOri = doFitOri
pcal.prtLv = 2
pcal.PDSoln = 1
pcal.CPSoln = 1
pcal.nThreads = nThreads
for i in range(0,len(pcal.doFitI)):
pcal.doFitI[i] = True
pcal.taskLog = logfile
i = 1;
for d in noScrat:
pcal.noScrat[i] = d
i += 1
if debug:
pcal.i
pcal.debug = debug
# Trap failure
try:
if not check:
pcal.g
except Exception as exception:
print(exception)
mess = "PCal Failed retCode="+str(pcal.retCode)
printMess(mess, logfile)
return 1
else:
pass
# end instrumental poln cal
return 0
# End EVLAPolCal
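# Illustrative use of EVLAPolCal (sketch only; calibrator names, solution
# interval and reference antenna are hypothetical, uv/err assumed to exist):
#   retCode = EVLAPolCal(uv, ["J1234+5678", "3C84"], err, doCalib=2, gainUse=0,
#                        doBand=1, solInt=2.0, refAnt=5, nThreads=4,
#                        logfile="pipeline.log")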
def EVLARLCal(uv, err, \
RLDCal=None, BChan=1, EChan = 0, ChWid2=1, solInt1=1./6, solInt2=10., \
RLPCal=None, RLPhase=0.0, RM=0.0, UVRange=[0.0,0.0], timerange = [0.0,1000.0], \
FQid=0, calcode=" ", doCalib=-1, gainUse=0, \
doBand=0, BPVer=0, BPSoln=0, flagVer=-1, \
refAnt=0, doPol=-1, PDVer=1, FOV=0.05, niter = 100, CleanRad=None, \
doPlot=False, plotFile="./BPCal.ps", \
nThreads=1, noScrat=[], logfile = "",check=False, debug = False):
"""
Determine R-L delay and/or phase calibration
Returns task error code, 0=OK, else failed
R-L Delay calibration using new BP table, if R-L phase (& RM) known for
calibrator(s), this also does the R-L phase calibration
R-L Phase Calibration applies to (new) highest numbered CL table on uv
* uv       = UV data object to calibrate
* err = Obit error/message stack
* RLPCal = R-L (polarization angle) calibrator,
If None no R-L cal
* RLPhase = R-L phase of RLPCal (deg) at 1 GHz
* RM = R-L phase RM (NYI)
* RLDCal = An array of triplets with R-L calibrators:
(name, R-L phase (deg at 1 GHz), RM (rad/m**2))
If None no R-L delay cal
* solInt1 = first solution interval (min), 0=> scan average
* solInt2 = second solution interval (min)
* BChan = First (1-rel) channel number
* EChan = Highest channel number. 0=> high in data.
* ChWid2 = Number of channels in running mean RL BP soln,
* UVRange = Range of baseline used in kilowavelengths
* FQid = Frequency Id to process
* calcode = Calibrator code
* doCalib = Apply calibration table, positive=>calibrate
* gainUse = CL/SN table to apply
* timerange= time range of data (days)
* doBand = If >0.5 apply previous bandpass cal.
* BPVer = previous Bandpass table (BP) version
* BPSoln = Output (BP) version 0=> High
* flagVer = Flagging table to apply
* refAnt = Reference antenna REQUIRED
* doPol = Apply polarization cal?
* PDVer = PD version for pol cal, -1=>use IF
* FOV = field of view radius (deg) needed to image RLPCal
* niter = Number of iterations of CLEAN in R-L cal
* CleanRad = CLEAN radius about center or None=autoWin
* doPlot = If True make plots of corrected data
* plotFile = Name of postscript file for plots
* noScrat = list of AIPS disks to avoid for scratch files
* nThreads = Number of threads to use in imaging
* logfile = Log file for task
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
"""
################################################################
# Don't bother if not full polarization
d = uv.Desc.Dict
nstoke = int(d["inaxes"][d["jlocs"]])
if nstoke<4:
mess = "Skip R-L polarization corrections - not full stokes"
printMess(mess, logfile)
return 0
mess = "R-L polarization calibration "
printMess(mess, logfile)
lbpver = BPVer # default bandpass in imaging
# Want R-L phase cal using delay calibrators?
if RLDCal!=None:
ncal = len(RLDCal) # How many calibrators?
rlpass=ObitTask.ObitTask("RLPass")
try:
rlpass.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
rlpass.taskLog = logfile
if not check:
setname(uv,rlpass)
#if Antennas:
# i = 0
# for a in Antennas:
# rlpass.Antennas[i] = a; i += 1
rlpass.timeRange[0] = timerange[0]
rlpass.timeRange[1] = timerange[1]
rlpass.BChan1 = BChan
rlpass.EChan1 = EChan
rlpass.BChan2 = BChan
rlpass.EChan2 = EChan
rlpass.ChWid2 = ChWid2
rlpass.UVRange[0] = UVRange[0];
rlpass.UVRange[1] = UVRange[1];
rlpass.doCalib = doCalib
rlpass.gainUse = gainUse
rlpass.flagVer = flagVer
rlpass.FreqID = FQid
rlpass.doPol = doPol
if "PDVer" in rlpass.__dict__:
rlpass.PDVer = PDVer
rlpass.doBand = doBand
rlpass.BPVer = BPVer
rlpass.refAnt = refAnt
rlpass.solInt1 = solInt1
rlpass.solInt2 = solInt2
rlpass.BPSoln = BPSoln
rlpass.prtLv = 1
rlpass.nThreads = nThreads
# Loop over calibrators
for ical in range (0,ncal):
rlpass.Sources[0]= RLDCal[ical][0]
rlpass.RLPhase = RLDCal[ical][1]
rlpass.RM = RLDCal[ical][2]
mess = "R-L channel phase calibration using "+rlpass.Sources[0]
printMess(mess, logfile)
if debug:
print("timerange", rlpass.timerang)
rlpass.i
rlpass.debug = True
# Trap failure
try:
if not check:
rlpass.g
except Exception as exception:
print(exception)
mess = "rlpass Failed retCode="+str(rlpass.retCode)
printMess(mess, logfile)
return 1
else:
pass
# end loop over calibrators
# Get output BP table
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
lbpver = uv.GetHighVer("AIPS BP")
# end R-L delay cal
# R-L phase cal
if RLPCal!=None:
mess = "R-L IF phase calibration using "+RLPCal
printMess(mess, logfile)
img = ObitTask.ObitTask("Imager")
try:
img.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
img.taskLog = logfile
if not check:
setname(uv,img)
if RLDCal==None:
img.doBand = doBand
img.BPVer = lbpver
else:
img.doBand = 1
img.BPVer = lbpver # Just created one
img.doCalib = doCalib
img.gainUse = gainUse
img.flagVer = flagVer
img.doPol = True
img.Sources[0] = RLPCal
img.Stokes = "IQU"
img.FOV = FOV
img.Niter = niter
# Auto window or centered box
if CleanRad:
img.CLEANBox=[-1,CleanRad,0,0]
else:
img.autoWindow = True
img.dispURL = "None"
img.BLFact = 1.004
img.Catalog = "None"
img.nThreads = nThreads
img.maxPSCLoop = 2
img.minFluxPSC = 0.05
img.solPInt = solInt1
img.solPType = "L1"
img.prtLv = 2
img.noScrat = noScrat
# Temporary output files
if img.DataType=="AIPS":
img.outName = "TEMP"
img.outClass= "IPOLCL"
img.outDisk = img.inDisk
img.outSeq = 6666
img.out2Name = "TEMP"
img.out2Class= "IPOLCL"
img.out2Disk = img.inDisk
img.out2Seq = 7777
elif img.DataType=="FITS":
img.outFile = "TEMPPOLCAL.fits"
img.outDisk = img.inDisk
img.out2File = "TEMPPOLCAL2.uvtab"
img.out2Disk = img.inDisk
# How many IFs?
if not check:
h = uv.Desc.Dict
if h["jlocif"]>=0:
nif = int(h["inaxes"][["jlocif"]])
else:
nif = 1
else:
nif = 1
# Lists of flux densities and RMSes
IFlux = []
IRMS = []
QFlux = []
QRMS = []
UFlux = []
URMS = []
# Loop over IF imaging I,Q, U, allow failure
for iif in range (1, nif+1):
img.BIF = iif
img.EIF = iif
#img.dispURL = "ObitView" # DEBUG
#img.debug=True # DEBUG
if debug:
img.i
img.debug = debug
# Trap failure
failed = False
try:
if not check:
img.g
except Exception as exception:
print(exception)
mess = "Imager Failed IF "+str(iif)+" retCode="+str(img.retCode)
printMess(mess, logfile)
failed = True
else:
pass
# Stub if failed
if failed:
IFlux.append(-1.0)
IRMS.append(-1.0)
QFlux.append(-1.0)
QRMS.append(-1.0)
UFlux.append(-1.0)
URMS.append(-1.0)
continue
if check: # Don't bother if only checking
continue
# Get fluxes from inner quarter of images
if img.DataType=="AIPS":
outName = (img.Sources[0].strip()+"TEMP")[0:12]
outDisk = img.outDisk
outSeq = 6666
# Stokes I
outClass="IPOLCL"
# Test if image exists
user = OSystem.PGetAIPSuser();
cno = AIPSDir.PTestCNO(outDisk, user, outName[0:12], outClass[0:6], "MA", outSeq, err)
if cno >= 0 :
x = Image.newPAImage("I",outName[0:12], outClass[0:6], outDisk,outSeq,True,err)
h = x.Desc.Dict
blc = [int(h["inaxes"][0]//4),int(h["inaxes"][1]//4)]
trc = [int(3*h["inaxes"][0]//4),int(3*h["inaxes"][1]//4)]
try:
stat = imstat(x, err, blc=blc,trc=trc,logfile=None)
IFlux.append(stat["Flux"])
IRMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
except:
IFlux.append(-1.0)
IRMS.append(-1.0)
else:
IFlux.append(-1.0)
IRMS.append(-1.0)
# Stokes Q
outClass="QPOLCL"
cno = AIPSDir.PTestCNO(outDisk, user, outName[0:12], outClass[0:6], "MA", outSeq, err)
if cno > 0 :
x = Image.newPAImage("Q",outName[0:12], outClass[0:6], outDisk,outSeq,True,err)
h = x.Desc.Dict
blc = [int(h["inaxes"][0]//4),int(h["inaxes"][1]//4)]
trc = [int(3*h["inaxes"][0]//4),int(3*h["inaxes"][1]//4)]
try:
stat = imstat(x, err, blc=blc,trc=trc,logfile=None)
QFlux.append(stat["Flux"])
QRMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
except:
QFlux.append(-1.0)
QRMS.append(-1.0)
else:
QFlux.append(-1.0)
QRMS.append(-1.0)
# Stokes U
outClass="UPOLCL"
cno = AIPSDir.PTestCNO(outDisk, user, outName[0:12], outClass[0:6], "MA", outSeq, err)
if cno > 0 :
x = Image.newPAImage("U",outName[0:12], outClass[0:6], outDisk,outSeq,True,err)
h = x.Desc.Dict
blc = [int(h["inaxes"][0]//4),int(h["inaxes"][1]//4)]
trc = [int(3*h["inaxes"][0]//4),int(3*h["inaxes"][1]//4)]
try:
stat = imstat(x, err, blc=blc,trc=trc,logfile=None)
UFlux.append(stat["Flux"])
URMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
except:
UFlux.append(-1.0)
URMS.append(-1.0)
else:
UFlux.append(-1.0)
URMS.append(-1.0)
# Delete UV output
out2Name = (img.Sources[0].strip()+"TEMP")[0:12]
out2Class="IPOLCL"
out2Disk = img.inDisk
out2Seq = 7777
u = UV.newPAUV("UV",out2Name,out2Class,out2Disk,out2Seq,True,err)
u.Zap(err)
del u
elif img.DataType=="FITS":
# Stokes I
outFile = img.Sources[0].strip()+"ITEMPPOLCAL.fits"
outFile = re.sub('\s','_',outFile) # Deblank filename
x = Image.newPFImage("I",outFile,img.outDisk,True,err)
h = x.Desc.Dict
blc = [int(h["inaxes"][0]//4),int(h["inaxes"][1]//4)]
trc = [int(3*h["inaxes"][0]//4),int(3*h["inaxes"][1]//4)]
stat = imstat(x, err, blc=blc,trc=trc,logfile=None)
IFlux.append(stat["Flux"])
IRMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
# Stokes Q
outFile = img.Sources[0].strip()+"QTEMPPOLCAL.fits"
outFile = re.sub('\s','_',outFile)   # Deblank filename
x = Image.newPFImage("Q",outFile,img.outDisk,True,err)
stat = imstat(x, err, blc=blc,trc=trc,logfile=None)
QFlux.append(stat["Flux"])
QRMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
# Stokes U
outFile = img.Sources[0].strip()+"UTEMPPOLCAL.fits"
outFile = re.sub('\s','_',outFile)   # Deblank filename
x = Image.newPFImage("U",outFile,img.outDisk,True,err)
stat = imstat(x, err, blc=blc,trc=trc,logfile=None)
UFlux.append(stat["Flux"])
URMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
out2File = img.Sources[0].strip()+"TEMPPOLCAL2.uvtab"
out2File = re.sub('\s','_',out2File)  # Deblank filename
u = UV.newPFUV("UV",out2File,img.outDisk,True,err)
u.Zap(err)
del u
# End accumulate statistics by file type
# End loop over IF
# Give results, compute R-L correction
RLCor = []
import math
mess = " IF IFlux IRMS QFlux QRMS UFlux URMS R-L Corr"
printMess(mess, logfile)
for i in range (0,len(IFlux)):
# REALLY NEED RM Correction!!!!!
cor = RLPhase - 57.296 * math.atan2(UFlux[i],QFlux[i])
RLCor.append(cor)
mess = "%3d %8.3f %8.4f %7.3f %7.4f %7.3f %7.4f %7.2f "%\
(i+1, IFlux[i], IRMS[i], QFlux[i], QRMS[i], UFlux[i], URMS[i], cor)
printMess(mess, logfile)
# Copy gainUse to new highest CL table
if not check:
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
hiCL = uv.GetHighVer("AIPS CL")
else:
hiCL = 1
# Copy CL table to be modified
if not check:
EVLACopyTable (uv, uv, "AIPS CL", err, inVer=hiCL, outVer=hiCL+1, \
logfile=logfile, check=check, debug=debug)
if err.isErr:
print("Error copying CL Table")
return 1
# Apply R-L phase corrections
clcor = AIPSTask.AIPSTask("clcor")
try:
clcor.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
clcor.logFile = logfile
if not check:
setname(uv,clcor)
clcor.opcode = "POLR"
clcor.gainver = hiCL+1
clcor.gainuse = hiCL+1
clcor.clcorprm[1:] = RLCor
if debug:
clcor.i
clcor.debug = debug
# Trap failure
try:
if not check:
clcor.g
except Exception as exception:
print(exception)
mess = "CLCOR Failed retCode="+str(clcor.retCode)
printMess(mess, logfile)
return 1
else:
pass
# end R-L Cal
# Plot corrected data?
if doPlot:
if RLPCal:
pSou = RLPCal
else:
pSou = RLDCal[0][0]
atimerange = []
for i in range(0,8):
atimerange.append(0.0)
atimerange[0] = timerange[0]; atimerange[4] = timerange[1];
scr = EVLASpecPlot( uv, pSou, atimerange, refAnt, err, \
flagVer=flagVer, Stokes=["RL","LR"], doband=1, doPol=doPol, PDVer=PDVer, \
plotFile=plotFile, \
check=check, debug=debug, logfile=logfile )
if (scr==None) or (not scr.UVIsA()):
return 0 # tolerate failure
retCode = EVLAWritePlots (scr, 1, 0, plotFile, err, \
plotDesc="R-L phase/delay plots", \
logfile=logfile, check=check, debug=debug)
if retCode!=0:
return retCode
scr.Zap(err)
# end plots
return 0
# end EVLARLCal
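# Illustrative use of EVLARLCal (sketch only; calibrator, phase and RM values
# are hypothetical, uv/err assumed to exist):
#   retCode = EVLARLCal(uv, err, RLPCal="3C286", RLPhase=66.0, RM=0.0,
#                       RLDCal=[("3C286", 66.0, 0.0)], doCalib=2, gainUse=0,
#                       refAnt=5, doPol=True, PDVer=1, logfile="pipeline.log")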
def EVLARLCal2(uv, err, uv2 = None, \
RLDCal=None, BChan=1, EChan = 0, \
FQid=0, calcode=" ", doCalib=-1, gainUse=0, \
timerange = [0.,0.,0.,0.,0.,0.,0.,0.], \
doBand=0, BPVer=0, flagVer=-1, \
refAnt=0, doPol=-1, smooth=[0.,0.,0.], dataInt=0., \
RLPCal=None, FOV=0.05, niter = 100, \
nThreads=1, noScrat=[], logfile = "",check=False, debug = False):
"""
Determine R-L delay and phase calibration
Returns task error code, 0=OK, else failed
Calibration applies to (new) highest numbered CL table on uv
* uv = UV data object to clear
* err = Obit error/message stack
* uv2      = If given, then copy AN table from uv to uv2 and apply same
calibration (intended to calibrate CVel data)
* RLPCal = An array of triplets with R-L calibrators:
(name, R-L phase (deg at 1 GHz), RM (rad/m**2))
If None no R-L cal
* RLDCal = R-L delay calibrator name or list
If None no R-L delay cal
* BChan = First (1-rel) channel number
* EChan = Highest channel number. 0=> high in data.
* FQid = Frequency Id to process
* calcode = Calibrator code
* doCalib = Apply calibration table, positive=>calibrate
* gainUse = CL/SN table to apply
* timerange= time range of data (aips format)
* doBand = If >0.5 apply previous bandpass cal.
* BPVer = previous Bandpass table (BP) version
* flagVer = Flagging table to apply
* refAnt = Reference antenna REQUIRED
* doPol = Apply polarization cal?
* smooth = Channel smoothing function
* dataInt = Data integration time (sec)
* FOV = field of view radius (deg) needed to image RLPCal
* niter = Number of iterations of CLEAN in R-L cal
* noScrat = list of AIPS disks to avoid for scratch files
* nThreads = Number of threads to use in imaging
* logfile = Log file for task
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
"""
################################################################
mess = "R-L polarization calibration "
printMess(mess, logfile)
# Want R-L delay cal?
if RLDCal!=None:
rldly=AIPSTask.AIPSTask("rldly")
try:
rldly.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
rldly.logFile = logfile
if not check:
setname(uv,rldly)
if type(RLDCal)!=list:
rldly.calsour[1]=RLDCal
else:
i = 1
for t in RLDCal:
rldly.calsour[i] = t
i += 1
i = 1
for t in timerange:
rldly.timerang[i] = t
i += 1
rldly.bchan = BChan
rldly.echan = EChan
rldly.docalib = doCalib
rldly.gainuse = gainUse
rldly.flagver = flagVer
rldly.freqid = FQid
rldly.calcode = calcode
rldly.dopol = doPol
rldly.smooth[1]=smooth[0]; rldly.smooth[2]=smooth[1];rldly.smooth[3]=smooth[2];
rldly.doband = doBand
rldly.bpver = BPVer
rldly.flagver = flagVer
rldly.refant = refAnt
rldly.solint = dataInt
if debug:
print("timerange", rldly.timerang)
rldly.i
# Trap failure
try:
if not check:
rldly.g
except Exception as exception:
print(exception)
mess = "rldly Failed retCode="+str(rldly.retCode)
printMess(mess, logfile)
return 1
else:
pass
# Get new CL table number
if not check:
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
gainUse = uv.GetHighVer("AIPS CL")
# end R-L delay cal
# R-L phase cal
if RLPCal!=None:
ncal = len(RLPCal) # How many calibrators?
img = ObitTask.ObitTask("Imager")
try:
img.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
img.taskLog = logfile
if not check:
setname(uv,img)
img.doCalib = doCalib
img.gainUse = gainUse
img.flagVer = flagVer
img.doPol = True
img.Stokes = "IQU"
img.FOV = FOV
img.Niter = niter
img.autoWindow = True
img.dispURL = "None"
img.Catalog = "None"
img.nThreads = nThreads
img.noScrat = noScrat
img.prtLv = 2
# Temporary output files
if img.DataType=="AIPS":
img.outName = "TEMP"
img.outClass= "IPOLCL"
img.outDisk = img.inDisk
img.outSeq = 6666
img.out2Name = "TEMP"
img.out2Class= "IPOLCL"
img.out2Disk = img.inDisk
img.out2Seq = 7777
elif img.DataType=="FITS":
img.outFile = "TEMPPOLCAL.fits"
img.outDisk = img.inDisk
img.out2File = "TEMPPOLCAL2.uvtab"
img.out2Disk = img.inDisk
# How many IFs?
if not check:
h = uv.Desc.Dict
if h["jlocif"]>=0:
nif = int(h["inaxes"][["jlocif"]])
else:
nif = 1
else:
nif = 1
# Loop over calibrators
SouCal = []
for ical in range (0,ncal):
img.Sources[0]= RLPCal[ical][0]
#rlpass.RLPhase = RLPCal[ical][1]
#rlpass.RM = RLPCal[ical][2]
# Loop over IF imaging I,Q, U
# Lists of flux densities and RMSes
IFlux = []
IRMS = []
QFlux = []
QRMS = []
UFlux = []
URMS = []
for iif in range (1, nif+1):
img.BIF = iif
img.EIF = iif
#img.dispURL = "ObitView" # DEBUG
#img.debug=True # DEBUG
if debug:
img.i
img.debug = debug
# Trap failure
try:
if not check:
img.g
except Exception as exception:
print(exception)
mess = "Imager Failed retCode="+str(img.retCode)
printMess(mess, logfile)
return 1
else:
pass
if check: # Don't bother if only checking
continue
# Get fluxes from Summed CCs, RMS from inner quarter of images
if img.DataType=="AIPS":
outName = (img.Sources[0].strip()+"TEMP")[0:12]
outDisk = img.outDisk
outSeq = 6666
# Stokes I
outClass="IPOLCL"
x = Image.newPAImage("I",outName[0:12], outClass[0:6], outDisk,outSeq,True,err)
SumCC = EVLAGetSumCC (x,err)
h = x.Desc.Dict
blc = [int(h["inaxes"][0]//4),int(h["inaxes"][1]//4)]
trc = [int(3*h["inaxes"][0]//4),int(3*h["inaxes"][1]//4)]
stat = imstat(x, err, blc=blc,trc=trc, logfile=logfile)
IFlux.append(SumCC)
IRMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
# Stokes Q
outClass="QPOLCL"
x = Image.newPAImage("Q",outName[0:12], outClass[0:6], outDisk,outSeq,True,err)
SumCC = EVLAGetSumCC (x,err)
stat = imstat(x, err, blc=blc,trc=trc, logfile=logfile)
QFlux.append(SumCC)
QRMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
# Stokes U
outClass="UPOLCL"
x = Image.newPAImage("U",outName[0:12], outClass[0:6], outDisk,outSeq,True,err)
SumCC = EVLAGetSumCC (x,err)
stat = imstat(x, err, blc=blc,trc=trc, logfile=logfile)
UFlux.append(SumCC)
URMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
# Delete UV output
out2Name = (img.Sources[0].strip()+"TEMP")[0:12]
out2Class="IPOLCL"
out2Disk = img.inDisk
out2Seq = 7777
u = UV.newPAUV("UV",out2Name,out2Class,out2Disk,out2Seq,True,err)
u.Zap(err)
del u
elif img.DataType=="FITS":
# Stokes I
outFile = img.Sources[0].strip()+"ITEMPPOLCAL.fits"
x = Image.newPFImage("I",outFile,img.outDisk,True,err)
SumCC = EVLAGetSumCC (x,err)
h = x.Desc.Dict
blc = [int(h["inaxes"][0]//4),int(h["inaxes"][1]//4)]
trc = [int(3*h["inaxes"][0]//4),int(3*h["inaxes"][1]//4)]
stat = imstat(x, err, blc=blc,trc=trc, logfile=logfile)
IFlux.append(SumCC)
IRMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
# Stokes Q
outFile = img.Sources[0].strip()+"QTEMPPOLCAL.fits"
x = Image.newPFImage("Q",outFile,img.outDisk,True,err)
SumCC = EVLAGetSumCC (x,err)
stat = imstat(x, err, blc=blc,trc=trc, logfile=logfile)
QFlux.append(SumCC)
QRMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
# Stokes U
outFile = img.Sources[0].strip()+"UTEMPPOLCAL.fits"
x = Image.newPFImage("U",outFile,img.outDisk,True,err)
SumCC = EVLAGetSumCC (x,err)
stat = imstat(x, err, blc=blc,trc=trc, logfile=logfile)
UFlux.append(SumCC)
URMS.append(stat["RMSHist"])
x.Zap(err) # Cleanup
del x
out2File = img.Sources[0].strip()+"TEMPPOLCAL2.uvtab"
u = UV.newPFUV("UV",out2File,img.outDisk,True,err)
u.Zap(err)
del u
# End accumulate statistics by file type
# End loop over IF
# Save source info
SouCal.append({"name":img.Sources[0],"Phase":RLPCal[ical][1],"RM":RLPCal[ical][2], \
"IFlux":IFlux, "IRMS":IRMS, "QFlux":QFlux, "QRMS":QRMS, \
"UFlux":UFlux, "URMS":URMS})
# end loop over calibrators
# Give results, weighted compute R-L correction
import math
mess = '\n R-L Phase calibration results'
printMess(mess, logfile)
RLCor = []
RLCorRSum = []
RLCorISum = []
RLCorWt = []
# Zero accumulators
for i in range (0,len(IFlux)):
RLCorRSum.append(0.0)
RLCorISum.append(0.0)
RLCorWt.append(0.0)
for ical in range (0,ncal):
IFlux = SouCal[ical]["IFlux"]
IRMS = SouCal[ical]["IRMS"]
QFlux = SouCal[ical]["QFlux"]
QRMS = SouCal[ical]["QRMS"]
UFlux = SouCal[ical]["UFlux"]
URMS = SouCal[ical]["URMS"]
RLPhase = SouCal[ical]["Phase"]
RM = SouCal[ical]["RM"]
mess = SouCal[ical]["name"]+"\n IF IFlux IRMS QFlux QRMS UFlux URMS R-L Corr Wt"
printMess(mess, logfile)
for i in range (0,len(IFlux)):
# REALLY NEED RM Correction!!!!!
cor = RLPhase - 57.296 * math.atan2(UFlux[i],QFlux[i])
if cor>180:
cor -= 360.0
if cor<-180:
cor += 360.0
wt = (QFlux[i]**2 + UFlux[i]**2) /(QRMS[i]**2 + URMS[i]**2) # weight from SNR
RLCorRSum[i] += (math.cos(cor/57.296)*wt)
RLCorISum[i] += (math.sin(cor/57.296)*wt)
RLCorWt[i] += wt
mess = "%3d %8.3f %8.3f %7.3f %7.3f %7.3f %7.3f %8.3f %7.1f "% \
(i+1, IFlux[i], IRMS[i], QFlux[i], QRMS[i], UFlux[i], URMS[i], cor, wt)
printMess(mess, logfile)
# Copy gainUse to new highest CL table
if not check:
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
hiCL = uv.GetHighVer("AIPS CL")
else:
hiCL = 1
# end loop over calibrators
# Loop making weighted average correction
mess = "\n\n Weighted average corrections\n IF R-L Corr"
printMess(mess, logfile)
for i in range (0,len(IFlux)):
if RLCorWt[i]>0.0:
corr = RLCorRSum[i]
cori = RLCorISum[i]
cor = math.atan2(cori,corr)*57.296
else:
cor = 0.0
mess = "%3d %7.3f "% (i+1, cor)
printMess(mess, logfile)
RLCor.append(cor)
# end loop making weighted average
# If calibrating second uv data, copy AN table 1
if uv2:
z = uv2.ZapTable("AIPS AN",1,err)
EVLACopyTable (uv, uv2, "AIPS AN", err, \
logfile=logfile, check=check, debug=debug)
if err.isErr:
print("Error copying AN Table")
return 1
# Copy CL table to be modified (CLCOR buggy)
if not check:
EVLACopyTable (uv, uv, "AIPS CL", err, inVer=hiCL, outVer=hiCL+1, \
logfile=logfile, check=check, debug=debug)
if err.isErr:
print("Error copying CL Table")
return 1
# Apply R-L phase corrections
clcor = AIPSTask.AIPSTask("clcor")
try:
clcor.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
clcor.logFile = logfile
if not check:
setname(uv,clcor)
clcor.opcode = "POLR"
clcor.gainver = hiCL+1
clcor.gainuse = hiCL+1
clcor.clcorprm[1:] = RLCor
if debug:
clcor.i
clcor.debug = debug
# Trap failure
try:
if not check:
clcor.g
except Exception as exception:
print(exception)
mess = "CLCOR Failed retCode="+str(clcor.retCode)
printMess(mess, logfile)
return 1
else:
pass
# If calibrating second uv data, run clcor
if uv2:
mess = "Also calibrate Secondary UV data"
printMess(mess, logfile)
if not check:
setname(uv2,clcor)
# Open and close image to sync with disk
uv2.Open(UV.READONLY, err)
uv2.Close(err)
hiCL = uv2.GetHighVer("AIPS CL")
# Copy CL table to be modified (CLCOR buggy)
EVLACopyTable (uv2, uv2, "AIPS CL", err, inVer=hiCL, outVer=hiCL+1, \
logfile=logfile, check=check, debug=debug)
if err.isErr:
print("Error copying CL Table")
return 1
clcor.gainver = hiCL+1
clcor.gainuse = hiCL+1
if debug:
clcor.i
clcor.debug = debug
# Trap failure
try:
if not check:
clcor.g
except Exception as exception:
print(exception)
mess = "CLCOR Failed retCode="+str(clcor.retCode)
printMess(mess, logfile)
return 1
else:
pass
# end R-L Cal
return 0
# end EVLARLCal2
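# Illustrative use of EVLARLCal2 (sketch only; calibrators are hypothetical,
# uv/err assumed to exist, uv2 is optional):
#   retCode = EVLARLCal2(uv, err, RLDCal="3C286",
#                        RLPCal=[("3C286", 66.0, 0.0)], doCalib=2, gainUse=0,
#                        refAnt=5, logfile="pipeline.log")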
def EVLAReportTargets(uv, err, FreqID=1, Sources=None, seq=1, sclass="IClean", \
Stokes="I", logfile='', check=False, debug=False):
"""
Generate report info for a list of targets in AIPS files
Returns a report which is a list of dicts, each of which contains
=========== ==========================================
"Source" Source name
"haveImage" True if images were made,
"ObsDate" Observing date as "yyyy-mm-dd"
"numVis" Number of visibilities (ignoring flagging)
"Exposure" Total integration time (day)
"RA" Source RA (deg) at standard equinox
"Dec" Source Dec (deg) at standard equinox
"IFlux" Source Table IPol flux per IF
"QFlux" Source Table QPol flux per IF
"UFlux" Source Table UPol flux per IF
"VFlux" Source Table VPol flux per IF
=========== ==========================================
following present if haveImage True
======== ==============================================
"RAPnt" Antenna pointing RA (deg) at standard equinox
"DecPnt" Antenna pointing Dec (deg) at standard equinox
"Freq" Reference frequency (Hz)
"BW" Image bandwidth (Hz)
"Size" Width of image in deg (From Stokes I)
"Cells" Cell spacing in deg (From Stokes I)
======== ==============================================
for each s in Stokes:
======= ===============================
"sSum" Sum of clean components in Jy
"sPeak" Peak pixel brightness in Jy
"sRMS" RMS noise in inner quarter (Jy)
"sBeam" Beam (maj, min, PA) (deg)
======= ===============================
* uv = UV data object
* err = Python Obit Error/message stack
* Sources = Source name or list of names to use
If an empty list all sources in uv are included
* seq = sequence number of images
* sclass = Image class, first character replaced with char in Stokes
* FreqID = Frequency group identifier
* Stokes = Stokes parameters of images
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = show input
"""
################################################################
mess = "Generate source statistics "
printMess(mess, logfile)
# If list empty get all sources
if type(Sources)==list:
sl = Sources
elif Sources:
sl = [Sources]
else:
sl = []    # None => include all sources
if len(sl)<=0:
slist = EVLAAllSource(uv,err,logfile=logfile,check=check,debug=debug)
else:
slist = sl
# Init output
Report = []
# Image disk assumed same as uv
disk = uv.Disk
user = OSystem.PGetAIPSuser()
# Loop over slist
hd = uv.Desc.Dict
for sou in slist:
sdict = {"Source":sou, "haveImage":False} # Init source structure
sdict["ObsDate"] = uv.Desc.Dict["obsdat"]
# Observing stats
obstat = EVLAGetTimes (uv, sou, err, logfile=logfile, check=check, debug=debug)
sdict["numVis"] = obstat["numVis"]
sdict["Exposure"] = obstat["Exposure"]
sdict["RA"] = obstat["RA"]
sdict["Dec"] = obstat["Dec"]
sdict["IFlux"] = obstat["IFlux"]
sdict["QFlux"] = obstat["QFlux"]
sdict["UFlux"] = obstat["UFlux"]
sdict["VFlux"] = obstat["VFlux"]
# Test if image exists
cno = AIPSDir.PTestCNO(disk, user, sou, Stokes[0:1]+sclass[1:], "MA", seq, err)
if cno <= 0 :
Report.append(sdict) # Save source info
continue
# Image statistics, loop over Stokes
for s in Stokes:
klass = s+sclass[1:]
x = Image.newPAImage(s, sou, klass, disk, seq, True, err)
hd = x.Desc.Dict
sdict[s+"Beam"] = (hd["beamMaj"],hd["beamMin"],hd["beamPA"])
# Some from Stokes I only
if s == 'I':
sdict["haveImage"] = True
sdict["Size"] = hd["inaxes"][1]*hd["cdelt"][1]
sdict["Cells"] = hd["cdelt"][1]
sdict["RAPnt"] = hd["obsra"]
sdict["DecPnt"] = hd["obsdec"]
sdict["Freq"] = hd["crval"][hd["jlocf"]]
sdict["BW"] = hd["cdelt"][hd["jlocf"]]
blc = [int(hd["inaxes"][0]//4),int(hd["inaxes"][1]//4)]
trc = [int(3*hd["inaxes"][0]//4),int(3*hd["inaxes"][1]//4)]
stat = imstat(x,err,blc=blc,trc=trc) # Image statistics inner quarter
if abs(stat["Max"]) > abs(stat["Min"]):
sdict[s+"Peak"] = stat["Max"]
else:
sdict[s+"Peak"] = stat["Min"]
sdict[s+"RMS"] = stat["RMSHist"]
sdict[s+"Sum"] = EVLAGetSumCC(x, err, logfile=logfile, check=check, debug=debug)
del x
# End stokes image loop
Report.append(sdict) # Save source info
# end loop over sources
# Give terse listing
if hd:
mess = "\n Summary at frequency = "+"%8.3f"%(hd["crval"][hd["jlocf"]]*1.0e-9)+" GHz on "+ \
uv.Desc.Dict["obsdat"]
printMess(mess, logfile)
for sdict in Report:
mess = "\n Source = "+sdict["Source"]+", Exposure="+"%5.3f"%(sdict["Exposure"]*24.)+" hr"
printMess(mess, logfile)
if "IBeam" in sdict:
mess = "IPol Beam = ("+"%8.3f"%(sdict["IBeam"][0]*3600.0)+", %8.3f"%(sdict["IBeam"][1]*3600.0)+ \
", %6.1f"%(sdict["IBeam"][2])+") asec, asec, deg"
printMess(mess, logfile)
else:
continue # Nothing to report
# Source table flux densities
if "IFlux" in sdict:
n = len(sdict["IFlux"])
for i in range(0,n):
mess = "IF "+str(i+1)+" IPol="+"%8.4f"%(sdict["IFlux"][i])+ \
", QPol="+"%8.4f"%(sdict["QFlux"][i])+ \
", UPol="+"%8.4f"%(sdict["UFlux"][i])+ \
", VPol="+"%8.4f"%(sdict["VFlux"][i])
printMess(mess, logfile)
for s in Stokes:
mess = "Stokes "+s+" Sum CC="+"%8.4f"%(sdict[s+"Sum"])+", Peak="+"%8.4f"%(sdict[s+"Peak"])+ \
", RMS="+"%8.5f"%(sdict[s+"RMS"])+" Jy"
printMess(mess, logfile)
# Polarization
if Stokes=="IQU":
ppolSum = (sdict["QSum"]**2 + sdict["USum"]**2)**0.5
ppolPeak = (sdict["QPeak"]**2 + sdict["UPeak"]**2)**0.5
RLSum = 57.296*math.atan2(sdict["USum"], sdict["QSum"])
RLPeak = 57.296*math.atan2(sdict["UPeak"],sdict["QPeak"])
mess = "Sum CC PPol="+"%8.4f"%(ppolSum)+", R=L Phase="+"%8.2f"%(RLSum)+ \
"; Peak PPol="+"%8.4f"%(ppolPeak)+", R=L Phase="+"%8.2f"%(RLPeak)
printMess(mess, logfile)
# End terse listing
return Report
# end EVLAReportTargets
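# Illustrative use of EVLAReportTargets (sketch only; source list and image
# class are hypothetical, uv/err assumed to exist):
#   report = EVLAReportTargets(uv, err, Sources=["J1234+5678"], seq=1,
#                              sclass="IClean", Stokes="IQU",
#                              logfile="pipeline.log")
#   for entry in report:
#       print(entry["Source"], entry.get("IPeak"), entry.get("IRMS"))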
def EVLAGetSumCC(image, err, CCver=1,
logfile='', check=False, debug=False):
"""
Sum fluxes in a CC table
Sums the flux densities in a CC Table on an image
Returns sum
Returns with err set on error
* image = Image with CC table
* err = Python Obit Error/message stack
* CCver = CC table to sum
* logfile = logfile for messages
* check = Only check script
* debug = Only debug - no effect
"""
################################################################
if check:
return 0.0
if debug:
return 0.0
# Open and close image to sync with disk
image.Open(Image.READONLY, err)
image.Close(err)
# Anything there?
ver = image.GetHighVer("AIPS CC")
if ver<1:
return 0.0
CCTab = image.NewTable(Table.READONLY, "AIPS CC", CCver, err)
if err.isErr:
return 0.0
# Open
CCTab.Open(Table.READONLY, err)
if err.isErr:
return 0.0
# Number of rows
nrow = CCTab.Desc.Dict["nrow"]
sum = 0.0
# Loop over table
for irow in range (1, nrow+1):
CCrow = CCTab.ReadRow(irow, err)
if err.isErr:
return sum
sum += CCrow["FLUX"][0]
# End loop over table
# Close table
CCTab.Close(err)
if err.isErr:
return sum
return sum
# end EVLAGetSumCC
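# Illustrative use of EVLAGetSumCC (sketch only; the AIPS image name, class,
# disk and seq are hypothetical, err assumed to exist):
#   x = Image.newPAImage("I", "J1234+5678", "IClean", 1, 1, True, err)
#   totalFlux = EVLAGetSumCC(x, err)   # Jy, summed over CC table version 1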
def EVLAGetTimes(uv, Source, err,
logfile='', check=False, debug=False):
"""
Lookup observing times and number of visibilities for a source, other info
Return dict {"numVis":no vis, "Exposure":Total integration time (day),
"RA": RA@equinox, "Dec" Dec@equinox,
"IFlux":IFlux, "QFlux":QFlux, "UFlux":UFlux, "VFlux":VFlux}
* uv = UV data with AIPS SU and AIPS NX tables
* Source = Source to lookup
* err = Python Obit Error/message stack
* logfile = logfile for messages
* check = Only check script
* debug = Only debug - no effect
"""
################################################################
if check:
return {"numVis":0, "Exposure":0.0, "RA":0.0, "Dec":0.0}
# Open and close uv to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
# Lookup Source ID (SouID)
SUtab = uv.NewTable(Table.READONLY, "AIPS SU", 1, err)
SUtab.Open(Table.READONLY, err)
if err.isErr:
return {"numVis":0, "Exposure":0.0, "RA":0.0, "Dec":0.0}
# Number of rows
nrow = SUtab.Desc.Dict["nrow"]
for i in range (0,nrow): # Loop over rows
SUrow = SUtab.ReadRow(i+1, err)
if err.isErr:
return {"numVis":0, "Exposure":0.0, "RA":0.0, "Dec":0.0}
SouID = SUrow["ID. NO."][0]
RA = SUrow["RAEPO"][0]
Dec = SUrow["DECEPO"][0]
IFlux = SUrow["IFLUX"]
QFlux = SUrow["QFLUX"]
UFlux = SUrow["UFLUX"]
VFlux = SUrow["VFLUX"]
#if debug:
# mess="Source "+Source+" test "+SUrow["SOURCE"][0]+" ID ="+str(SouID)+ \
# " match="+str(SUrow["SOURCE"][0].rstrip()==Source.rstrip())
# printMess(mess, logfile)
if SUrow["SOURCE"][0].rstrip()==Source.rstrip(): # This it?
break;
SUtab.Close(err)
if err.isErr:
return {"numVis":0, "Exposure":0.0, "RA":RA, "Dec":Dec, \
"IFlux":IFlux, "QFlux":QFlux, "UFlux":UFlux, "VFlux":VFlux}
# get observing stats from AIPS NX table
cntVis = 0
sumTime = 0.0
NXTab = uv.NewTable(Table.READONLY, "AIPS NX", 1, err)
if err.isErr:
return {"numVis":0, "Exposure":0.0, "RA":RA, "Dec":Dec, \
"IFlux":IFlux, "QFlux":QFlux, "UFlux":UFlux, "VFlux":VFlux}
# Open
NXTab.Open(Table.READONLY, err)
if err.isErr:
return {"numVis":cntVis, "Exposure":sumTime, "RA":RA, "Dec":Dec, \
"IFlux":IFlux, "QFlux":QFlux, "UFlux":UFlux, "VFlux":VFlux}
# Number of rows
nrow = NXTab.Desc.Dict["nrow"]
# Loop over table
for irow in range (1, nrow+1):
NXrow = NXTab.ReadRow(irow, err)
if err.isErr:
return {"numVis":cntVis, "Exposure":sumTime, "RA":RA, "Dec":Dec, \
"IFlux":IFlux, "QFlux":QFlux, "UFlux":UFlux, "VFlux":VFlux}
# Is this the desired source?
if NXrow["SOURCE ID"][0]==SouID:
sumTime += NXrow["TIME INTERVAL"][0]
cntVis += NXrow["END VIS"][0] - NXrow["START VIS"][0] + 1
# End loop over table
# Close table
NXTab.Close(err)
if err.isErr:
return {"numVis":cntVis, "Exposure":sumTime, "RA":RA, "Dec":Dec, \
"IFlux":IFlux, "QFlux":QFlux, "UFlux":UFlux, "VFlux":VFlux}
if debug:
mess="EVLAGetTimes: Source "+Source+"="+str(SouID)+" numVis="+str(cntVis)+ \
" Integration time = "+"%5.3f"%(sumTime*24.)+" hr"
printMess(mess, logfile)
return {"numVis":cntVis, "Exposure":sumTime, "RA":RA, "Dec":Dec, \
"IFlux":IFlux, "QFlux":QFlux, "UFlux":UFlux, "VFlux":VFlux}
# end EVLAGetTimes
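# Illustrative use of EVLAGetTimes (sketch only; the source name is
# hypothetical, uv/err assumed to exist):
#   stats = EVLAGetTimes(uv, "J1234+5678", err, logfile="pipeline.log")
#   print("Exposure (hr):", stats["Exposure"]*24.0, " nVis:", stats["numVis"])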
def EVLAImageTargets(uv, err, Sources=None, FreqID=1, seq=1, sclass="IClean", band="", \
doCalib=-1, gainUse=0, doBand=-1, BPVer=0, flagVer=-1, \
doPol=False, PDVer=-1, minFlux=0.0, Beam=[0.,0.,0.], \
Stokes="I", FOV=0.1/3600.0, Robust=0, Niter=300, CleanRad=None, \
maxPSCLoop=0, minFluxPSC=0.1, solPInt=20.0/60., \
solPMode="P", solPType= " ", UVRange=[0.,0.], timeRange=[0.,0.], \
maxASCLoop=0, minFluxASC=0.5, solAInt=2.0, \
solAMode="A&P", solAType= " ", \
doOutlier=None, OutlierDist=None, OutlierFlux=None, \
avgPol=False, avgIF=False, minSNR = 5.0, refAnt=0, \
do3D=True, BLFact=0.999, BLchAvg=False, \
doMB=False, norder=2, maxFBW=0.05, doComRes=False, \
PBCor=True, antSize=24.5, nTaper=0, Tapers=[20.0], \
nThreads=1, noScrat=[], logfile='', check=False, debug=False):
"""
Image a list of sources with optional selfcal
Uses Imager or MFImage to image a list of sources.
Data must be at least approximately calibrated
Returns task error code, 0=OK, else failed
* uv = UV data object
* err = Python Obit Error/message stack
* Sources = Source name or list of names to use
If an empty list all sources in uv are included
* seq = sequence number of output
* sclass = Image output class
* band = project band - appended to name
* FreqID = Frequency group identifier
* doCalib = Apply calibration table
* gainUse = CL/SN table to apply
* doBand = If >0.5 apply bandpass cal.
* BPVer = Bandpass table version
* flagVer = Input Flagging table version
* doPol = Apply polarization cal?
* PDVer = PD version for pol cal, -1=>use IF
* minFlux = minimum flux density for initial CLEAN
* Stokes = Stokes parameters to image
* timeRange = time range (days) to image, [0,0]=>all
* FOV = Field of view to image in deg
* Robust = Weighting robustness parameter
* Niter = max no. iterations
* Beam = Clean restoring beam
* UVRange = Imaging UV range in kLambda, 0s => all
* CleanRad = CLEAN radius about center or None=autoWin
* maxPSCLoop = max. number of phase sc loops
* minFluxPSC = Trip level for phase self cal (Jy)
* solPInt = Phase solution interval (min)
* solPMode = Phase soln mode "P", "DELA"
* solPType = Phase soln type
* maxASCLoop = max. number of amp&phase sc loops
* minFluxASC = Trip level for amp&phase self cal (Jy)
* solAInt = Amp&phase solution interval (min)
* solAMode = Amp&Phase soln mode "A&P", "P", "DELA"
* solAType   = Amp&Phase soln type
* avgPol = Average poln in SC?
* avgIF = Average IFs in SC?
* minSNR = minimum acceptable SNR in SC
* refAnt = Reference antenna
* do3D = Use 3D imaging?
* doComRes = Force common resolution in frequency? (MF)
* BLFact = Baseline dependent averaging factor
* BLchAvg = If True and BLFact>=1.0 also average channels
* doOutlier = Outliers from NVSS? Yes=> 4*FOV, 10 mJy >1 GHz else 100 mJy
None = Default, Yes if freq<6 GHz
* OutlierDist = maximum distance for outliers (deg) None = Default
* OutlierFlux = minimum flux density for outliers (Jy) None = Default
* doMB = If True is wideband imaging
* norder = order on wideband imaging
* maxFBW = max. fractional wideband imaging
* PBCor = Do PB correction on final image?
* antSize = antenna size (m) for PBCor
* nTaper = number of (additional) multi resolution tapers
* Tapers = Sizes of additional tapers in pixels
* nThreads = Max. number of threads to use
* noScrat = list of disks to avoid for scratch files
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = show input
"""
################################################################
mess = "Image a list of sources "
printMess(mess, logfile)
# Tolerate missing BP table
# Open and close image to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
hiBP = uv.GetHighVer("AIPS BP")
if hiBP<=0:
doBand = -1
hiPD = uv.GetHighVer("AIPS PD")
if hiPD<=0:
doPol=False; PDVer=-1; Stokes="I"
# Tests if have full poln
nstoke = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocs"]]
if nstoke<4:
doPol=False; PDVer=-1; Stokes="I"
# get reference Freq
refFreq = uv.Desc.Dict["crval"][uv.Desc.Dict["jlocf"]]
# If list empty get all sources
if type(Sources)==list:
sl = Sources
elif Sources:
sl = [Sources]
else:
sl = []    # None => include all sources
if len(sl)<=0:
slist = EVLAAllSource(uv,err,logfile=logfile,check=check,debug=debug)
else:
slist = sl
if doMB:
imager = ObitTask.ObitTask("MFImage")
try:
imager.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
imager.norder = norder
imager.maxFBW = maxFBW
imager.prtLv = 2
else:
imager = ObitTask.ObitTask("Imager")
try:
imager.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
imager.prtLv = 2
imager.taskLog = logfile
if not check:
setname(uv,imager)
imager.outDisk = imager.inDisk
#imager.outName = "_"+band
imager.out2Name = "_"+band
imager.out2Disk = imager.inDisk
imager.outSeq = seq
imager.out2Seq = seq
imager.outClass = sclass
imager.BLFact = BLFact
imager.BLchAvg = BLchAvg
imager.flagVer = flagVer
imager.doCalib = doCalib
imager.gainUse = gainUse
imager.doBand = doBand
imager.BPVer = BPVer
imager.doPol = doPol
if "PDVer" in imager.__dict__:
imager.PDVer = PDVer
imager.Stokes = Stokes
imager.FOV = FOV
imager.Robust = Robust
imager.Niter = Niter
imager.Beam = Beam
imager.UVRange = UVRange
imager.timeRange = timeRange
imager.minFlux = minFlux
imager.maxPSCLoop = maxPSCLoop
imager.minFluxPSC = minFluxPSC
imager.solPInt = solPInt
imager.solPMode = solPMode
imager.solPType = solPType
imager.maxASCLoop = maxASCLoop
imager.minFluxASC = minFluxASC
imager.solAInt = solAInt
imager.solAMode = solAMode
imager.solAType = solAType
imager.avgPol = avgPol
imager.avgIF = avgIF
imager.refAnt = refAnt
imager.minSNR = minSNR
imager.do3D = do3D
imager.dispURL = "None"
imager.PBCor = PBCor
imager.antSize = antSize
imager.nTaper = nTaper
imager.Tapers = Tapers
if doOutlier or ((doOutlier==None) and refFreq<6.0e9):
FWHM = (45.0 /(refFreq*1.0e-9) ) / 60. # 25 m ant FWHM in deg
imager.OutlierDist = FWHM*4.0 # Outliers from NVSS/SUMMS for lower frequencies
if refFreq>1.0e9:
imager.OutlierFlux = 0.01
else:
imager.OutlierFlux = 0.10
# Actual values if given
if OutlierDist!=None:
imager.OutlierDist = OutlierDist
if OutlierFlux!=None:
imager.OutlierFlux = OutlierFlux
# Auto window or centered box
if CleanRad:
imager.CLEANBox=[-1,CleanRad,0,0]
else:
imager.autoWindow = True
if "doComRes" in imager.__dict__:
imager.doComRes = doComRes
imager.noScrat = noScrat
imager.nThreads = nThreads
if debug:
imager.prtLv = 5
imager.i
imager.debug = debug
OK = False # Some must work
# Loop over slist
for sou in slist:
if doOutlier or ((doOutlier==None) and refFreq<6.0e9):
# Use NVSS north of dec 30 S and SUMMS southward
suinfo = EVLAGetTimes(uv, sou, err, logfile=logfile, check=check,debug=debug)
if suinfo["Dec"]<-30.0:
imager.Catalog = 'SUMMSVZ.FIT'
del suinfo
imager.Sources[0] = sou
mess = "Image "+sou
printMess(mess, logfile)
# Trap failure
try:
if not check:
imager.g
except Exception as exception:
print(exception)
mess = "Imager Failed retCode= "+str(imager.retCode)
printMess(mess, logfile)
#return 1 Allow some failures
# Cleanup image mess
AllDest(err,Atype="MA",Aname=imager.Sources[0], disk=imager.outDisk, Aseq=imager.outSeq);
else:
OK = True
# delete Imager file if not debug
if not debug:
out2Name = imager.Sources[0].strip()+"_"+band
out2Name = out2Name[0:12]
if doMB:
out2Class = "MFImage"
else:
out2Class = "Imager"
# Tolerate failures
try:
# Test if file exists
cno = AIPSDir.PTestCNO(imager.out2Disk, OSystem.PGetAIPSuser(), \
out2Name, out2Class, "UV", imager.out2Seq, err)
if cno>0:
u = UV.newPAUV("zap", out2Name, out2Class, imager.out2Disk, imager.out2Seq, False, err)
if UV.PIsA(u):
u.Zap(err) # cleanup
if err.isErr:
mess = "Error deleting Imager work file"
printMess(mess, logfile)
#return 1
del u
except Exception as exception:
print(exception)
mess = "Imager Cleanup Failed source= "+imager.Sources[0].strip()+"_"+band
printMess(mess, logfile)
OErr.PClear(err) # Clear any message/error
#return 1 Allow some failures
else:
pass
# end loop over sources
# Something work?
if not OK:
printMess("All images failed", logfile)
return 1
return 0
# end EVLAImageTargets
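# Illustrative use of EVLAImageTargets (sketch only; the source list, band
# label and CLEAN parameters are hypothetical, uv/err assumed to exist):
#   retCode = EVLAImageTargets(uv, err, Sources=["J1234+5678"], seq=1,
#                              sclass="IClean", band="C", doCalib=2, gainUse=0,
#                              doBand=1, Stokes="I", FOV=0.1, Robust=0,
#                              Niter=1000, doMB=True, nThreads=4,
#                              logfile="pipeline.log")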
def EVLAAllSource(uv, err, \
logfile='', check=False, debug=False):
"""
Returns List of all sources, empty if no SU table
* uv = UV data object
* err = Python Obit Error/message stack
* logfile = logfile for messages
* check = Only check script
* debug = Only debug - no effect
"""
################################################################
# Open and close uv to sync with disk
uv.Open(UV.READONLY, err)
uv.Close(err)
allSou = []
if check:
return allSou
# Is there an SU table?
hiver = uv.GetHighVer("AIPS SU")
if hiver<=0:
mess = "No SoUrce (SU) table found"
printMess(mess, logfile)
return allSou
mess = "List of sources in database"
printMess(mess, logfile)
SUTab = uv.NewTable(Table.READONLY, "AIPS SU", 1, err)
if err.isErr:
return allSou
# Open
SUTab.Open(Table.READONLY, err)
if err.isErr:
return allSou
# Number of rows
nrow = SUTab.Desc.Dict["nrow"]
if debug:
mess = str(nrow)+" sources in database"
printMess(mess, logfile)
for i in range (0,nrow): # Loop over rows
SUrow = SUTab.ReadRow(i+1, err)
if err.isErr:
return allSou
allSou.append(SUrow["SOURCE"][0].strip())
mess = "Source("+str(i+1)+") = "+SUrow["SOURCE"][0]
printMess(mess, logfile)
# end loop over rows
# Close table
SUTab.Close(err)
if err.isErr:
return allSou
return allSou
# end EVLAAllSource
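# Illustrative use of EVLAAllSource (sketch only; uv/err assumed to exist):
#   slist = EVLAAllSource(uv, err, logfile="pipeline.log")
#   print("Sources in SU table:", slist)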
def EVLAPlotTab(uv, inext, invers, err, \
source=None, timerang=[0.,0.,0.,0.,0.,0.,0.,0.], \
stokes="HALF", optype=" ", opcode=" ", nplots=1, \
logfile=None, check=False, debug=False):
"""
Makes AIPS plots of tables
Returns task error code, 0=OK, else failed
* uv = UV data object to plot
* inext = AIPS Table ("SN", "CL", "TY", "PC")
* invers   = version number, 0-> highest
* source = if given the name of the source
* timerang = timerange to plot.
* stokes = Stokes type to plot
* optype = Data to be plotted (see help snplt)
* opcode = Plot type (see help snplt)
* nplots = number of plots per page
* err = Obit error/message stack
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = show input
"""
################################################################
snplt = AIPSTask.AIPSTask("snplt")
try:
snplt.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv,snplt)
snplt.inext = inext
snplt.invers = invers
snplt.optype = optype
snplt.opcode = opcode
snplt.nplots = nplots
snplt.stokes = stokes
snplt.msgkill = 5 # Suppress blather
i = 1
for t in timerang:
snplt.timerang[i] = t
i += 1
snplt.logFile = logfile
if debug:
snplt.i
# Trap failure
try:
if not check:
snplt.g
except Exception as exception:
print(exception)
mess = "SNPLT Failed "
printMess(mess, logfile)
return 1
else:
pass
# Open/close UV to update header
if not check:
uv.Open(UV.READONLY,err)
uv.Close(err)
if err.isErr:
OErr.printErr(err)
mess = "Update UV header failed"
printMess(mess, logfile)
return 1
return 0
# end EVLAPlotTab
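# Illustrative use of EVLAPlotTab (sketch only; the table type/version and
# SNPLT plot options are hypothetical, uv/err assumed to exist):
#   retCode = EVLAPlotTab(uv, "SN", 0, err, optype="PHAS", opcode="ALSI",
#                         nplots=6, logfile="pipeline.log")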
def EVLAPlotBPTab(uv, invers, err, inext = 'BP', \
source=None, timerang=[0.,0.,0.,0.,0.,0.,0.,0.], \
stokes=" ", \
logfile=None, check=False, debug=False):
"""
Makes AIPS plots of BP tables
Returns task error code, 0=OK, else failed
* uv = UV data object to plot
* invers   = version number, 0-> highest
* inext    = table type 'BP', 'BD', 'CP', 'PD'
* source = if given the name of the source
* timerang = timerange to plot.
* stokes = Stokes type to plot
* err = Obit error/message stack
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = show input
"""
################################################################
bplot = AIPSTask.AIPSTask("bplot")
try:
bplot.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv,bplot)
try:
bplot.inext = inext
bplot.invers = invers
except Exception as exception:
pass
bplot.stokes = stokes
bplot.msgkill = 5 # Suppress blather
i = 1
for t in timerang:
bplot.timerang[i] = t
i += 1
bplot.logFile = logfile
if debug:
bplot.i
# Trap failure
try:
if not check:
bplot.g
except Exception as exception:
print(exception)
mess = "BPLOT Failed "
printMess(mess, logfile)
return 1
else:
pass
# Open/close UV to update header
if not check:
uv.Open(UV.READONLY,err)
uv.Close(err)
if err.isErr:
OErr.printErr(err)
mess = "Update UV header failed"
printMess(mess, logfile)
return 1
return 0
# end EVLAPlotBPTab
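# Illustrative use of EVLAPlotBPTab (sketch only; table version is
# hypothetical, uv/err assumed to exist):
#   retCode = EVLAPlotBPTab(uv, 0, err, inext="BP", logfile="pipeline.log")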
def EVLAWritePlots(uv, loPL, hiPL, plotFile, err, \
plotDesc="Diagnostic plot", \
logfile=None, check=False, debug=False):
"""
Writes plots to an external postscript file
All Plots deleted from AIPS
Returns task error code, 0=OK, else failed
* uv = UV data object to plot
* loPL = Lowest (1-rel) plot number
* hiPL = Highest PL version number (0->all)
* plotFile = plot file
* err = Obit error/message stack
* plotDesc = Description of plot
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = show input
"""
################################################################
# Open/close UV to update header
if not check:
uv.Open(UV.READONLY,err)
uv.Close(err)
if err.isErr:
OErr.printErr(err)
mess = "Update UV header failed"
printMess(mess, logfile)
return 1
if hiPL<=0 and not check:
hiPL = uv.GetHighVer("AIPS PL")
lwpla = AIPSTask.AIPSTask("lwpla")
try:
lwpla.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv,lwpla)
lwpla.plver = max(1, loPL)
lwpla.invers = hiPL
lwpla.outfile = plotFile
lwpla.logFile = logfile
lwpla.msgkill = 5 # Suppress blather - as much as possible
if debug:
lwpla.i
# Trap failure
try:
if not check:
lwpla.g
except Exception as exception:
print(exception)
mess = "Lwpla Failed - continuing anyway"
printMess(mess, logfile)
# return 1 # Continue in spite of lwpla failure
else:
if os.path.exists(plotFile): # May not exist
EVLAAddOutFile(plotFile, 'project', plotDesc, logFile=logfile)
# Delete plot files
if not check:
zz=uv.ZapTable("AIPS PL", -1,err)
return 0
# end EVLAWritePlots
def EVLASpecPlot(uv, Source, timerange, refAnt, err, Stokes=["RR","LL"], \
doband=0, flagVer=0, plotFile="./spec.ps", doPol=False, PDVer=-1, \
check=False, debug=False, logfile = ""):
"""
Plot amplitude and phase across the spectrum.
returns scratch file with plot
Note: possm can't apply flags so data copied to scratch file
Returns task error code, 0=OK, else failed
* uv = uv data object
* Source = Name of source to plot
* timerange = timerange (Obit form) to plot
* refAnt = ref. Ant, only baselines to this antenna plotted
* err = Obit error object
* Stokes = List of stokes types to plot
* doband = do bandpass calibration before plotting (requires BP table)
* flagVer = flag (FG) table version
* doPol = Apply polarization cal?
* PDVer = PD version for pol cal, -1=>use IF
* plotFile = name of output PS file
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
* logfile = Log file for task
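    Illustrative call (sketch only; source, timerange and refAnt are assumptions)::

        scr = EVLASpecPlot(uv, '3C286', [0.05, 0.10], 10, err,
                           doband=1, flagVer=2, plotFile='./spec.ps',
                           logfile='pipeline.log')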
"""
# Open/close UV to update header
if not check:
uv.Open(UV.READONLY,err)
uv.Close(err)
if err.isErr:
OErr.printErr(err)
mess = "Update UV header failed"
printMess(mess, logfile)
return None
# Calibrate and edit data
scr = uv.Scratch(err)
info = uv.List
info.set("doCalSelect",True)
info.set("doCalib",2)
info.set("gainUse",0)
info.set("doBand",doband)
info.set("BPVer",0)
info.set("flagVer",flagVer)
info.set("Sources",[Source])
info.set("Stokes"," ")
info.set("timeRange",timerange)
if doPol:
info.set("doPol", 1)
else:
info.set("doPol", 0)
info.set("PDVer", PDVer)
#uv.Header(err) # DEBUG
# Trap failure
try:
uv.Copy(scr, err)
except Exception as exception:
print(exception)
mess = "Copy plot data failed - continuing"
printMess(mess, logfile)
return None
else:
pass
scr.Info(err) # Get file information
info = uv.List
# Reset selection
info.set("doCalSelect",True)
info.set("doCalib",-1)
info.set("gainUse",0)
info.set("doBand",-1)
info.set("BPVer",0)
info.set("flagVer",0)
info.set("Sources",[" "])
info.set("timeRange",[0.0, 0.0])
info.set("doPol", 0)
info.set("PDVer", -1)
# If data labeled XY pol, relabel RR,LL... - POSSM cannot cope
d = scr.Desc.Dict
if d["crval"][d["jlocs"]]<-4:
d["crval"][d["jlocs"]] = -1.0
scr.Desc.Dict = d
scr.UpdateDesc(err)
# Setup and run POSSM
possm = AIPSTask.AIPSTask("possm")
try:
possm.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
setname(scr, possm)
source = [ Source ] # get BP calibration source, in list format
possm.sources= AIPSTask.AIPSList( source )
timerang = [ timerange[0], 0, 0, 0, timerange[1], 0, 0, 0 ]
possm.timerang = AIPSTask.AIPSList( timerang )
solint = (timerange[1]-timerange[0])*1440.0
possm.baseline[1] = refAnt
possm.flagver = -1 # POSSM can't flag
possm.aparm = AIPSTask.AIPSList( [0] * 10 ) # initialize with zeroes
possm.aparm[1] = -1 # scalar average
possm.aparm[9] = 3 # all IFs and pols in same frame
possm.nplots = 6 # plot each baseline in separate frame on page
possm.ltype = 3 # include all labels
possm.solint = solint # time interval of plot
possm.logFile = logfile
possm.msgkill = 5 # Suppress blather as much as possible
if debug:
possm.i;tput(possm)
# Loop over Stokes
for s in Stokes:
possm.stokes = s
# Trap failure
try:
if not check:
possm.g
except Exception as exception:
print(exception)
mess = "POSSM Failed - continue anyway"
printMess(mess, logfile)
# return 1
else:
pass
# End Stokes loop
return scr
# end EVLASpecPlot
def EVLAApplyCal(uv, err, SNver=0, CLin=0, CLout=0, maxInter=240.0, \
doSelf=False,
logfile=None, check=False, debug=False):
"""
Applies an SN table to a CL table and writes another
Returns task error code, 0=OK, else failed
* uv = UV data object to clear
* err = Obit error/message stack
* SNver = SN table to apply, 0=>highest
* CLin = input CL table, 0=>highest
* CLout = output CL table, 0=>create new
* maxInter = Max time (min) over which to interpolate
* doSelf = If true only apply calibrations to same source
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = show input, ObitTasks debug
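    Illustrative call (sketch only; applies the highest SN to the highest CL,
    writing a new CL table)::

        retCode = EVLAApplyCal(uv, err, maxInter=10.0, logfile='pipeline.log')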
"""
################################################################
# Open/close UV to update header
if not check:
uv.Open(UV.READONLY,err)
uv.Close(err)
if err.isErr:
OErr.printErr(err)
mess = "Update UV header failed"
printMess(mess, logfile)
return 1
if not check:
if SNver<=0:
SNver = uv.GetHighVer("AIPS SN")
if CLin<=0:
CLin = uv.GetHighVer("AIPS CL")
if CLout<=0:
CLout = uv.GetHighVer("AIPS CL")+1
if CLin<1:
mess = "No input CL table to update"
printMess(mess, logfile)
uv.Header(err)
return 1
mess = "Update CL "+str(CLin)+" with SN "+str(SNver)+" to CL "+str(CLout)
printMess(mess, logfile)
clcal = ObitTask.ObitTask("CLCal")
try:
clcal.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv,clcal)
clcal.solnVer = SNver
clcal.calIn = CLin
clcal.calOut = CLout
clcal.maxInter = maxInter
if doSelf:
clcal.interMode = "SELF"
clcal.taskLog = logfile
clcal.debug = debug
if debug:
clcal.i
# Trap failure
try:
if not check:
clcal.g
except Exception as exception:
print(exception)
mess = "CLCal Failed retCode="+str(clcal.retCode)
printMess(mess, logfile)
return 1
else:
pass
# End CLCal
# Open/close UV to update header
if not check:
uv.Open(UV.READONLY,err)
uv.Close(err)
if err.isErr:
OErr.printErr(err)
mess = "Update UV header failed"
printMess(mess, logfile)
return 1
return 0
# end EVLAApplyCal
def EVLASpectrum(uv, plotSource, plotTime, plotFile, refAnt, err, \
Stokes=["RR","LL"], flagVer=0, doband=-1, doPol=False, PDVer=-1, \
logfile=None, check=False, debug=False):
"""
Spectrum plot of selected data
Returns task error code, 0=OK, else failed
* uv = UV data object to clear
* plotSource = Name of source to plot
* plotTime = timerange (Obit form) to plot
* plotFile = name of output PS file
* refAnt = ref. Ant, only baselines to this antenna plotted
* err = Obit error/message stack
* Stokes = List of stokes types to plot
* flagVer = Flag (FG) table version
    * doband   = do bandpass calibration before plotting (requires BP table)
    * doPol    = Apply polarization cal?
    * PDVer    = PD version for pol cal, -1=>use IF
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = show input, ObitTasks debug
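    Illustrative call (sketch only; source, timerange and refAnt are assumptions)::

        retCode = EVLASpectrum(uv, '3C286', [0.05, 0.10], './spec.ps', 10, err,
                               flagVer=2, doband=1, logfile='pipeline.log')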
"""
################################################################
# POSSM can't apply flags so write scratch file and plot
scr = EVLASpecPlot( uv, plotSource, plotTime, refAnt, err, \
flagVer=flagVer,Stokes=Stokes, doband=doband, doPol=doPol, PDVer=PDVer, \
plotFile=plotFile, check=check, logfile=logfile )
retCode = 0
if scr and scr.UVIsA():
retCode = EVLAWritePlots (scr, 1, 0, plotFile, err, \
plotDesc="Spectrum plots", \
logfile=logfile, check=check, debug=debug)
if retCode!=0:
return 0 # tolerate failure
if scr!=None:
scr.Zap(err)
return 0
# end EVLASpectrum
def EVLAEditSNAmp(uv, SNver, err, \
sigma=20.0, FGver=-1, logfile='', check=False, debug=False):
"""
Flag SN table entries with amplitudes discrepant from IF median
For each IF in an SN table, the median amplitude and the RMS of
    the 80% of amplitudes least different from the median are determined.
Then SN table entries with amplitudes further from the median than
sigma*RMS are flagged.
Optionally adds entries to flag (FG) table
Returns with err set on error
* uv = UV data object
* SNver = SN table to flag, 0=> highest
* err = Python Obit Error/message stack
* sigma = multiple of inner RMS different from median to flag
Should be pretty big
* FGver = FG table to add flags to, <=0 ->none
* logfile = logfile for messages
* check = Only check script
* debug = Only debug - no effect
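    Illustrative call (sketch only; edits the highest SN table, no FG entries)::

        EVLAEditSNAmp(uv, 0, err, sigma=20.0, logfile='pipeline.log')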
"""
################################################################
if SNver<=0:
snver = uv.GetHighVer("AIPS SN")
else:
snver = SNver;
mess = "Edit SN table %d amplitudes by sigma %f" % (snver,sigma)
printMess(mess, logfile)
if FGver>0:
mess = "Also write flagging entries in FG table %d" % (FGver)
printMess(mess, logfile)
# Get statistics
stats = EVLASNAmpStats(uv, snver, err, \
logfile=logfile, check=check, debug=debug)
if stats==None or (len(stats)<=0) or err.isErr:
mess = "Problem with SN table statistics"
printMess(mess, logfile)
return
# Get Median RMS
t = []
for s in stats:
if s!=None:
t.append(s[1])
if len(t)<=0:
mess = "Problem with SN table statistics"
printMess(mess, logfile)
return
t.sort()
RMS = t[int(len(t)//2)]
mess = "Median RMS %f" % (RMS)
printMess(mess, logfile)
# Set clipping levels
cl = []
iif = 1
for s in stats:
if s!=None:
rang = [max(0.0,s[0]-sigma*RMS),s[0]+sigma*RMS]
cl.append(rang)
mess = "IF %d valid range = %s" % (iif,str(rang))
printMess(mess, logfile)
else:
cl.append(None)
mess = "IF %d: Too few data to edit" % (iif)
printMess(mess, logfile)
iif += 1;
# Clip/flag
EVLAClipSNAmp(uv, snver, cl, err,FGver=FGver, \
logfile=logfile, check=check, debug=debug)
if err.isErr:
mess = "Problem with clipping SN table or flagging"
printMess(mess, logfile)
return
# end EVLAEditSNAmp
def EVLAEditBPAmp(uv, BPver, err, \
sigma=10.0, logfile='', check=False, debug=False):
"""
Flag BP table entries with amplitudes discrepant from Antenna median
For each Antenna in a BP table, the median amplitude and the RMS of
    the 80% of amplitudes least different from the median are determined.
Then BP table entries with amplitudes further from the median than
sigma*RMS are flagged.
Returns with err set on error
* uv = UV data object
* BPver = BP table to flag, 0=> highest
* err = Python Obit Error/message stack
* sigma = multiple of inner RMS different from median to flag
Should be pretty big
* logfile = logfile for messages
* check = Only check script
* debug = Only debug - no effect
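    Illustrative call (sketch only; edits the highest BP table)::

        EVLAEditBPAmp(uv, 0, err, sigma=10.0, logfile='pipeline.log')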
"""
################################################################
if BPver<=0:
bpver = uv.GetHighVer("AIPS BP")
else:
bpver = BPver;
mess = "Edit BP table %d amplitudes by sigma %f" % (bpver,sigma)
printMess(mess, logfile)
# Get statistics
stats = EVLABPAmpStats(uv, bpver, err, \
logfile=logfile, check=check, debug=debug)
if stats==None or err.isErr:
mess = "Problem with BP table statistics"
printMess(mess, logfile)
return
# Set clipping levels
cl = []
iant = 1
for s in stats:
if s!=None:
rang = [max(0.0,s[0]-sigma*s[1]),s[0]+sigma*s[1]]
cl.append(rang)
mess = "Ant %d valid range = %s medn %f RMS %f" % (iant,str(rang), s[0],s[1])
printMess(mess, logfile)
else:
cl.append(None)
mess = "Ant %d: Too few data to edit" % (iant)
printMess(mess, logfile)
iant += 1;
# Clip/flag
EVLAClipBPAmp(uv, bpver, cl, err, \
logfile=logfile, check=check, debug=debug)
if err.isErr:
mess = "Problem with clipping BP table or flagging"
printMess(mess, logfile)
return
# end EVLAEditBPAmp
def EVLAFlagFailSN(uv, SNver, err, \
FGver=-1, FGuv=None, logfile='', check=False, debug=False):
"""
Make entries in FG table for times of failed SN solutions
Returns with err set on error
* uv = UV data object
* SNver = SN table to flag, 0=> highest
* err = Python Obit Error/message stack
* FGver = FG table to add flags to, <=0 ->none
    * FGuv     = None or uv data to write flags to, None=>uv
* logfile = logfile for messages
* check = Only check script
* debug = Only debug - no effect
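    Illustrative call (sketch only; FG version is an assumption)::

        EVLAFlagFailSN(uv, 0, err, FGver=2, logfile='pipeline.log')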
"""
################################################################
if SNver<=0:
snver = uv.GetHighVer("AIPS SN")
else:
snver = SNver;
# Flag uv data
if FGuv==None:
outuv = uv
else:
outuv = FGuv
mess = "Failed solutions in SN %d flagged in FG %d" % (snver,FGver)
printMess(mess, logfile)
fblank = FArray.PGetBlank() # Magic blanking value
# Number of IFs
nif = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocif"]]
# Number of Stokes
npoln = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocs"]]
    SNTab = uv.NewTable(Table.READONLY, "AIPS SN", snver, err, \
numIF=nif, numPol=npoln)
if err.isErr:
return
# Open
SNTab.Open(Table.READWRITE, err)
if err.isErr:
return
# Number of rows
nrow = SNTab.Desc.Dict["nrow"]
count = 0; total = 0
for i in range (0,nrow): # Loop over rows
SNrow = SNTab.ReadRow(i+1, err)
if err.isErr:
return
dirty = False
# Loop over IF
for iif in range (0, nif):
total += 1
if (SNrow["WEIGHT 1"][iif]<=0.0) or (SNrow["REAL1"][iif]==fblank):
# Flag table?
EVLAFlagSNClip(uv, SNrow, iif+1, 1, err, FGver=FGver, FGuv=outuv, reason="Failed soln", \
logfile=logfile, check=check, debug=debug)
count += 1
dirty = True
# Second Poln
if npoln>1:
total += 1
                if (SNrow["WEIGHT 2"][iif]<=0.0) or (SNrow["REAL2"][iif]==fblank):
# Flag table?
EVLAFlagSNClip(uv, SNrow, iif+1, 2, err, FGver=FGver, FGuv=outuv, reason="Failed soln", \
logfile=logfile, check=check, debug=debug)
count += 1
dirty = True
# Rewrite if modified
if dirty and not check:
SNTab.WriteRow(i+1, SNrow, err)
if err.isErr:
return
# end loop over rows
# Close table
SNTab.Close(err)
if err.isErr:
return
mess = "Flagged %d of total %d Gain entries" % (count, total)
printMess(mess, logfile)
# end EVLAFlagFailSN
def EVLASNAmpStats(uv, SNver, err, logfile='', check=False, debug=False):
"""
Determine median and RMS of inner 80% of amplitude distribution per IF
Returns with err set on error
Return list:
[(medn1, RMS1),...,(mednnumIF,RMSnumIF)]
* uv = UV data object
* SNver = SN table to flag
* err = Python Obit Error/message stack
* logfile = logfile for messages
* check = Only check script
* debug = Only debug - no effect
"""
################################################################
fblank = FArray.PGetBlank() # Magic blanking value
# Number of IFs
nif = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocif"]]
# Number of Stokes
npoln = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocs"]]
npoln = min(2, npoln) # No more than 2
SNtab = uv.NewTable(Table.READONLY, "AIPS SN", SNver, err, \
numIF=nif, numPol=npoln)
if err.isErr:
return None
# Open
SNtab.Open(Table.READWRITE, err)
if err.isErr:
return None
# Number of SN rows
nrow = SNtab.Desc.Dict["nrow"]
# Initialize accumulators, 1 per IF
# Each a list of amplitudes
amps = []
for i in range(0,nif):
amps.append([])
for i in range (0,nrow): # Loop over rows
SNrow = SNtab.ReadRow(i+1, err)
if err.isErr:
return None
for iif in range(0,nif):
if (SNrow["WEIGHT 1"][iif]>0.0) and (SNrow["REAL1"][iif]!=fblank):
amp = (SNrow["REAL1"][iif]**2+SNrow["IMAG1"][iif]**2)**0.5
amps[iif].append(amp)
if (npoln>1) and (SNrow["WEIGHT 2"][iif]>0.0) and (SNrow["REAL2"][iif]!=fblank):
amp = (SNrow["REAL2"][iif]**2+SNrow["IMAG2"][iif]**2)**0.5
amps[iif].append(amp)
# end IF loop
# End loop over table
# Close table
SNtab.Close(err)
if err.isErr:
return None
# Sort lists, get median, inner RMS
out = [] # Initialize output
for iif in range(0,nif):
if len(amps[iif])>3: # Need a min. amount of data
amps[iif].sort()
num = len(amps[iif])
medn = amps[iif][int(num//2)]
# inner 80% RMS about median
b = int(num//10); e = int(9*num//10);
sum2 = 0.0; count = 0
for i in range(b,e+1):
val = amps[iif][i]-medn
sum2 += val*val
count += 1
RMS = (sum2/count)**0.5
out.append((medn,RMS))
else:
out.append(None) # Too little
# end IF loop
return out
# end EVLASNAmpStats
def EVLABPAmpStats(uv, BPver, err, logfile='', check=False, debug=False):
"""
Determine median and RMS of inner 80% of amplitude distribution per Ant
Returns with err set on error
Return list:
[(medn1, RMS1),...,(mednnumAnt,RMSnumAnt)]
* uv = UV data object
* BPver = BP table to flag
* err = Python Obit Error/message stack
* logfile = logfile for messages
* check = Only check script
* debug = Only debug - no effect
"""
################################################################
fblank = FArray.PGetBlank() # Magic blanking value
# Number of IFs
nif = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocif"]]
# Number of channels
nchan = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocf"]]
# Number of Stokes
npoln = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocs"]]
npoln = min(2, npoln) # No more than 2
BPtab = uv.NewTable(Table.READONLY, "AIPS BP", BPver, err, \
numIF=nif, numPol=npoln)
if err.isErr:
return None
# Open
BPtab.Open(Table.READWRITE, err)
if err.isErr:
return None
# Number of antennas
nant = BPtab.Desc.List.Dict["NO_ANT"][2][0]
# Number of BP rows
nrow = BPtab.Desc.Dict["nrow"]
# Initialize accumulators, 1 per Antenna
# Each a list of amplitudes
amps = []
for i in range(0,nant):
amps.append([])
for i in range (0,nrow): # Loop over rows
BProw = BPtab.ReadRow(i+1, err)
if err.isErr:
return None
iant = BProw["ANTENNA"][0]-1 # 0 rel
for iif in range(0,nif*nchan):
if(BProw["REAL 1"][iif]!=fblank):
amp = (BProw["REAL 1"][iif]**2+BProw["IMAG 1"][iif]**2)**0.5
amps[iant].append(amp)
if (npoln>1) and (BProw["REAL 2"][iif]!=fblank):
amp = (BProw["REAL 2"][iif]**2+BProw["IMAG 2"][iif]**2)**0.5
amps[iant].append(amp)
# end IF loop
# End loop over table
if debug:
for i in range(0,nant):
if len(amps[i])>3:
print("EVLABPAmpStats max/min Ant ",i+1,max(amps[i]), min(amps[i]))
# Close table
BPtab.Close(err)
if err.isErr:
return None
# Sort lists, get median, inner RMS
out = [] # Initialize output
for iant in range(0,nant):
if len(amps[iant])>3: # Need a min. amount of data
amps[iant].sort()
num = len(amps[iant])
medn = amps[iant][int(num//2)]
            # inner 80% RMS about median
b = int(num//10); e = int(9*num//10);
sum2 = 0.0; count = 0
for i in range(b,e+1):
val = amps[iant][i]-medn
sum2 += val*val
count += 1
RMS = (sum2/count)**0.5
out.append((medn,RMS))
else:
out.append(None) # Too little
# end antenna loop
return out
# end EVLABPAmpStats
def EVLAClipSNAmp(uv, SNver, arange, err, \
FGver=-1, FGuv=None, logfile='', check=False, debug=False):
"""
    Flag SN table entries with amplitudes outside of [arange[0], arange[1]] (given per IF)
Optionally adds entry to flag (FG) table
Returns with err set on error
* uv = UV data object
* SNver = SN table to flag
* arange = [min amp, max amp] allowed. list per IF
IF with None entry are ignored
* err = Python Obit Error/message stack
* FGver = FG table to add flags to, <=0 ->none
    * FGuv     = None or uv data to write flags to, None=>uv
* logfile = logfile for messages
* check = Only check script
* debug = Only debug - no effect
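    Illustrative call (sketch only; a two-IF dataset with assumed amplitude ranges)::

        EVLAClipSNAmp(uv, 1, [[0.5, 2.0], [0.5, 2.0]], err, FGver=2,
                      logfile='pipeline.log')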
"""
################################################################
fblank = FArray.PGetBlank() # Magic blanking value
# Number of IFs
nif = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocif"]]
# Number of Stokes
npoln = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocs"]]
SNTab = uv.NewTable(Table.READONLY, "AIPS SN", SNver, err, \
numIF=nif, numPol=npoln)
if err.isErr:
return
# Flag uv data
if FGuv==None:
outuv = uv
else:
outuv = FGuv
# Open
SNTab.Open(Table.READWRITE, err)
if err.isErr:
return
# Number of rows
nrow = SNTab.Desc.Dict["nrow"]
count = 0; total = 0
for i in range (0,nrow): # Loop over rows
SNrow = SNTab.ReadRow(i+1, err)
if err.isErr:
return
dirty = False
# Loop over IF
for iif in range (0, nif):
if arange[iif]!=None:
amin = arange[iif][0]
amax = arange[iif][1]
if (SNrow["WEIGHT 1"][iif]>0.0) and (SNrow["REAL1"][iif]!=fblank):
total += 1
amp = (SNrow["REAL1"][iif]**2+SNrow["IMAG1"][iif]**2)**0.5
if (amp<amin) or (amp>amax):
# Flag table?
EVLAFlagSNClip(uv, SNrow, iif+1, 1, err, FGver=FGver, FGuv=outuv, \
logfile=logfile, check=check, debug=debug)
SNrow["REAL1"][iif] = fblank
SNrow["IMAG1"][iif] = fblank
SNrow["WEIGHT 1"][iif] = 0.0
count += 1
dirty = True
# Second Poln
if (npoln>1) and (SNrow["WEIGHT 2"][iif]>0.0) and (SNrow["REAL2"][iif]!=fblank):
total += 1
amp = (SNrow["REAL2"][iif]**2+SNrow["IMAG2"][iif]**2)**0.5
if (amp<amin) or (amp>amax):
# Flag table?
EVLAFlagSNClip(uv, SNrow, iif+1, 2, err, FGver=FGver, FGuv=outuv, \
logfile=logfile, check=check, debug=debug)
SNrow["REAL2"][iif] = fblank
SNrow["IMAG2"][iif] = fblank
SNrow["WEIGHT 2"][iif] = 0.0
count += 1
dirty = True
# Rewrite if modified
if dirty and not check:
SNTab.WriteRow(i+1, SNrow, err)
if err.isErr:
return
# end loop over rows
# Close table
SNTab.Close(err)
if err.isErr:
return
mess = "Flagged %d of total %d Gain entries" % (count, total)
printMess(mess, logfile)
# end EVLAClipSNAmp
def EVLAClipBPAmp(uv, BPver, arange, err, \
logfile='', check=False, debug=False):
"""
    Flag BP table entries with amplitudes outside of [arange[0], arange[1]] (given per antenna)
Returns with err set on error
* uv = UV data object
* BPver = BP table to flag
* arange = [min amp, max amp] allowed. list per Antenna
Ant with None entry are ignored
* err = Python Obit Error/message stack
* logfile = logfile for messages
* check = Only check script
* debug = Only debug messages - no effect
"""
################################################################
fblank = FArray.PGetBlank() # Magic blanking value
# Number of IFs
nif = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocif"]]
# Number of channels
nchan = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocf"]]
# Number of Stokes
npoln = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocs"]]
BPTab = uv.NewTable(Table.READONLY, "AIPS BP", BPver, err, \
numIF=nif, numPol=npoln)
if err.isErr:
return
# Open
BPTab.Open(Table.READWRITE, err)
if err.isErr:
return
# Number of antennas
nant = BPTab.Desc.List.Dict["NO_ANT"][2][0]
# Number of rows
nrow = BPTab.Desc.Dict["nrow"]
count = 0; total = 0
for i in range (0,nrow): # Loop over rows
BProw = BPTab.ReadRow(i+1, err)
if err.isErr:
return
dirty = False
iant = BProw["ANTENNA"][0]-1 # 0 rel
if arange[iant]!=None:
amin = arange[iant][0]
amax = arange[iant][1]
# Loop over IF,chan
for iif in range (0, nif*nchan):
if (BProw["REAL 1"][iif]!=fblank):
total += 1
amp = (BProw["REAL 1"][iif]**2+BProw["IMAG 1"][iif]**2)**0.5
if (amp<amin) or (amp>amax):
BProw["REAL 1"][iif] = fblank
BProw["IMAG 1"][iif] = fblank
count += 1
dirty = True
if debug:
print("EVLAClipBPAmp flag ant "+str(iant+1)+" pol 1 iif "+str(iif)+" amp "+str(amp))
# Second Poln
if (npoln>1) and (BProw["REAL 2"][iif]!=fblank):
total += 1
amp = (BProw["REAL 2"][iif]**2+BProw["IMAG 2"][iif]**2)**0.5
if (amp<amin) or (amp>amax):
BProw["REAL 2"][iif] = fblank
BProw["IMAG 2"][iif] = fblank
count += 1
dirty = True
if debug:
print("EVLAClipBPAmp flag ant "+str(iant+1)+" pol 1 iif "+str(iif)+" amp "+str(amp))
# Rewrite if modified
if dirty and not check:
BPTab.WriteRow(i+1, BProw, err)
if err.isErr:
return
# end loop over rows
# Close table
BPTab.Close(err)
if err.isErr:
return
mess = "Flagged %d of total %d Gain entries" % (count, total)
printMess(mess, logfile)
# end EVLAClipBPAmp
def EVLAFlagSNClip(uv, SNrow, IFno, poln, err, \
FGver=-1, FGuv=None, reason="BadAmp", logfile='', check=False, debug=False):
"""
Write flag table entry for SN table row
Returns with err set on error
* uv = UV data object
* SNrow = SN table row
* IFno = IF number to flag
* poln = polarization (1 or 2)
* err = Python Obit Error/message stack
* FGver = FG table to add flags to, <=0 ->none
    * FGuv     = None or uv data to write flags to, None=>uv
* reason = reason string
* logfile = logfile for messages
* check = Only check script
* debug = Only debug
"""
################################################################
    if FGver<=0:   # Anything wanted?
return
# Flag uv data
if FGuv==None:
outuv = uv
else:
outuv = FGuv
tr = [SNrow["TIME"][0]-SNrow["TIME INTERVAL"][0],SNrow["TIME"][0]+SNrow["TIME INTERVAL"][0]]
Ants = [SNrow["ANTENNA NO."][0],0]
IFs = [IFno, IFno]
sid = SNrow["SOURCE ID"][0]
if poln==1:
Stokes="1011"
amp = (SNrow["REAL1"][IFno-1]**2+SNrow["IMAG1"][IFno-1]**2)**0.5
else:
Stokes="0111"
amp = (SNrow["REAL2"][IFno-1]**2+SNrow["IMAG2"][IFno-1]**2)**0.5
if not check:
UV.PFlag(outuv, err, flagVer=FGver,
timeRange=tr, Ants=Ants, IFs=IFs, Stokes=Stokes,
Reason=reason)
if err.isErr:
return
if debug:
mess = "Flag SID %d Ant %d IF %d Poln %d Timerange %s - %s amp %f" % \
(sid, Ants[0],IFno,poln,day2dhms(tr[0]),day2dhms(tr[1]), amp)
printMess(mess, logfile)
# end EVLAFlagSNClip
def EVLACalModel(Source,
CalDataType=" ", CalFile=" ", CalName=" ", CalClass=" ", CalSeq=0, CalDisk=0, \
CalNfield=0, CalCCVer=1, CalBComp=[1], CalEComp=[0], CalCmethod=" ", CalCmode=" ", CalFlux=0.0, \
CalModelFlux=0.0, CalModelSI=0.0,CalModelPos=[0.,0.], CalModelParm=[0.,0.,0.], \
useSetJy=False):
"""
Create a calibrator model
returns dictionary with entries:
* Source = Calibrator source name
* CalDataType = Calibrator model file data type (AIPS,FITS)
* CalFile = Calibrator model FITS input image if Type=='FITS'
* CalName = Calibrator model Cleaned AIPS map name
* CalClass = Calibrator model Cleaned AIPS map class
* CalSeq = Calibrator model Cleaned AIPS map seq
* CalDisk = Calibrator model Cleaned AIPS map disk
* CalNfield = Calibrator model No. maps to use for model
* CalCCVer = Calibrator model CC file version
* CalBComp = Calibrator model First CLEAN comp to use, 1/field
* CalEComp = Calibrator model Last CLEAN comp to use, 0=>all
* CalCmethod = Calibrator model Modeling method, 'DFT','GRID',' '
* CalCmodel = Calibrator model Model type: 'COMP','IMAG'
* CalFlux = Calibrator model Lowest CC component used
* CalModelSI = Calibrator Spectral index
* CalModelFlux= Parameterized model flux density (Jy)
* CalModelPos = Parameterized model Model position offset (asec)
* CalModelParm= Parameterized model Model parameters (maj, min, pa, type)
* useSetJy = Flag to use the flux density calculated from SetJy
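    Illustrative call (sketch only; a point-source model with assumed values)::

        cal = EVLACalModel('3C286', CalModelFlux=1.5, CalModelSI=-0.5)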
"""
out = {
"Source":Source,
"CalDataType":CalDataType,
"CalFile":CalFile,
"CalName":CalName,
"CalClass":CalClass,
"CalSeq":CalSeq,
"CalDisk":CalDisk,
"CalNfield":CalNfield,
"CalCCVer":CalCCVer,
"CalBComp":CalBComp,
"CalEComp":CalEComp,
"CalCmethod":CalCmethod,
"CalCmodel":CalCmode,
"CalFlux":CalFlux,
"CalModelSI":CalModelSI,
"CalModelFlux":CalModelFlux,
"CalModelPos":CalModelPos,
"CalModelParm":CalModelParm,
"useSetJy":useSetJy
}
return out
# end EVLACalModel
def EVLAStdModel(Cals, freq):
"""
Check for standard models in a calibrator list
"""
# Standard models in FITS files
stdModel = []
# 3C286 Q
model = {"Source":["3C286","3c286","J1331+3030","1331+305=3C286","3C 286"],
"freqRange":[40.0e9,50.0e9],
"file":"3C286QModel.fits","disk":1}
stdModel.append(model)
# 3C286 Ka
model = {"Source":["3C286","3c286","J1331+3030","1331+305=3C286","3C 286"],
"freqRange":[28.0e9,40.0e9],
"file":"3C286KaModel.fits","disk":1}
stdModel.append(model)
# 3C286 K
model = {"Source":["3C286","3c286","J1331+3030","1331+305=3C286","3C 286"],
"freqRange":[18.01e9,28.0e9],
"file":"3C286KModel.fits","disk":1}
stdModel.append(model)
# 3C286 Ku
model = {"Source":["3C286","3c286","J1331+3030","1331+305=3C286","3C 286"],
"freqRange":[12.0e9,18.0e9],
"file":"3C286KuModel.fits","disk":1}
stdModel.append(model)
# 3C286 X
model = {"Source":["3C286","3c286","J1331+3030","1331+305=3C286","3C 286"],
"freqRange":[8.0e9,12.0e9],
"file":"3C286XModel.fits","disk":1}
stdModel.append(model)
# 3C286 Chi
model = {"Source":["3C286","3c286","J1331+3030","1331+305=3C286","3C 286"],
"freqRange":[6.0e9,8.0e9],
"file":"3C286CModel.fits","disk":1}
#"file":"3C286ChiModel.fits","disk":1}
stdModel.append(model)
# 3C286 C
model = {"Source":["3C286","3c286","J1331+3030","1331+305=3C286","3C 286"],
"freqRange":[3.9e9,8.1e9],
"file":"3C286CModel.fits","disk":1}
stdModel.append(model)
# 3C286 S
model = {"Source":["3C286","3c286","J1331+3030","1331+305=3C286","3C 286"],
"freqRange":[1.9e9,4.1e9],
"file":"3C286SModel.fits","disk":1}
stdModel.append(model)
# 3C286 L
model = {"Source":["3C286","3c286","J1331+3030","1331+305=3C286","3C 286"],
"freqRange":[0.9e9,2.1e9],
"file":"3C286LModel.fits","disk":1}
stdModel.append(model)
# 3C286 P
model = {"Source":["3C286","3c286","J1331+3030","1331+305=3C286","3C 286"],
"freqRange":[1.5e8,7.0e8],
"file":"3C286PModel.fits","disk":1}
stdModel.append(model)
# 3C147 Q
model = {"Source":["3C147","3c147","J0542+4951", "0542+498=3C147"],
"freqRange":[40.0e9,50.0e9],
"file":"3C147QModel.fits","disk":1}
stdModel.append(model)
# 3C147 Ka
model = {"Source":["3C147","3c147","J0542+4951", "0542+498=3C147"],
"freqRange":[28.0e9,40.0e9],
"file":"3C147KaModel.fits","disk":1}
stdModel.append(model)
# 3C147 K
model = {"Source":["3C147","3c147","J0542+4951", "0542+498=3C147"],
"freqRange":[18.01e9,28.0e9],
"file":"3C147KModel.fits","disk":1}
stdModel.append(model)
# 3C147 Ku
model = {"Source":["3C147","J0542+4951", "0542+498=3C147"],
"freqRange":[12.0e9,18.0e9],
"file":"3C147KuModel.fits","disk":1}
stdModel.append(model)
# 3C147 X
model = {"Source":["3C147","3c147","J0542+4951", "0542+498=3C147"],
"freqRange":[8.0e9,12.0e9],
"file":"3C147XModel.fits","disk":1}
stdModel.append(model)
# 3C147 S
model = {"Source":["3C147","J0542+4951", "0542+498=3C147"],
"freqRange":[1.9e9,4.1e9],
"file":"3C147SModel.fits","disk":1}
stdModel.append(model)
# 3C147 C
model = {"Source":["3C147","3c147","J0542+4951", "0542+498=3C147"],
"freqRange":[3.9e9,8.1e9],
"file":"3C147CModel.fits","disk":1}
stdModel.append(model)
# 3C147 L
model = {"Source":["3C147","3c147","J0542+4951", "0542+498=3C147"],
"freqRange":[0.9e9,2.1e9],
"file":"3C147LModel.fits","disk":1}
stdModel.append(model)
# 3C147 P
model = {"Source":["3C147","3c147", "J0542+4951", "0542+498=3C147"],
"freqRange":[1.5e8,7.0e8],
"file":"3C147PModel.fits","disk":1}
stdModel.append(model)
# 3C48 Q
model = {"Source":["3C48","3c48","J0137+3309","0137+331=3C48"],
"freqRange":[40.0e9,50.0e9],
"file":"3C48QModel.fits","disk":1}
stdModel.append(model)
# 3C48 Ka
model = {"Source":["3C48","3c48","J0137+3309","0137+331=3C48"],
"freqRange":[28.0e9,40.0e9],
"file":"3C48KaModel.fits","disk":1}
stdModel.append(model)
# 3C48 K
model = {"Source":["3C48","3c48","J0137+3309","0137+331=3C48"],
"freqRange":[18.01e9,28.0e9],
"file":"3C48KModel.fits","disk":1}
stdModel.append(model)
# 3C48 C
model = {"Source":["3C48","3c48","J0137+3309","0137+331=3C48"],
"freqRange":[3.9e9,8.1e9],
"file":"3C48CModel.fits","disk":1}
stdModel.append(model)
# 3C48 S
model = {"Source":["3C48","3c48","J0137+3309","0137+331=3C48"],
"freqRange":[1.9e9,4.1e9],
"file":"3C48SModel.fits","disk":1}
stdModel.append(model)
# 3C48 L
model = {"Source":["3C48","3c48","J0137+3309","0137+331=3C48"],
"freqRange":[0.9e9,2.1e9],
"file":"3C48LModel.fits","disk":1}
stdModel.append(model)
# 3C48 P
model = {"Source":["3C48","3c48","J0137+3309","0137+331=3C48"],
"freqRange":[1.5e8,7.0e8],
"file":"3C48PModel.fits","disk":1}
stdModel.append(model)
# 3C123 C
model = {"Source":["3C123","3c123","J0437+2940"],
"freqRange":[3.9e9,8.1e9],
"file":"3C123CModel.fits","disk":1}
stdModel.append(model)
# 3C123 S
model = {"Source":["3C123","3c123","J0437+2940"],
"freqRange":[1.9e9,4.1e9],
"file":"3C123SModel.fits","disk":1}
stdModel.append(model)
# 3C123 L
model = {"Source":["3C123","3c123","J0437+2940"],
"freqRange":[0.9e9,0.1e9],
"file":"3C123LModel.fits","disk":1}
stdModel.append(model)
# 3C123 P
model = {"Source":["3C123","3c123","J0437+2940"],
"freqRange":[1.5e8,7.0e8],
"file":"3C123PModel.fits","disk":1}
stdModel.append(model)
# 3C138 Q
model = {"Source":["3C138","3c138","J0521+1638","0521+166=3C138"],
"freqRange":[40.0e9,50.0e9],
"file":"3C138QModel.fits","disk":1}
stdModel.append(model)
# 3C138 Ka
model = {"Source":["3C138","3c138","J0521+1638","0521+166=3C138"],
"freqRange":[28.0e9,40.0e9],
"file":"3C138KaModel.fits","disk":1}
stdModel.append(model)
# 3C138 K
model = {"Source":["3C138","3c138","J0521+1638","0521+166=3C138"],
"freqRange":[18.01e9,28.0e9],
"file":"3C138KModel.fits","disk":1}
stdModel.append(model)
# 3C138 Ku
model = {"Source":["3C138","3c138","J0521+1638","0521+166=3C138"],
"freqRange":[12.0e9,18.0e9],
"file":"3C138KuModel.fits","disk":1}
stdModel.append(model)
# 3C138 X
model = {"Source":["3C138","3c138","J0521+1638","0521+166=3C138"],
"freqRange":[8.0e9,12.0e9],
"file":"3C138XModel.fits","disk":1}
stdModel.append(model)
# 3C138 C
model = {"Source":["3C138","3c138","J0521+1638","0521+166=3C138"],
"freqRange":[3.9e9,8.1e9],
"file":"3C138CModel.fits","disk":1}
stdModel.append(model)
# 3C138 S
model = {"Source":["3C138","3c138","J0521+1638","0521+166=3C138"],
"freqRange":[1.9e9,4.1e9],
"file":"3C138SModel.fits","disk":1}
stdModel.append(model)
# 3C138 L
model = {"Source":["3C138","3c138","J0521+1638","0521+166=3C138"],
"freqRange":[0.9e9,2.1e9],
"file":"3C138LModel.fits","disk":1}
stdModel.append(model)
# 3C138 P
model = {"Source":["3C138","3c138","J0521+1638","0521+166=3C138"],
"freqRange":[1.5e8,7.0e8],
"file":"3C138PModel.fits","disk":1}
stdModel.append(model)
# 3C295 C
model = {"Source":["3C295","3c295","J1411+5212"],
"freqRange":[3.9e9,8.1e9],
"file":"3C295CModel.fits","disk":1}
stdModel.append(model)
# 3C295 S
model = {"Source":["3C295","3c295","J1411+5212"],
"freqRange":[1.9e9,4.1e9],
"file":"3C295SModel.fits","disk":1}
stdModel.append(model)
# 3C295 L
model = {"Source":["3C295","3c295","J1411+5212"],
"freqRange":[0.9e9,2.1e9],
"file":"3C295LModel.fits","disk":1}
stdModel.append(model)
# 3C295 P
model = {"Source":["3C295","3c295","J1411+5212"],
"freqRange":[1.5e8,7.0e8],
"file":"3C295PModel.fits","disk":1}
stdModel.append(model)
# 3C380 C
model = {"Source":["3C380","3c380","J1829+4844"],
"freqRange":[3.9e9,8.1e9],
"file":"3C380CModel.fits","disk":1}
stdModel.append(model)
# 3C380 S
model = {"Source":["3C380","3c380","J1829+4844"],
"freqRange":[1.9e9,4.1e9],
"file":"3C380SModel.fits","disk":1}
stdModel.append(model)
# 3C380 L
model = {"Source":["3C380","3c380","J1829+4844"],
"freqRange":[0.9e9,2.1e9],
"file":"3C380LModel.fits","disk":1}
stdModel.append(model)
# 3C380 P
model = {"Source":["3C380","3c380","J1829+4844"],
"freqRange":[1.5e8,7.0e8],
"file":"3C380PModel.fits","disk":1}
stdModel.append(model)
# loop testing
for Cal in Cals:
for model in stdModel:
if (Cal["Source"] in model["Source"]) and \
(freq>=model["freqRange"][0]) and \
(freq<=model["freqRange"][1]):
Cal["CalFile"] = model["file"]
Cal["CalDisk"] = model["disk"]
Cal["CalDataType"] = 'FITS'
Cal["CalCmethod"] = 'DFT'
Cal["CalNfield"] = 1
break
# end EVLAStdModel
def EVLAGetRefAnt(uv, Cals, err, solInt=10.0/60.0, flagVer=2, nThreads=1, \
noScrat=[], logfile='', check=False, debug=False):
"""
Find the best reference antenna
Runs Calib on Cals using the center half of each spectrum and
determine antenna with best average SNR
Return reference antenna number
* uv = UV data object to calibrate
* Cals = List of calibrators possibly with model
* err = Obit error/message stack
* solInt = solution interval (min)
* flagVer = Input Flagging table version
* nThreads = Number of threads to use
* check = Only check script, don't execute tasks
* debug = Run tasks debug, show input
* noScrat = list of disks to avoid for scratch files
* logfile = Log file for tasks
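    Illustrative call (sketch only; calibrator list built with EVLACalModel)::

        Cals = [EVLACalModel('3C286')]
        refAnt = EVLAGetRefAnt(uv, Cals, err, flagVer=2, nThreads=2,
                               logfile='pipeline.log')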
"""
# Calib on Amp cals
calib = ObitTask.ObitTask("Calib")
try:
calib.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
calib.taskLog = logfile
if not check:
setname(uv,calib)
calib.flagVer = flagVer
calib.solMode = "P!A"
calib.solType = "L1"
calib.nThreads = nThreads
calib.solInt = solInt
calib.noScrat = noScrat
# Central half of channels
# Channel selection
if not check:
d = uv.Desc.Dict
nchan =int( d["inaxes"][d["jlocf"]])
else:
nchan = 1
# Number to drop off each end
mchan = max (1, int(nchan/4))
calib.BChan = mchan
calib.EChan = nchan - mchan
OK = False # Must have some work
# Loop over calibrators
for Cal in Cals:
calib.Sources[0]= Cal["Source"]
calib.DataType2 = Cal["CalDataType"]
calib.in2File = Cal["CalFile"]
calib.in2Name = Cal["CalName"]
calib.in2Class = Cal["CalClass"]
calib.in2Seq = Cal["CalSeq"]
calib.in2Disk = Cal["CalDisk"]
calib.nfield = Cal["CalNfield"]
calib.CCVer = Cal["CalCCVer"]
calib.BComp = Cal["CalBComp"]
calib.EComp = Cal["CalEComp"]
calib.Cmethod = Cal["CalCmethod"]
calib.Cmodel = Cal["CalCmodel"]
calib.Flux = Cal["CalFlux"]
calib.Alpha = Cal["CalModelSI"]
calib.modelFlux = Cal["CalModelFlux"]
calib.modelPos = Cal["CalModelPos"]
calib.modelParm = Cal["CalModelParm"]
if debug:
calib.i
calib.debug = debug
#calib.prtLv = 5
# Trap failure
try:
if not check:
calib.g
pass
except Exception as exception:
print(exception)
mess = "Calib Failed retCode= "+str(calib.retCode)+" Source "+calib.Sources[0]
printMess(mess, logfile)
#return 1 # allow some failures
else:
OK = True
# end calibration loop
# Something work?
if not OK:
printMess("All calibrators failed", logfile)
return 1
# Open and close image to sync with disk
if not check:
uv.Open(UV.READONLY, err)
uv.Close(err)
# Digest SN table
if not check:
hiSN = uv.GetHighVer("AIPS SN")
mess = "Using SN table %d"%(hiSN)
printMess(mess, logfile)
stats = EVLASNStats(uv, hiSN, 1.0, err, logfile=logfile, check=check, debug=debug)
if err.isErr:
raise RuntimeError("Error finding reference antenna")
refAnt = stats["bestRef"]
del stats
else:
refAnt = 0
return refAnt
# end EVLAGetRefAnt
def EVLASNStats(uv, SNver, solInt, err, refAnts=[0], logfile='', check=False, debug=False):
"""
Find good timerange/ reference antenna on the basis of an SN table
Returns with err set on error
Return dict::
{"Source":source, "souID":souID, "timeRange":(tbeg,tend),
"Fract":fract_OK, "SNR":avg_SNR,"bestRef":refAnt}
If there is no SU table or source ID not in table, source name is blank
* uv = UV data object
* SNver = SN table to test
* solInt = statistics interval (min)
* refAnts = If values given, then list of acceptable ref. ants
* err = Python Obit Error/message stack
* logfile = logfile for messages
* check = Only check script
* debug = Only debug - no effect
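    Illustrative call (sketch only; 1 min statistics interval on the highest SN table)::

        hiSN   = uv.GetHighVer("AIPS SN")
        stats  = EVLASNStats(uv, hiSN, 1.0, err, logfile='pipeline.log')
        refAnt = stats["bestRef"]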
"""
################################################################
badDict = {"Source":"None", "souID":0,"timeRange":[0.0,1000.0], \
"Fract":0.0, "SNR":0.0, "bestRef":-1}
# Number of IFs
nif = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocif"]]
# Number of Stokes
npoln = uv.Desc.Dict["inaxes"][uv.Desc.Dict["jlocs"]]
npoln = min(2, npoln) # No more than 2
SNtab = uv.NewTable(Table.READONLY, "AIPS SN", SNver, err, \
numIF=nif, numPol=npoln)
if err.isErr:
return badDict
# Make sure sorted
Table.PSort(SNtab, "TIME", False, err)
if err.isErr:
return badDict
# Number of antennas
nant = SNtab.Desc.List.Dict["NO_ANT"][2][0]
# Open
SNtab.Open(Table.READONLY, err)
if err.isErr:
return badDict
# Number of rows
nrow = SNtab.Desc.Dict["nrow"]
# Better be some
if nrow<2:
OErr.PLog(err, OErr.MildError, "Empty SN table %d"%(SNver))
return badDict
# Initialize
solIntD = solInt/1440.
time0 = -1.0e20 # Beginning time of current interval
timeE = -1.0e20 # End time of current interval
tlast = -1.0e20 # Last time
    souID    = -10      # Current source ID
accum = [] # solution period statistics array
times = None
SNR1 = None
SNR2 = None
antCnt = None
fract = 0.0
avgSNR = 0.0
totAnt = [] # Total valid IF/poln count per antenna
snrAnt = [] # Total valid IF/poln SNR per antenna
for i in range(0,nant):
totAnt.append(0)
snrAnt.append(0.0)
# For each interval collect an accum entry containing
# 0) source ID
# 1) (beginning_time, end_time)
    # 2) Fraction of total ant/IF/poln occurring
# 3) Average SNR
# 4) [antenna_occurance_count]
# 5) [[avg_SNR_per_ant/IF]] Poln 1
# 6) [[avg_SNR_per_ant/IF]] Poln 2
for i in range (0,nrow): # Loop over rows
SNrow = SNtab.ReadRow(i+1, err)
if err.isErr:
            return badDict
time = SNrow["TIME"][0]
curSouID = SNrow["SOURCE ID"][0]
# New interval?
if (time>timeE) or (souID!=curSouID):
# Save any current values to accum
if times:
times[1] = tlast # actual end time
# Normalize accumulations by counts, overall statistics
acnt = 0
sum = 0.0
fract = 0.0
avgSNR = 0.0
for i in range(0,nant):
for j in range(0,nif):
if CNT1[i][j]>0:
totAnt[i] += CNT1[i][j]
snrAnt[i] += SNR1[i][j]
SNR1[i][j] /= CNT1[i][j]
acnt += 1
sum += SNR1[i][j]
if (npoln>1) and CNT2[i][j]>0:
snrAnt[i] += SNR2[i][j]
totAnt[i] += CNT2[i][j]
SNR2[i][j] /= CNT2[i][j]
acnt += 1
                            sum += SNR2[i][j]
if acnt>0:
avgSNR = sum / acnt
fract = float(acnt) / float(nant*nif*npoln)
pastSI = [souID, times, fract, avgSNR, antCnt, SNR1, SNR2]
accum.append(pastSI)
# Build new accumulators
times = [time,time+solIntD]
            antCnt = []      # Occurrences of this antenna
SNR1 = [] # Antenna array of sums of poln1
CNT1 = [] # Antenna array of counts of poln1
for i in range(0,nant):
antCnt.append(0)
# per antenna
snr1 = []
cnt1 = []
for j in range(0,nif):
snr1.append(0.0)
cnt1.append(0)
SNR1.append(snr1)
CNT1.append(cnt1)
# Second poln?
if npoln>1:
SNR2 = [] # Antenna array of sums of poln2
                CNT2 = []    # Antenna array of counts of poln2
for i in range(0,nant):
snr2 = []
cnt2 = []
for j in range(0,nif):
snr2.append(0.0)
cnt2.append(0)
SNR2.append(snr2)
CNT2.append(cnt2)
# end build accumulators
timeE = time + solIntD
souID = curSouID
tlast = time
# end new period
# Accumulate
tlast = time # Save last time
iant = SNrow["ANTENNA NO."][0] - 1 # 0-rel antenna no.
antCnt[iant] += 1
# Loop over IF
for iif in range (0, nif):
if SNrow["WEIGHT 1"][iif]>0.0:
SNR1[iant][iif] += SNrow["WEIGHT 1"][iif];
CNT1[iant][iif] += 1;
# Second Poln
if npoln>1:
if SNrow["WEIGHT 1"][iif]>0.0:
SNR2[iant][iif] += SNrow["WEIGHT 2"][iif];
CNT2[iant][iif] += 1;
# end loop over rows
# Final accumulation?
times[1] = tlast # actual end time
# Normalize accumulations by counts, overall statistics
acnt = 0
sum = 0.0
fract = 0.0
avgSNR = 0.0
for i in range(0,nant):
for j in range(0,nif):
if CNT1[i][j]>0:
totAnt[i] += CNT1[i][j]
snrAnt[i] += SNR1[i][j]
SNR1[i][j] /= CNT1[i][j]
acnt += 1
sum += SNR1[i][j]
if (npoln>1) and CNT2[i][j]>0:
snrAnt[i] += SNR2[i][j]
totAnt[i] += CNT2[i][j]
SNR2[i][j] /= CNT2[i][j]
acnt += 1
                sum += SNR2[i][j]
if acnt>0:
avgSNR = sum / acnt
fract = float(acnt) / float(nant*nif*npoln)
pastSI = [souID, times, fract, avgSNR, antCnt, SNR1, SNR2]
accum.append(pastSI)
# end loop
# Close table
SNtab.Close(err)
if err.isErr:
return badDict
# Find highest fraction
hiFract = 0.0
for s in accum:
hiFract = max (hiFract, s[2])
# eliminate (negate avg SNR) entries with lower fract
for s in accum:
if s[2]<0.99*hiFract:
s[3] = -s[3]
# Find highest avg SNR
hiSNR = 0.0
hi = [0.0, [0.0,1000.0], 0.0, 0.0]
for s in accum:
if s[3]>hiSNR:
hiSNR = s[3]
hi = s
# Normalize antenna average SNRs
for i in range (0,nant):
if totAnt[i]>0:
snrAnt[i] /= totAnt[i]
# deselect antennas not in refAnts (if any non zero)
if refAnts[0]>0:
for i in range (0,nant):
drop = True
for ra in refAnts:
if ra==i+1:
drop = False
# found it?
if drop:
snrAnt[i] = 0.0
totAnt[i] = 0
# Find best refant count - one with most valid occurences
bestCnt = 0
for i in range (0,nant):
if totAnt[i]>bestCnt:
bestCnt = totAnt[i]
# Find antenna with count equal to bestCnt with highest SNR
bestRef = 0
hiSNR = 0.0
for i in range (0,nant):
if (totAnt[i]>=bestCnt) and (snrAnt[i]>hiSNR):
bestRef = i+1
hiSNR = snrAnt[i]
# Lookup source name if SU table present
hiSU = uv.GetHighVer("AIPS SU")
souName = " " # default
if hiSU>= 1:
SUtab = uv.NewTable(Table.READONLY, "AIPS SU", 1, err, \
numIF=nif,)
SUtab.Open(Table.READONLY, err)
if err.isErr:
return badDict
# Number of rows
nrow = SUtab.Desc.Dict["nrow"]
for i in range (0,nrow): # Loop over rows
SUrow = SUtab.ReadRow(i+1, err)
if err.isErr:
return badDict
curSouID = SUrow["ID. NO."][0]
if hi!=None and curSouID==hi[0]: # This it?
souName = SUrow["SOURCE"][0]
break;
SUtab.Close(err)
if err.isErr:
return badDict
if debug:
print(totAnt,"\n", snrAnt,"\n")
for s in accum:
print(s[0],s[1],s[2],s[3])
# Create output structure
out = {"Source":souName, "souID":hi[0],"timeRange":hi[1], "Fract":hi[2], "SNR":hi[3], "bestRef":bestRef}
if debug:
print("SN Info",out)
return out
# end EVLASNStats
def EVLASaveOutFiles( pickleFile='manifest.pickle' ):
"""
Save pipeline output files Python object in a pickle file.
* pickleFile = name of pickle file
"""
EVLAAddOutFile( pickleFile, 'project', 'Python object pickle file' )
SaveObject( manifest, pickleFile, True)
# end EVLASaveOutFiles
def EVLAMakeManifest( manifest=manifest ):
"""
Extract filenames from the manifest structure and return as a list.
"""
    # Build a list of all files in the manifest
srcFiles = [] # list of files to be copied
for file in manifest['project']:
srcFiles.append( file['name'] )
srcKeys = list(manifest['source'].keys())
for srcKey in srcKeys:
for file in manifest['source'][ srcKey ]:
srcFiles.append( file['name'] )
return srcFiles
def EVLAValidManifest( manifest=manifest, logFile=None):
"""
Compare manifest with files in the current directory. Report differences.
Return True if manifest and CWD are equal. False otherwise.
* manifest = manifest data object
"""
ofList = EVLAMakeManifest( manifest=manifest )
cwdList = os.listdir( './' )
# List of files in manifest but not in CWD
notInCwd = [file for file in ofList if file not in cwdList]
if notInCwd:
mess = "ERROR manifest.pickle contains files not in current directory!"
printMess(mess, logFile)
mess = "ERROR List of missing files:\n" + pprint.pformat(notInCwd)
printMess(mess, logFile)
# List of files in CWD but not in manifest
notInOf = [file for file in cwdList if file not in ofList]
if notInOf:
mess = "ERROR Current directory contains files not in manifest.pickle!"
printMess(mess, logFile)
mess = "ERROR List of missing files:\n" + pprint.pformat(notInOf)
printMess(mess, logFile)
if notInCwd or notInOf:
return False # differ
else:
return True # equal
# end EVLAValidManifest
def EVLAMakeParmFile(subs, parmfile, template=None):
"""
Generate a parameter file from a template and a list of substitutions
* subs = list of substitutions as tuple: ("@PARAMETER@", "valuestring")
* parmfile = output parameter file
* template = name of template parameter file; if none, use default
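    Illustrative call (sketch only; substitution values are assumptions)::

        subs = [("@PROJECT@", "MyProj"), ("@SESSION@", "C1N64")]
        EVLAMakeParmFile(subs, "EVLAContParm_MyProj.py")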
"""
if not template:
template = os.getenv('EVLAPIPE','..')+'/EVLAContTemplateParm.py'
if not os.path.exists(template):
template = os.environ['OBIT'] + '/share/scripts/EVLAContTemplateParm.py'
if not os.path.exists(template):
template = 'EVLAContTemplateParm.py'
fdin = open(template, "r")
fdout = open(parmfile,"w")
line = fdin.readline()
while (line):
for s in subs:
line = line.replace(s[0],s[1])
fdout.write(line)
line = fdin.readline()
fdin.close()
fdout.close()
# end EVLAMakeParmFile
def EVLALowBandMakeParmFile(subs, parmfile, template=None):
"""
Generate a parameter file from a template and a list of substitutions
Lowband (P) specific version
* subs = list of substitutions as tuple: ("@PARAMETER@", "valuestring")
* parmfile = output parameter file
* template = name of template parameter file; if none, use default
"""
if not template:
template = os.getenv('EVLAPIPE','..')+'/EVLALowBandTemplateParm.py'
if not os.path.exists(template):
template = os.environ['OBIT'] + '/share/scripts/EVLALowBandTemplateParm.py'
if not os.path.exists(template):
template = 'EVLALowBandTemplateParm.py'
fdin = open(template, "r")
fdout = open(parmfile,"w")
line = fdin.readline()
while (line):
for s in subs:
line = line.replace(s[0],s[1])
fdout.write(line)
line = fdin.readline()
fdin.close()
fdout.close()
# end EVLAMakeLowBandParmFile
def EVLAGetParms( fileDict):
"""
Return a list for initializing the EVLA pipeline parameters file.
The list contains 2-element sequence types (tuples). The tuples contain
a substitution key and a replacement string.
* fileDict = a single file dictionary returned in the response from
ParseASDM
"""
session = EVLAGetSessionCode( fileDict )
wavelength = 2.99e8/fileDict['VLAFreq']
parms = [ ('@PROJECT@', fileDict['project_code']),
('@SESSION@', fileDict['session']),
('@BAND@', fileDict['band']),
('@VLAFREQ@', str(fileDict['VLAFreq'])),
('@VLACFG@', str(fileDict['VLACfg'])),
('@SPANBW@', str(fileDict['SpanBW'])),
('@DATAROOT@', fileDict['DataRoot']),
('@CONFIG@', str(fileDict['selConfig'])),
('@SELCHAN@', str(fileDict['selChan'])),
('@BPCAL@', str(fileDict['BPCal'])),
('@PHSCAL@', str(fileDict['PhsCal'])),
('@AMPCAL@', str(fileDict['AmpCal'])),
('@DLYCAL@', str(fileDict['DlyCal'])),
('@PINCAL@', str(fileDict['PCInsCals'])),
('@PRLDCAL@', str(fileDict['RLDCal'])),
('@REFANT@', str(fileDict['refAnt'])),
('@PLOTSRC@', "'"+str(fileDict['PlotSrc'])+"'"),
('@PLOTTIME@', str(fileDict['PlotTime'])),
('@TARGET@', str(fileDict['Targets'])),
#('@DESTDIR@', fileDict['DestDir']),
#('@ARCHFILEID@', fileDict['arch_file_id'])
]
return parms
# EVLAGetParms
def EVLAGetSessionCode( fileDict ):
"""
Get the project session code from a fileDict returned by
PipeUtil.ParseASDM.
* fileDict = dictionary returned by ParseASDM
"""
# Get session from archive file name
session = 'XX'
#VLBA pattern = re.compile(r'EVLA_[A-Za-z]+[0-9]+([A-Za-z]+)')
#VLBA match = re.match( pattern, fileDict['logical_file'] )
#VLBA if match:
#VLBA session = match.group(1)
return session
# end EVLAGetSessionCode
def EVLAGetBandLetter( freq ):
"""
Return the project observing band letter from frequency
* freq = Frequency in Hz
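    Illustrative call (sketch only)::

        band = EVLAGetBandLetter(4.8e9)   # returns "C"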
"""
if freq<100.0e6:
return "4"
elif freq<900.0e6:
return "P"
elif freq<2.0e9:
return "L"
elif freq<3.7e9:
return "S"
elif freq<7.5e9:
return "C"
elif freq<12.0e9:
return "X"
elif freq<18.0e9:
return "Ku"
elif freq<26.5e9:
return "K"
elif freq<40.0e9:
return "Ka"
elif freq<50.0e9:
return "Q"
elif freq<117.0e9:
return "A3"
elif freq<163.0e9:
return "A4"
elif freq<211.0e9:
return "A5"
elif freq<275.0e9:
return "A6"
elif freq<375.0e9:
return "A7"
elif freq<510.0e9:
return "A8"
elif freq<730.0e9:
return "A9"
elif freq<960.0e9:
return "A10"
    elif freq<2000.0e9:
return "A11"
else:
return "UK"
# end EVLAGetBandLetter
def EVLAGetRLDCal(asdm, config):
"""
Return list of R-L phase and delay calibrator info of known calibrators
[str((source_name, R-L phase, RM))]
* asdm = ASDM object
* cid = configuration ID
"""
nope = [(None, None, None)] # Default output
callist = []
# Known calibrators
known = [ \
{"name":"3C286", "pos2000":(3.5392577776, 0.53248521090), "tol":(0.001, 0.001), \
"RLPhase":66.0, "RM":0.0}, \
]
# Look through sources in ASDM field list
field = asdm.Field
scan = asdm.Scan
main = asdm.Main
for s in field:
# Check known list
for k in known:
if abs(s["referenceDir"][0]-k["pos2000"][0])<k["tol"][0] and \
abs(s["referenceDir"][1]-k["pos2000"][1])<k["tol"][1]:
# was this observed in config
OK = False
for m in main:
if m["configDescriptionId"]==config:
for scn in scan:
if scn["sourceName"]==s["fieldName"]:
OK = True
break;
if OK:
break
# If OK use whole thing as string
if OK: # Use it?
callist.append((s["fieldName"], k["RLPhase"], k["RM"]))
    if len(callist)>0:
        return callist
    else:
        return nope
# end EVLAGetRLDCal
def EVLAGetBandWavelength( fileDict ):
"""
Return the representative wavelength for the EVLA receiver associated with
the given file dictionary *fileDict*.
* fileDict = archive file dictionary
"""
wavelength = '??cm'
# Define lists for band code, upper and lower band frequency, and wavelength
## BandCode= [ 'P', 'P', 'L', 'S', 'C', 'X', 'U', 'K', 'Q', 'W']
## FreqLow = [ .312, .596, 1.35, 2.15, 4.61, 8.0, 12.0, 21.7, 41.0, 80.0] # GHz
## FreqHi = [ .342, .626, 1.75, 2.35, 5.11, 8.8, 15.4, 24.1, 45.0, 90.0] # GHz
## WaveLen = [ '90cm', '50cm', '20cm', '13cm', '6cm', '3cm', '2cm', '1cm', '7mm', '3mm']
# Combine P-band wavelengths
BandCode= [ 'P', 'L', 'S', 'C', 'X', 'U', 'K', 'Q', 'W']
FreqLow = [ .312, 1.35, 2.15, 4.61, 8.0, 12.0, 21.7, 41.0, 80.0] # GHz
FreqHi = [ .626, 1.75, 2.35, 5.11, 8.8, 15.4, 24.1, 45.0, 90.0] # GHz
WaveLen = [ '50-90cm', '20cm', '13cm', '6cm', '3cm', '2cm', '1cm', '7mm', '3mm']
# For FITSAIPS files, get frequency from filename, convert to
# representative wavelength
if fileDict['format'] == 'FITSAIPS':
        pattern = re.compile(r'.*_(\d+\.\d+)([MG]HZ)')
match = re.match( pattern, fileDict['logical_file'] )
fGHz = 0.0
if match:
fGHz, unit = match.group( 1, 2 )
fGHz = float( fGHz )
if unit == 'MHZ':
fGHz = fGHz / 1000
for i,w in enumerate(WaveLen):
if (fGHz >= FreqLow[i] and fGHz <= FreqHi[i]):
wavelength = w
# For all other files, or when frequency is not found in filename,
# convert archive band letter to wavelength.
if fileDict['format'] == 'FITS-IDI' or wavelength == '??cm':
bandLetter = fileDict['obs_bands']
wavelength = WaveLen[ BandCode.index( bandLetter ) ]
return wavelength
# end EVLAGetBandWavelength
def EVLAParseASDM(ASDMRoot, err):
"""
Parse an ASDM and set up for processing
Return list of dicts per AIPS dataset:
    VLAFreq, VLACfg, selConfig, selChan, BPCal, AmpCal, PhsCal, DlyCal
* ASDMRoot = root of ASDM directory
* err = Obit error message object
"""
out = []
asdm = OASDM.OASDM(err, name="ASDM", DataRoot=ASDMRoot)
configs = asdm.GetConfigs()
VLACfg = asdm.GetArrayConfig()
refAnt = 0 # Let script figure it out
# Loop over configurations:
for c in configs:
cid = c["configDescriptionId"]
BPCal = asdm.GetBandpassCal(cid)
AmpCal = asdm.GetAmpCal(cid)
PhsCal = asdm.GetPhaseCal(cid)
DlyCal = asdm.GetPhaseCal(cid)+asdm.GetAmpCal(cid)+asdm.GetBandpassCal(cid)
Targets = asdm.GetTargets(cid)
band = EVLAGetBandLetter(c["avgRefFreq"])
RLDCal = EVLAGetRLDCal(asdm, cid)
        # Loop over numbers of channels
for n in c["nchands"]:
plt = asdm.Get1stBandpassScan(cid)
dict = {
"DataRoot":ASDMRoot,
"VLAFreq":c["avgRefFreq"],
"SpanBW":c["SpanBandwidth"],
"VLACfg":VLACfg,
"selConfig":cid,
"selChan":n,
"band":band,
"BPCal":BPCal,
"AmpCal":AmpCal,
"PhsCal":PhsCal,
"DlyCal":DlyCal,
"Targets":Targets,
"PlotSrc":plt['source'],
"PlotTime":plt['timeRange'],
"refAnt":refAnt,
"PCInsCals":DlyCal,
"RLPCal":"None",
"rlrefAnt":refAnt,
"RLDCal":RLDCal
}
out.append(dict)
# End loops
del asdm
return out
# end EVLAParseASDM
#VLBA def EVLAPrepare( starttime, stoptime, fitsDest, outputDest, project=None,
#VLBA template="EVLAContTemplateParm.py", parmFile=None ):
def EVLAPrepare( ASDMRoot, err, \
project=None, session=None, template=None, parmFile=None,
outputDest='', doLow=False):
"""
Prepare pipeline for processing.
Create parameter file. Give user the command to execute the pipeline.
* ASDMRoot = root directory of ASDM/BDF
* err = Obit message/error stack
* project = name of project, default = root of ASDMRoot
* session = session name of project, default = 'C'config'N'nchan
* template = name of template parameter file, def "EVLAContTemplateParm.py"
* parmFile = name of output parameter file; None => used default name
    * doLow    = True if processing EVLA lowband (P) data
"""
# Check that DataRoot exists
if not os.path.exists(ASDMRoot):
OErr.PLog(err, OErr.Fatal, ASDMRoot+" Does not exist")
OErr.printErr(err)
return
# Get project name from ASDMRoot if not given
if not project:
parts = ASDMRoot.split(os.sep)
project = parts[len(parts)-1].split('.')[0]
print("Project", project)
# Get config info and parameters
fileList = EVLAParseASDM( ASDMRoot, err )
# Loop over files
print("Start pipeline with command(s):")
for fileNum in range (0,len(fileList)):
fileDict = fileList[fileNum]
fileDict['project_code'] = project
if session:
fileDict['session'] = session
else:
fileDict['session'] = 'C' + str(fileDict['selConfig']) + 'N' + str(fileDict['selChan'])
parmList = EVLAGetParms( fileDict)
parmFile = "EVLAContParm_" + fileDict['project_code'] + '_' + fileDict['session'] + \
'_Cfg' + str(fileDict['selConfig']) + '_Nch' + str(fileDict['selChan']) + '.py'
if doLow:
# P band
EVLALowBandMakeParmFile( parmList, parmFile, template=template )
print("ObitTalk EVLALowBandPipe.py AIPSSetup.py " + parmFile)
else:
# Cassegrain frequencies
EVLAMakeParmFile( parmList, parmFile, template=template )
print("ObitTalk EVLAContPipe.py AIPSSetup.py " + parmFile)
# end EVLAPrepare
def EVLAWriteVOTable( projMeta, srcMeta, filename="votable.xml", logfile='' ):
"""
Write metadata and file information to a VOTable.
    * projMeta   = dictionary of project metadata
    * srcMeta    = dictionary of single-source metadata
    * filename   = name of output VOTable XML file
    * logfile    = logfile for messages
"""
now = datetime.datetime.utcnow()
doc = xml.dom.minidom.Document()
vo = doc.createElement("votable") # root element VOTABLE
# # Specify IVOA VOTable Schema
# vo.setAttribute("xmlns","http://www.ivoa.net")
# vo.setAttribute("xmlns:xsi","http://www.w3.org/2001/XMLSchema-instance")
# vo.setAttribute("xsi:schemaLocation",
# "http://www.ivoa.net http://www.ivoa.net/internal/IVOA/IvoaVOTable/VOTable-1.2-20090929")
doc.appendChild(vo)
rs2 = doc.createElement("resource") # RESOURCE Project Data
rs2.setAttribute("name","Project Data")
vo.appendChild(rs2)
# Write project metadata
keys = list(projMeta.keys())
setAttribs = XMLSetAttributes # use short name - save space
for key in keys:
pr = doc.createElement("param")
if key == "project":
setAttribs( pr, [ ("name", key ),
("value", projMeta[key] ),
("datatype", "char"),
("arraysize","*"),
("ucd","meta.code") ] )
XMLAddDescription( pr, "Project code" )
elif key == "session":
setAttribs( pr, [ ("name", key ),
("value", projMeta[key] ),
("datatype","char"),
("arraysize","*"),
("ucd","meta.code") ] )
XMLAddDescription( pr, "Project session identifier" )
elif key == "band":
setAttribs( pr, [ ("name", key ),
("value", projMeta[key] ),
("datatype","char"),
("arraysize","*"),
("ucd","instr.bandpass;em.wl") ] )
XMLAddDescription( pr, "Representative receiver wavelength" )
elif key == "obsDate":
setAttribs( pr, [ ("name", key ),
("value", projMeta[key] ),
("datatype","char"),
("arraysize","10"),
("ucd","time.start") ] )
XMLAddDescription( pr, "Observing date" )
elif key == "obsStart":
setAttribs( pr, [ ("name", key ),
("value", projMeta[key] ),
("datatype","float"),
("ucd","time.start"),
("unit","MJD") ] )
XMLAddDescription( pr, "Observation start time (MJD)" )
elif key == "obsStop":
setAttribs( pr, [ ("name", key ),
("value", projMeta[key] ),
("datatype","float"),
("ucd","time.end"),
("unit","MJD") ] )
XMLAddDescription( pr, "Observation stop time (MJD)" )
elif key == "procDate":
setAttribs( pr, [ ("name", key ),
("value", projMeta[key] ),
("datatype","char"),
("arraysize","10"),
("ucd","time.processing") ] )
XMLAddDescription( pr, "Pipeline processing date" )
elif key == "PhsCals":
# All strings in array must have same length
maxLen = 0
for string in projMeta[key]:
length = len(string)
if length > maxLen:
maxLen = length
value = ""
for string in projMeta[key]:
# Concatenate strings, left justified, with min length maxLen+1
value += "%-*s" % ( maxLen+1, string )
            arraysize = str(maxLen+1) + "x" + str( len(projMeta[key]) )
setAttribs( pr, [ ("name", key ),
("value", value ),
("datatype","char"),
("arraysize", arraysize ),
("ucd","meta.id;src.calib") ] )
XMLAddDescription( pr, "List of phase calibrators" )
elif key == "AmpCals":
# All strings in array must have same length
maxLen = 0
for string in projMeta[key]:
length = len(string)
if length > maxLen:
maxLen = length
value = ""
for string in projMeta[key]:
# Concatenate strings, left justified, with min length maxLen+1
value += "%-*s" % ( maxLen+1, string )
            arraysize = str(maxLen+1) + "x" + str( len(projMeta[key]) )
setAttribs( pr, [ ("name", key ),
("value", value ),
("datatype","char"),
("arraysize", arraysize ),
("ucd","meta.id;src.calib") ] )
XMLAddDescription( pr, "List of amp calibrators" )
elif key == "BPCals":
# All strings in array must have same length
maxLen = 0
for string in projMeta[key]:
length = len(string)
if length > maxLen:
maxLen = length
value = ""
for string in projMeta[key]:
# Concatenate strings, left justified, with min length maxLen+1
value += "%-*s" % ( maxLen+1, string )
            arraysize = str(maxLen+1) + "x" + str( len(projMeta[key]) )
setAttribs( pr, [ ("name", key ),
("value", value ),
("datatype","char"),
("arraysize", arraysize ),
("ucd","meta.id;src.calib") ] )
XMLAddDescription( pr, "List of bandpass calibrators" )
elif key == "DlyCals":
# All strings in array must have same length
maxLen = 0
for string in projMeta[key]:
length = len(string)
if length > maxLen:
maxLen = length
value = ""
for string in projMeta[key]:
# Concatenate strings, left justified, with min length maxLen+1
value += "%-*s" % ( maxLen+1, string )
            arraysize = str(maxLen+1) + "x" + str( len(projMeta[key]) )
setAttribs( pr, [ ("name", key ),
("value", value ),
("datatype","char"),
("arraysize", arraysize ),
("ucd","meta.id;src.calib") ] )
XMLAddDescription( pr, "List of delay calibrators" )
elif key == "anNames":
numAnt = len( projMeta[key] )
value=""
for name in projMeta[key]:
value += name + " " # 2-char name plus space => len = 3
arraysize = "3x" + str( numAnt )
setAttribs( pr, [ ("name", key ),
("value", value ),
("datatype","char"),
("arraysize", arraysize),
("ucd","meta.code") ] )
XMLAddDescription( pr, "List of antennas used in observation (antenna code)" )
elif key == "freqCov":
pairList = projMeta[key]
arraysize = "2x" + str( len( pairList ) )
value = ""
for pair in pairList:
value += "%f %f " % ( pair[0], pair[1] )
setAttribs( pr, [ ("name", key ),
("value", value ),
("datatype","double"),
("arraysize", arraysize),
("ucd","em.freq"),
("unit", "Hz") ] )
XMLAddDescription( pr, "Observational frequency coverage: list of lower- and upper-side band pairs" )
elif key == "minFringe":
setAttribs( pr, [ ("name", key ),
("value", str( projMeta[key] ) ),
("datatype","double"),
("ucd","phys.angSize"),
("unit", "asec") ] )
XMLAddDescription( pr, "Minimum fringe spacing (arcseconds)" )
elif key == "dataSet":
setAttribs( pr, [ ("name", key ),
("value", projMeta[key] ),
("datatype","char"),
("arraysize","*"),
("ucd","meta.dataset;meta.file") ] )
XMLAddDescription( pr, "Name of archived raw-data file" )
elif key in ("obitVer", "aipsVer", "pyVer", "sysInfo", "pipeVer"):
setAttribs( pr, [ ("name", key ),
("value", projMeta[key] ),
("datatype","char"),
("arraysize","*"),
("ucd","meta.version;meta.software") ] )
XMLAddDescription( pr, "Version string" )
elif key in ("archFileID"):
setAttribs( pr, [ ("name", key ),
("value", projMeta[key] ),
("datatype","int" ),
("ucd","meta.record;meta.dataset" ) ] )
XMLAddDescription( pr,
"Archive file ID (integer unique to each archive file)" )
elif key in ("fileSetID"):
setAttribs( pr, [ ("name", key ),
("value", projMeta[key] ),
("datatype","char" ),
("arraysize","*"),
("ucd","meta.record;meta.dataset" ) ] )
XMLAddDescription( pr,
"Pipeline data product set identification string (project code + observing date + archive file ID)" )
else:
mess = "WARN - Project report key/value " + key + "/" + str(projMeta[key]) + \
" not written to VOTable"
printMess(mess, logfile)
continue # skip append and go to next key
rs2.appendChild(pr)
table_ProjFiles = doc.createElement("table")
table_ProjFiles.setAttribute("name","files")
rs2.appendChild(table_ProjFiles)
fi = doc.createElement("field")
fi.setAttribute("name","file name")
fi.setAttribute("datatype","char")
fi.setAttribute("arraysize","*")
fi.setAttribute("ucd","meta;meta.file")
table_ProjFiles.appendChild(fi)
fi = fi.cloneNode( False )
fi.setAttribute("name","description")
fi.setAttribute("ucd","meta.title")
table_ProjFiles.appendChild(fi)
dt = doc.createElement("data")
table_ProjFiles.appendChild(dt)
td = doc.createElement("tabledata")
dt.appendChild(td)
# Make copy of node for later use
table_SrcFiles = table_ProjFiles.cloneNode( True )
# Project files
EVLAWriteVOTableFiles( manifest['project'], td)
# Loop over all sources
for src in srcMeta:
rs3 = doc.createElement("resource") # RESOURCE (each source)
rs3.setAttribute("name", src["Source"] )
vo.appendChild(rs3)
# Src metadata
keys = list(src.keys())
for key in keys:
pr = doc.createElement("param")
if key == "ObsDate":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","char"),
("arraysize","10"),
("ucd","time.start") ] )
XMLAddDescription( pr, "Observing date" )
elif key == "Source":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","char"),
("arraysize","*"),
("ucd","meta.code") ] )
XMLAddDescription( pr, "Source name" )
elif key == "RA":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","pos.eq.ra;src"),
("unit","deg") ] )
XMLAddDescription( pr, "Right ascension (phase center)" )
elif key == "Dec":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","pos.eq.dec;src"),
("unit","deg") ] )
XMLAddDescription( pr, "Declination (phase center)" )
elif key == "Exposure":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","time.duration;obs.exposure"),
("unit","8.64x10+4s") ] ) # frac of day
XMLAddDescription( pr, "Exposure time (fraction of day)" )
elif key == "numVis":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","int"),
("ucd","meta.number;obs") ] )
XMLAddDescription( pr, "Number of visibility measurements" )
elif key == "RAPnt":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","pos.eq.ra;instr"),
("unit","deg") ] )
XMLAddDescription( pr, "Right ascension (telescope pointing)" )
elif key == "DecPnt":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","pos.eq.dec;instr"),
("unit","deg") ] )
XMLAddDescription( pr, "Declination (telescope pointing)" )
elif key == "Freq":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","em.freq"),
("unit","Hz") ] )
XMLAddDescription( pr, "Center frequency" )
elif key == "BW":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","instr.bandwidth"),
("unit","Hz") ] )
XMLAddDescription( pr, "Bandwidth" )
elif key == "Size":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","pos.angDistance"),
("unit","deg") ] )
XMLAddDescription( pr, "Image angular size" )
elif key == "Cells":
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","pos.angResolution"),
("unit","deg") ] )
XMLAddDescription( pr, "Cell angular size (resolution)" )
elif key.find("Peak") == 1:
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","phot.flux.density;stat.max"),
("unit","Jy") ] )
XMLAddDescription( pr, "Peak flux" )
elif key.find("Sum") == 1:
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","phot.flux.density;arith"),
("unit","Jy") ] )
XMLAddDescription( pr, "Sum of clean component flux" )
elif key.find("RMS") == 1:
setAttribs( pr, [ ("name", key ),
("value", src[key] ),
("datatype","double"),
("ucd","stat.stdev;phot.flux.density"),
("unit","Jy") ] )
XMLAddDescription( pr, "Root-mean-square flux" )
elif key.find("Beam") == 1:
value = ""
for float in src[key]:
value += str( float ) + " "
setAttribs( pr, [ ("name", key ),
("value", value ),
("datatype","double"),
("arraysize","3"),
("ucd","instr.beam"),
("unit","deg") ] )
XMLAddDescription( pr, "Elliptical synthesized beam shape (major axis, minor axis, rotation)" )
else:
mess = "WARN - Source (" + src['Source'] + ") report key/value " + key + \
"/" + str(src[key]) + " not written to VOTable"
printMess(mess, logfile)
continue # skip append and go to next key
rs3.appendChild(pr)
tb = table_SrcFiles.cloneNode( True ) # make deep copy for this source
rs3.appendChild(tb)
nodeList = tb.getElementsByTagName('tabledata')
td = nodeList.item(0)
# if this source is in the manifest single-source dictionary...
if src['Source'] in manifest['source']:
fileList = manifest['source'][ src['Source'] ]
EVLAWriteVOTableFiles( fileList, td )
votable = open( filename, "w" )
doc.writexml( votable, addindent=" ", newl="\n") # readable format
# doc.writexml( votable ) # production format
# end EVLAWriteVOTable
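# Hedged usage sketch: projMeta and srcMeta would normally come from
# EVLAProjMetadata and EVLASrcMetadata (defined later in this module); the
# output file name is arbitrary:
#   EVLAWriteVOTable(projMeta, srcMeta, filename="TSKY0001_C1N64_votable.xml")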
def EVLAWriteVOTableFiles( fileList, tableData ):
"""
Write output file data to a VOTable.
* fileList = List of file dictionaries, from manifest
* tableData = TABLEDATA element to which child elements will be appended
"""
doc = xml.dom.minidom.Document()
for file in fileList:
tr = doc.createElement("tr")
tableData.appendChild(tr)
td = doc.createElement("td")
tr.appendChild(td)
tx = doc.createTextNode( file['name'] )
td.appendChild(tx)
td = doc.createElement("td")
tr.appendChild(td)
tx = doc.createTextNode( file['description'] )
td.appendChild(tx)
# end EVLAWriteVOTableFiles
def EVLAAddOutFile( filename, target, description, logFile=""):
"""
Add file names and descriptions to the manifest object. Verify that the
file is not already in manifest before adding.
* filename = name of file to be added
* target = name of target source; or 'project' if this is a multi-source file
    * description = description of file
    * logFile = file for log messages
    """
mess = "INFO Adding " + filename + \
" (for " + target + ") to list of output files."
printMess(mess, logFile)
d = { 'name' : filename, 'description' : description }
projFiles = manifest['project']
srcFiles = manifest['source']
if ( target == 'project' ): # If this is a project file
if ( not d in projFiles ): # If file is not already in list
projFiles.append( d ) # Add file to project list
else: # else, it is a single-source file
if target in srcFiles: # If files already exist for this source
if ( not d in srcFiles[ target ] ): # If file is not already in list
srcFiles[ target ].append( d ) # Add file to target list
else:
# No files yet present for this source.
# Create a new dictionary key and assign it a list w/ member d
srcFiles[ target ] = [ d ]
# End EVLAAddOutFile
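# Hedged examples of manifest bookkeeping (file and source names are
# hypothetical).  manifest is the module-level dict with keys 'project'
# (a list of file dicts) and 'source' (a dict of per-source file-dict lists):
#   EVLAAddOutFile("report.html", "project", "HTML report")
#   EVLAAddOutFile("J1234+5678_K_IClean.fits", "J1234+5678", "Stokes I image")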
def EVLAFetchOutFiles( pickleFile='manifest.pickle', logFile=None):
"""
Fetch a pickled python object that holds pipeline output files. Check that
each output file still exists. If it does not, remove it from the object,
and print a warning.
    * pickleFile = pickle file to be fetched
    * logFile = file for log messages
    """
if not os.path.exists( pickleFile ):
return
global manifest
manifest = FetchObject( pickleFile )
exists = [ file for file in manifest['project']
if os.path.exists( file['name'] ) ]
notExists = [ file for file in manifest['project']
if not os.path.exists( file['name'] ) ]
for file in notExists:
print("Doesn't exist (project) " + file['name'])
mess = "WARN Pipeline manifest pickle points to non-existant project file: " \
+ file['name'] + "\n Removing file from manifest."
printMess(mess, logFile)
manifest['project'] = exists
# Check single-source files
srcFiles = manifest['source']
srcFiles_copy = copy.deepcopy( srcFiles )
srcKeys = list(srcFiles.keys())
for srcName in srcKeys:
# Check files for each source
for file in srcFiles_copy[ srcName ]:
if not os.path.exists( file['name'] ):
srcFiles[ srcName ].remove( file ) # remove from original
print("Doesn't exist (source) " + file['name'])
mess = "WARN Pipeline manifest pickle points to non-existant source file: " \
+ file['name'] + "\n Removing file from manifest."
printMess(mess, logFile)
# If this source no longer has files, remove it
if len( srcFiles[ srcName ] ) == 0:
del srcFiles[ srcName ]
# end EVLAFetchOutFiles
def EVLASaveOutFiles( pickleFile='manifest.pickle' ):
"""
Save pipeline output files Python object in a pickle file.
* pickleFile = name of pickle file
"""
EVLAAddOutFile( pickleFile, 'project', 'Python object pickle file' )
SaveObject( manifest, pickleFile, True)
# end EVLASaveOutFiles
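# Hedged sketch of a typical fetch/modify/save round trip on the manifest
# pickle (file names are hypothetical):
#   EVLAFetchOutFiles("manifest.pickle")   # load manifest, drop missing files
#   EVLAAddOutFile("new_plot.jpg", "J1234+5678", "Diagnostic plot")
#   EVLASaveOutFiles("manifest.pickle")    # write the updated manifest back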
def EVLAAIPSName( project, session):
"""
Derive AIPS Name. AIPS file name will be project+session with project
truncated to fit in 12 characters.
* project = project name
* session = session code
"""
################################################################
    Aname = (project.strip()+session)[0:12]
return Aname
# end EVLAAIPSName
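# Hedged examples (hypothetical project/session codes).  Note that the
# truncation is applied to the concatenation, so a long project name can clip
# the session code:
#   EVLAAIPSName("13A-398", "C1N64")              # -> "13A-398C1N64" (12 chars)
#   EVLAAIPSName("VeryLongProjectName", "C1N64")  # -> "VeryLongProj"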
def EVLAKntrPlots( err, catNos=[], imClass='?Clean', imName=[], project='tProj',
session='tSes', band='tB', disk=1, cleanUp=True, logfile='', check=False,
debug=False ):
"""
Create contour plots for the specified images. Image selection is made
based on the input catalog numbers (catNos), or, if catalog numbers are not
given, based on a pattern match to the image name and class. Pattern
matching follows the rules of function AMcat(). One PS file is generated
for each unique image name. Multiple images with the same name will be added
to the same file on different pages. Arugments project, session, and band
are used only in creating file names.
* err = Python Obit Error/message stack
* catNos = catalog numbers of images
* imClass = class of images to plot (used only if catNos is empty)
* imName = name of images to plot; None = make plots for each source (used
only if catNos is empty)
* project = project name
* session = project session
* band = project receiver band code
* disk = data disk number
* logfile = file for log messages
* debug = Turn on debug mode
"""
# Setup AIPS task KNTR
kntr = AIPSTask.AIPSTask("kntr")
try:
kntr.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
kntr.msgkill = 5
kntr.dogrey = 0
kntr.dovect = 0
kntr.ltype = -5 # Show border and labels w/o creation date and PL version
kntr.cbplot = -18 # half-power beam in bottom right; no contour overlay
# Set contour levels in units of cntr.clev (defined below). Contours begin
# with -2, -2^0.5, 2^0.5, and then increase as powers of root two.
levs = [ -2, -2**(0.5), 2**(0.5) ]
for i in range(27):
l = levs[-1] * 2.0**( 0.5 )
levs = levs + [ l ]
kntr.levs = AIPSTask.AIPSList( levs )
# Instantiate AIPS task LWPLA
lwpla = AIPSTask.AIPSTask("lwpla")
try:
lwpla.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
lwpla.msgkill = 5
# If catalog numbers not given, get all images matching class imClass
# and with names in list imName.
if (not catNos):
if not imName:
imName = '*'
elif not type(imName) == list:
imName = [ imName ]
for n in imName:
catNos += AMcat(disk=disk, Aname=n, Aclass=imClass, giveList=True )
for cno in catNos: # loop over images
image = getname(cno)
mess = "INFO Creating contour plot for image " \
+ image.Aname.strip() + ' . ' + image.Aclass.strip()
printMess(mess, logfile)
# Run KNTR to make plot
setname(image, kntr)
# Contour level unit = 2 * RMS noise
stats = imstat(image, err)
kntr.clev = 2 * stats['RMSHist']
# Trap failure - KNTR too stupid to live
try:
# Set the size of the contour plot: use the inner quarter
d = image.Desc.Dict
nx = d['inaxes'][0]
ny = d['inaxes'][1]
kntr.trc[1]=3*nx/4.0; kntr.trc[2]=3*ny/4.0;
kntr.blc[1]=nx/4.0; kntr.blc[2]=ny/4.0;
name = image.Aname.rstrip() # Image name w/o whitespace
outfile = project+'_'+session+'_'+band+'_'+name+'.cntr.ps'
            outfile = re.sub(r'\s','_',outfile) # Deblank filename
if not check:
kntr.g
except Exception as exception:
print(exception)
mess = "Kntr Failed - continuing anyway"
printMess(mess, logfile)
else:
# Run LWPLA to make PS file
setname(image, lwpla)
lwpla.outfile = './'+outfile # output to current directory
# Trap failure
try:
if not check:
lwpla.g
except Exception as exception:
print(exception)
mess = "Lwpla Failed - continuing anyway"
printMess(mess, logfile)
else:
pass
if os.path.exists(outfile): # May not exist
EVLAAddOutFile( outfile, name, "Contour plot" )
# Convert 1st page of PS (Stokes I) to JPG
tmpPS = outfile[:-3] + '.1.ps'
tmpPDF = outfile[:-3] + '.pdf'
jpg = outfile[:-3] + '.jpg'
            jpg = re.sub(r'\s','_',jpg) # deblank
printMess('Converting '+outfile+' (1st page) -> '+jpg,logfile)
# Extract first page of PS; Convert to PDF; Convert to JPG
# (on 64-bit, converting directly from PS to JPG does not work)
cmd = 'pstops 1000:0 ' + outfile + ' > ' + tmpPS + ';' + \
'ps2pdf ' + tmpPS + ' ' + tmpPDF + ';' + \
'convert -density 96 ' + tmpPDF + ' ' + jpg
print(cmd)
rtn = os.system(cmd)
if rtn == 0:
EVLAAddOutFile( jpg, name, "Contour plot (Stokes I)" )
if cleanUp:
os.remove(tmpPS)
os.remove(tmpPDF)
else:
# Print error message and leave the PS file
mess="Error occurred while converting PS to JPG"
printMess(mess,logfile)
# Delete plot files
if not check:
zz=image.ZapTable("AIPS PL", -1,err)
# end EVLAKntrPlots
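# Hedged usage sketch (image name, project, session and band are hypothetical;
# err is an existing OErr stack):
#   EVLAKntrPlots(err, imClass="IClean", imName=["J1234+5678"],
#                 project="TSKY0001", session="C1N64", band="K",
#                 disk=1, logfile="pipeline.log")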
def EVLADiagPlots( uv, err, cleanUp=True, JPEG=True, sources=None, project='',
session='', band='', logfile=None, check=False, debug=False ):
"""
Generate single source diagnostic plots.
This method uses the averaged, calibrated data generated by the
pipeline to produced single source diagnostics. The diagnostics
can be used to assess the quality of the visibility data underlying
the pipeline image maps and to assess the quality of the pipeline
algorithms.
* uv = UV data object to plot
* err = Python Obit Error/message stack
* cleanUp = clean up temporary files when finished
* JPEG = if True, make JPEG plots; else make PS
* sources = list of sources; None = make plots for each source
* logfile = logfile for messages
* check = Only check script
* debug = Turn on debug mode
"""
mess = "Generating diagnostic plots for each source"
printMess(mess, logfile)
avgClass = 'UVAvgT' # uv average temporary data
avgSeq = 1
# Average data over: 1 sec, all IFs, all channels
calAvgTime = 1 # temporal averaging (sec)
printMess("Averaging: "+str(calAvgTime)+" sec interval, all IFs, all channels",
logfile = logfile)
rtn = EVLACalAvg( uv, avgClass=avgClass, avgSeq=avgSeq, err = err,
logfile = logfile, check=check, debug = debug, CalAvgTime = calAvgTime,
avgFreq = 3, # avg all IFs
chAvg = 0, # avg all channels (should already have been done)
doCalib = 2, # apply calibration
doBand = 0, # do not calibrate bandpass; already calibrated
flagVer = 1 # Apply any flags
)
if rtn != 0:
mess = "Error averaging data. EVLACalAvg returned: " + str(rtn)
printMess(mess, logfile)
return rtn
# Get the newly averaged data set: most recent file with class UVAvg
uvAvg = None
if not check:
uvname = uv.GetName()+"_Cal"
uvAvg = UV.newPAUV(uvname, uv.Aname, avgClass, uv.Disk, avgSeq,
True, err)
# Put source list into slist
if not sources:
slist = EVLAAllSource(uvAvg,err,logfile=logfile,check=check,debug=debug)
else:
slist = sources
if not type(slist) == list:
slist = [slist]
# Setup UVPLT
uvplt = AIPSTask.AIPSTask("uvplt")
try:
uvplt.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uvAvg, uvplt)
uvplt.stokes = 'I' # unpolarized
uvplt.ltype = -3 # Omit PL number and creation time
uvplt.msgkill = 5 # Omit babble
printMess("Plotting stokes "+uvplt.stokes, logfile=logfile)
# Setup LWPLA
lwpla = AIPSTask.AIPSTask("lwpla")
try:
lwpla.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
lwpla.msgkill = 5
if not check:
setname(uvAvg, lwpla)
# Define plots: file => filename string, bparm => UVPLT
plotTypes = ( { 'file' : 'amp', 'bparm' : [3,1] , 'desc': 'Amp vs. uv Dist'},
{ 'file' : 'uv' , 'bparm' : [7,6,2], 'desc': 'u vs. v '},
{ 'file' : 'ri' , 'bparm' : [10,9], 'desc': 'Re vs. Im'} )
# Loop over sources
for (i,s) in enumerate(slist):
mess = "INFO Generating diagnostic plots for source "+s+ \
" ("+str(i+1)+"/"+str(len(slist))+")"
printMess(mess, logfile)
uvplt.sources[1] = s
# Loop over plot types
for plot in plotTypes:
uvplt.bparm = AIPSTask.AIPSList( plot['bparm'] )
uvplt.msgkill = 5 # Omit babble
# Create output file name
outfile = project+'_'+session+'_'+band+'_'+s+'.'+plot['file']+'.ps'
lwpla.outfile = './'+outfile # output to current directory
# Remove preexisting file
if os.path.exists(outfile): os.remove(outfile)
if not check:
try:
uvplt.go()
lwpla.go()
except Exception as exception:
mess = "ERROR Plotting failed - continuing anyway"
printMess(mess, logfile)
mess = "ERROR "+ str(exception)
printMess(mess, logfile)
else:
if JPEG:
# Convert PS -> PDF; Convert PDF -> JPG
                        # (on 64-bit, converting directly from PS to JPG fails)
tmpPDF = outfile[:-3] + '.pdf'
jpg = outfile[:-3] + '.jpg'
printMess('Converting '+outfile+' -> '+jpg,logfile)
cmd = 'convert ' + outfile + ' ' + tmpPDF + ';' + \
'convert -density 96 ' + tmpPDF + ' ' + jpg
rtn = os.system(cmd)
if rtn == 0:
EVLAAddOutFile( jpg, s, plot['desc'] )
if cleanUp:
os.remove(outfile) # Remove the PS file
os.remove(tmpPDF)
else:
# Print error message and leave the PS file
mess="Error occurred while converting PS to JPG"
printMess(mess,logfile)
# Open/close UV to update header
if not check:
uvAvg.Open(UV.READONLY,err)
uvAvg.Close(err)
if err.isErr:
OErr.printErr(err)
mess = "Update UV header failed"
printMess(mess, logfile)
return 1
if cleanUp:
printMess('Cleaning up temporary data',logfile)
if not check:
zap(uvAvg)
# end EVLADiagPlots
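# Hedged usage sketch (uv is a calibrated UV data object, err an OErr stack;
# the source, project, session and band names are hypothetical):
#   EVLADiagPlots(uv, err, sources=["J1234+5678"], project="TSKY0001",
#                 session="C1N64", band="K", logfile="pipeline.log")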
def EVLAProjMetadata( uv, AIPS_VERSION, err,
PCals=[], ACals=[], BPCals=[], DCals=[],
project='project', session='session', band='band', dataInUVF='',
archFileID='' ):
"""
Return a dictionary holding project metadata. Contents:
=============== ========================================================
"project" observation project name
"session" observation project session
"band" receiver band code
"obsDate" observation date
"obsStart" observation start time (MJD)
"obsStop" observation stop time (MJD)
"procDate" pipeline processing date
"PhsCals" array of phase calibrators
"AmpCals" array of amplitude calibrators
"BPCals" array of bandpass calibrators
"DlyCals" array of delay calibrators
"obitVer" Obit version (TBD)
"aipsVer" AIPS version
"pyVer" Python version
"sysInfo" system information on processing machine (uname -a)
"anNames" names of all antennas used
"freqCov" frequency coverage (low & up sideband pairs for all IFs)
"minFring" minimum fringe spacing (asec)
"dataSet" Data set archive file name
"archFileID" Archive file ID
=============== ========================================================
* uv = uv data object for which the report will be generated
* AIPS_VERSION = AIPS version information
* err = Python Obit Error/message stack
* PCals = list of phase cal models
* ACals = list of amp cal models
* BPCals = list of bandpass cal models
* DCals = list of delay cal models
* project = Observation project name
* session = Observation project session
* band = receiver band code
* dataInUVF = data set archive file name
* archFileID = archive file ID
"""
# Get lists of calibrator names from model lists.
PCalList = []
for s in PCals:
PCalList.append(s['Source'])
ACalList = []
for s in ACals:
ACalList.append(s['Source'])
BPCalList = []
for s in BPCals:
BPCalList.append(s['Source'])
DCalList = []
for s in DCals:
DCalList.append(s['Source'])
r = {}
r["project"] = project
r["session"] = session
r["band"] = band # Receiver band code
r["obsDate"] = uv.Desc.Dict["obsdat"] # observation date
times = getStartStopTime( uv, err )
r["obsStart"] = times[0]
r["obsStop"] = times[1]
r["procDate"] = str( datetime.date.today() ) # processing date
r["obitVer"] = Version() # Obit version
r["pipeVer"] = getSVNVersion(os.getenv("EVLAPIPE",".")) # Pipeline version
# Does this need to be passed as a function argument?
r["aipsVer"] = AIPS_VERSION + '(' + str( datetime.date.today() ) + ')' # AIPS version
r["pyVer"] = sys.version # python version
p = os.popen("uname -a") # get sys info
r["sysInfo"] = p.read()
r["PhsCals"] = PCalList # list of phase calibrators
r["AmpCals"] = ACalList # list of amp calibrators
r["BPCals"] = BPCalList # list of bandpass calibrators
r["DlyCals"] = DCalList # list of delay calibrators
parts = dataInUVF.split(os.sep)
r["dataSet"] = parts[len(parts)-1]
r["archFileID"] = archFileID # archive file ID
r["fileSetID"] = r["project"] + "_" + r["obsDate"][2:].replace('-','') + "_" + \
str(r["archFileID"])
# Get antenna names and positions
antab = uv.NewTable(Table.READONLY,"AIPS AN",1,err)
antab.Open(Table.READONLY,err)
OErr.printErrMsg(err) # catch table open errors
nrow = antab.Desc.Dict["nrow"]
annames = []
anpos = []
for i in range(1,nrow+1):
anrow = antab.ReadRow(i, err)
name = anrow["ANNAME"][0].rstrip()
annames.append( name )
pos = anrow["STABXYZ"]
anpos.append( pos )
antab.Close(err)
r["anNames"] = annames # list of antennas used
# Get the frequency coverage
d = uv.Desc.Dict # UV data descriptor dictionary
refFreq = d["crval"][ d["jlocf"] ] # reference frequency
fqtab = uv.NewTable(Table.READONLY,"AIPS FQ",1,err)
fqtab.Open(Table.READONLY,err)
OErr.printErrMsg(err) # catch table open errors
nrow = fqtab.Desc.Dict["nrow"]
freqCov = []
for i in range(1,nrow+1):
fqrow = fqtab.ReadRow(i, err)
freq = fqrow["IF FREQ"]
bw = fqrow["TOTAL BANDWIDTH"]
sb = fqrow["SIDEBAND"] # +1 => 'IF FREQ' is upper-side band; -1 => lower-side band
        for j in range( len(freq) ):
            f1 = refFreq + freq[j] # 1st bound of IF
            f2 = f1 + sb[j] * bw[j] # 2nd bound of IF
fc = [ f1, f2 ]
fc.sort()
freqCov.append( fc ) # sort bounds and add to list
fqtab.Close(err)
r["freqCov"] = freqCov
# Calculate the minimum fringe spacing
maxBl = 0 # maximum baseline length
maxBlAnt = [] # antenna indices forming maximum baseline
for (i, p1) in enumerate( anpos ):
for (j, p2) in enumerate( anpos ):
if i == j: continue
dpos = [0, 0, 0]
for k in range(3):
dpos[k] = p1[k] - p2[k]
# Baseline length in meters
bl = ( dpos[0]**2 + dpos[1]**2 + dpos[2]**2 )**(0.5)
if bl > maxBl:
maxBl = bl
maxBlAnt = [i, j]
# r["maxBl"] = [ annames[ maxBlAnt[0] ], # antennas forming max baseline
# annames[ maxBlAnt[1] ] ]
lightSpeed = 299792458 # ( meters / second)
wavelength = lightSpeed / refFreq
maxBlWavelength = maxBl / wavelength # max baseline (units of wavelength)
# minimum fringe spacing (asec)
r["minFringe"] = 1 / maxBlWavelength / 4.8481368e-6
return r
# end EVLAProjMetadata
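# Hedged usage sketch; the calibrator model lists, data set name and archive ID
# are hypothetical, and AIPS_VERSION is the AIPS version string known to the
# calling script:
#   projMeta = EVLAProjMetadata(uv, AIPS_VERSION, err,
#                               PCals=PCals, ACals=ACals, BPCals=BPCals, DCals=DCals,
#                               project="TSKY0001", session="C1N64", band="K",
#                               dataInUVF="TSKY0001.55197.9.uvfits", archFileID="12345")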
def EVLASrcMetadata(uv, err, FreqID=1, Sources=None, \
seq=1, sclass="IClean", \
Stokes="I", logfile='', check=False, debug=False):
"""
Generate report info for a list of targets in AIPS files
Returns a report which is a list of dicts, each of which contains
=========== ==========================================
"Source" Source name
"haveImage" True if images were made,
"ObsDate" Observing date as "yyyy-mm-dd"
"numVis" Number of visibilities (ignoring flagging)
"Exposure" Total integration time (day)
"RA" Source RA (deg) at standard equinox
"Dec" Source Dec (deg) at standard equinox
=========== ==========================================
following present if haveImage True
======== ==============================================
"RAPnt" Antenna pointing RA (deg) at standard equinox
"DecPnt" Antenna pointing Dec (deg) at standard equinox
"Freq" Reference frequency (Hz)
"BW" Image bandwidth (Hz)
"Size" Width of image in deg (From Stokes I)
"Cells" Cell spacing in deg (From Stokes I)
"Stokes" Stokes parameters of images
for each s in Stokes:
======= ===============================
"sSum" Sum of clean components in Jy
"sPeak" Peak pixel brightness in Jy
"sRMS" RMS noise in inner quarter (Jy)
"sBeam" Beam (maj, min, PA) (deg)
======= ===============================
    No additional entries are present if haveImage is False.
* uv = UV data object
* err = Python Obit Error/message stack
* Sources = Source name or list of names to use
If an empty list all sources in uv are included
* seq = sequence number of images
* sclass = Image class, first character replaced with char in Stokes
* FreqID = Frequency group identifier
* Stokes = Stokes parameters of images
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = show input
"""
################################################################
mess = "Generate source statistics "
printMess(mess, logfile)
# If list empty get all sources
if type(Sources)==list:
sl = Sources
else:
sl = [Sources]
if len(sl)<=0:
slist = EVLAAllSource(uv,err,logfile=logfile,check=check,debug=debug)
else:
slist = sl
# Init output
Report = []
# Image disk assumed same as uv
disk = uv.Disk
user = OSystem.PGetAIPSuser()
# Loop over slist
for sou in slist:
sdict = {"Source":sou, "haveImage":False} # Init source structure
sdict["ObsDate"] = uv.Desc.Dict["obsdat"]
# Observing stats
obstat = EVLAGetTimes (uv, sou, err, logfile=logfile, check=check, debug=debug)
sdict["numVis"] = obstat["numVis"]
sdict["Exposure"] = obstat["Exposure"]
sdict["RA"] = obstat["RA"]
sdict["Dec"] = obstat["Dec"]
# Test if image exists
cno = AIPSDir.PTestCNO(disk, user, sou, Stokes[0:1]+sclass[1:], "MA", seq, err)
if cno <= 0 :
Report.append(sdict) # Save source info
continue
# Image statistics, loop over Stokes
for s in Stokes:
klass = s+sclass[1:]
x = Image.newPAImage(s, sou, klass, disk, seq, True, err)
hd = x.Desc.Dict
sdict[s+"Beam"] = (hd["beamMaj"],hd["beamMin"],hd["beamPA"])
# Some from Stokes I only
if s == 'I':
sdict["haveImage"] = True
sdict["Size"] = hd["inaxes"][1]*hd["cdelt"][1]
sdict["Cells"] = hd["cdelt"][1]
sdict["RAPnt"] = hd["obsra"]
sdict["DecPnt"] = hd["obsdec"]
sdict["Freq"] = hd["crval"][hd["jlocf"]]
sdict["BW"] = hd["cdelt"][hd["jlocf"]]
sdict["Stokes"] = Stokes
blc = [int(hd["inaxes"][0]//4),int(hd["inaxes"][1]//4)]
                trc = [int(3*hd["inaxes"][0]//4),int(3*hd["inaxes"][1]//4)]
stat = imstat(x,err,blc=blc,trc=trc) # Image statistics inner quarter
if abs(stat["Max"]) > abs(stat["Min"]):
sdict[s+"Peak"] = stat["Max"]
else:
sdict[s+"Peak"] = stat["Min"]
sdict[s+"RMS"] = stat["RMSHist"]
if x.GetHighVer("AIPS CC")>0:
sdict[s+"Sum"] = EVLAGetSumCC(x, err, logfile=logfile, check=check, debug=debug)
else:
sdict[s+"Sum"] = -1.0
# End stokes image loop
Report.append(sdict) # Save source info
# end loop over sources
# Give terse listing
for sdict in Report:
mess = "\n Source = "+sdict["Source"]+", Exposure="+"%5.3f"%(sdict["Exposure"]*24.)+" hr"
printMess(mess, logfile)
if sdict["haveImage"]:
mess = "IPol Beam = ("+"%8.3f"%(sdict["IBeam"][0]*3600000.0)+", %8.3f"%(sdict["IBeam"][1]*3600000.0)+ \
", %6.1f"%(sdict["IBeam"][2])+") mas, mas, deg"
printMess(mess, logfile)
for s in Stokes:
mess = "Stokes "+s+" Sum CC="+"%8.3f"%(sdict[s+"Sum"])+", Peak="+"%8.3f"%(sdict[s+"Peak"])+ \
", RMS="+"%8.5f"%(sdict[s+"RMS"])+" Jy"
printMess(mess, logfile)
# End terse listing
return Report
# end EVLASrcMetadata
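# Hedged usage sketch; an empty Sources list means "all sources in uv":
#   srcMeta = EVLASrcMetadata(uv, err, Sources=[], seq=1, sclass="IClean",
#                             Stokes="I", logfile="pipeline.log")
# The resulting projMeta/srcMeta can then be fed to EVLAWriteVOTable and
# EVLAHTMLReport.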
def EVLAHTMLReport( projMetadata, srcMetadata, outfile="report.html",
logFile="" ):
"""
Write an HTML report on the processed data set. This includes information
on project (multi-source) metadata and data files as well as single source
metadata and data files.
* projMetadata = dictionary of project metadata
* srcMetadata = dictionary of single-source metadata
* outfile = name of HTML output file
* logFile = file for writing log messages
"""
mess = "Writing HTML report to " + outfile
printMess( mess, logFile )
file = open( outfile, 'w' )
EVLAAddOutFile( outfile, 'project', "HTML Report generated by pipeline" )
s = """
<html><head><style>
table {
border-collapse : collapse;
}
/* table,th,td {
border : 1px solid grey;
} */
.plot {
        height: 200px;
}
</style></head><body>"""
file.write( s )
s = "<h2> Contents </h2>"
s += "<a href='#project_Section'> Project </a>"
if srcMetadata:
for metadata in srcMetadata:
# Create links to each section
s += ' - <a href="#' + metadata['Source'] + '_Section">' + \
metadata['Source'] + '</a>'
file.write( s )
# Write project metadata
s = "<a id='project_Section'><h2> Project </h2></a>\n"
s += "<h3> Metadata </h3>\n"
s += "<table>\n"
# keys = [ 'project', 'session', 'band', 'obsDate', 'procDate', 'contCals',
# 'goodCal', 'anNames', 'freqCov', 'minFringe', 'obitVer',
# 'aipsVer', 'pyVer', 'sysInfo' ]
keys = None
s += writeTableRow( projMetadata, keys )
s += "</table>\n"
file.write( s )
# Write project output files
projFiles = manifest['project']
s = "<h3> Files </h3>\n"
s += "<table>\n"
for d in projFiles:
s += '<tr><th><a href="' + d['name'] + '">' + d['name'] + '</a>' + \
'</th><td>' + d['description'] + '</td></tr>\n'
s += '</table>\n'
s += '<hr>\n'
file.write( s )
# Write metadata and data files for each source
for metadata in srcMetadata:
# Create list of keys
keys = [ 'ObsDate', 'RA', 'Dec', 'Exposure', 'numVis', 'haveImage' ]
# if haveImage is True, these keys are also present
iKeys = [ 'RAPnt', 'DecPnt', 'Freq', 'BW', 'Size', 'Cells' ]
# These are present for each Stokes, w/ the Stokes character prepended
sKeys = [ 'Sum', 'Peak', 'RMS', 'Beam' ]
if metadata['haveImage'] == True:
keys = keys + iKeys
for s in metadata['Stokes']:
for k in sKeys:
keys.append(s + k)
# Write metadata table
s = '<a id="' + metadata['Source'] + '_Section">' + \
'<h2>' + metadata['Source'] + "</h2></a>\n"
s += "<h3> Metadata </h3>\n"
s += '<table>\n'
s += writeTableRow( metadata, keys )
s += '</table>\n'
file.write(s)
def writeImageCell( file ):
str = '<td><a href="' + file['name'] + '"> <img src="' + \
file['name'] + '" alt="' + file['description'] + \
'" class="plot"/></a></td>\n'
return str
s = "<table>\n"
s += "<tr><th>Contour</th><th>Amp vs Baseline</th><th>Re vs Im</th>"
s += "<th>U vs V</th></tr>\n"
s += "<tr>\n"
if metadata['Source'] in manifest['source']:
fileList = manifest['source'][ metadata['Source'] ]
tList = list(range(4))
for f in fileList:
if f['name'].find('cntr.jpg') != -1: tList[0] = f
if f['name'].find('amp.jpg') != -1: tList[1] = f
if f['name'].find('ri.jpg') != -1: tList[2] = f
if f['name'].find('uv.jpg') != -1: tList[3] = f
for f in tList:
if type(f)==dict:
s += writeImageCell( f )
s += '</tr></table>\n'
file.write(s)
# Write output file table
s = "<h3> Files </h3>\n"
s += "<table>\n"
if metadata['Source'] in manifest['source']:
for d in manifest['source'][ metadata['Source'] ]:
s += '<tr><th><a href="' + d['name'] + '">' + d['name'] + \
'</a>' + '</th><td>' + d['description'] + '</td></tr>'
s += "</table>\n"
s += "<hr>\n"
file.write(s)
s = "</body></html>"
file.write(s)
file.close()
# end EVLAHTMLReport
def writeTableRow( dict, keys=None ):
"""
Write the contents of a dictionary as an HTML table.
* dict = dictionary whose contents will be written
* keys = dictionary keys to be written
"""
if not keys:
keys = list(dict.keys())
keys.sort()
s = ""
# Write a row of the HTML table for every key in keys. Handle some key
# values specially.
for key in keys:
if key == "anNames":
# Print the number of antennas when printing the list of ant names
s += '<tr><th>' + key + '</th>' + \
'<td> Number = ' + str( len( dict[key] ) ) + ', ' + \
'Names = ' + str( dict[key] ) + \
'</td></tr>\n'
elif key == 'freqCov':
# Print frequencies with limited precision
fs = ""
for freqs in dict[key]:
fs += '(%.3e, %.3e) ' % ( freqs[0], freqs[1] )
s += '<tr><th>' + key + '</th><td>' + fs + '</td></tr>\n'
elif (key == 'RA') or (key == 'RAPnt'):
s += '<tr><th>' + key + '</th>' + \
'<td>' + UVDesc.PRA2HMS(dict[key]) + '</td></tr>\n'
elif (key == 'Dec') or (key == 'DecPnt'):
s += '<tr><th>' + key + '</th>' + \
'<td>' + UVDesc.PDec2DMS(dict[key]) + '</td></tr>\n'
elif (key == 'timeRange'):
s += '<tr><th> Time Range </th>' + \
'<td>' + day2dhms(dict['timeRange'][0]) + ' - ' + \
day2dhms(dict['timeRange'][1]) + ' </td></tr>\n'
elif (key == 'Freq'):
s += '<tr><th>' + key + '</th>' + \
'<td>' + "%6.3f"%(dict[key]*1.0e-9) + ' GHz </td></tr>\n'
elif (key == 'BW'):
s += '<tr><th>' + key + '</th>' + \
'<td>' + "%6.3f"%(dict[key]*1.0e-6) + ' MHz </td></tr>\n'
elif (key == 'SNR'):
s += '<tr><th>' + key + '</th>' + \
'<td>' + "%6.1f"%(dict[key]) + ' </td></tr>\n'
elif (key == 'Exposure'):
s += '<tr><th>' + key + '</th>' + \
'<td>' + "%6.3f"%(dict[key]*24.0) + ' Hours </td></tr>\n'
elif (key == 'Size') or (key == "Cells"):
s += '<tr><th>' + key + '</th>' + \
'<td>' + "%8.5f"%(dict[key]*3.6e3) + ' asec </td></tr>\n'
elif (key == 'ISum') or (key == "QSum") or (key == "USum"):
s += '<tr><th>' + key + '</th>' + \
'<td>' + "%8.3f"%(dict[key]*1.0e3) + ' mJy </td></tr>\n'
elif (key == 'IPeak') or (key == "QPeak") or (key == "UPeak"):
s += '<tr><th>' + key + '</th>' + \
'<td>' + "%8.3f"%(dict[key]*1.0e3) + ' mJy </td></tr>\n'
elif (key == 'IRMS') or (key == "QRMS") or (key == "URMS"):
s += '<tr><th>' + key + '</th>' + \
'<td>' + "%8.5f"%(dict[key]*1.0e3) + ' mJy </td></tr>\n'
elif (key == 'IBeam'):
s += '<tr><th> Clean Beam </th>' + \
'<td>' + \
" %6.4f, %6.4f, %6.1f"%(dict[key][0]*3.6e3, dict[key][1]*3.6e3, dict[key][2]) + \
' (asec,asec,deg) </td></tr>\n'
elif (key == 'FailProc'):
s += '<tr><th> Failing process </th>' + \
'<td>' + " %s"%(dict[key])+' </td></tr>\n'
else:
# Everything else
s += '<tr><th>' + key + '</th>' + \
'<td>' + str( dict[key] ) + '</td></tr>\n'
return s
# end writeTableRow
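# Hedged example of the HTML produced for one specially-handled key: with
# {'Exposure': 0.25} (a time in days) the generated row is approximately
#   <tr><th>Exposure</th><td> 6.000 Hours </td></tr>
# i.e. the value is converted to hours and formatted with "%6.3f".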
def EVLAScriptHistory (uv, scriptName, aipsSetup, parmFile, err):
"""
Write script names to uv history
* uv = UV data to smooth
* scriptName = name of processing script
* aipsSetup = name of AIPS setup file
* parmFile = name of parameter file
* err = Python Obit Error/message stack
"""
outHistory = History.History("inhistory", uv.List, err)
outHistory.Open(History.READWRITE, err)
outHistory.TimeStamp(" Start Script "+scriptName,err)
outHistory.WriteRec(-1,"script aipsSetup="+aipsSetup+" / AIPS setup file",err)
outHistory.WriteRec(-1,"script parmFile="+parmFile+" / parameter file",err)
outHistory.Close(err)
# end EVLAScriptHistory
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@Obit@[email protected]@.PATH_END.py
|
{
"filename": "datafiles_window.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/datafiles_window.py",
"type": "Python"
}
|
import sys #,os
from PyQt6 import QtWidgets,QtGui,QtCore
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(False)
class datafiles_window(QtWidgets.QDialog):
def __init__(self, parent = None):
# super(show_symbols, self).__init__(parent)
super(datafiles_window, self).__init__()
self.layout = QtWidgets.QVBoxLayout(self)
self.title = 'Select valid data file'
# self.setFixedSize(550, 800)
self.widget=QtWidgets.QWidget(self) # central widget
self.setGeometry(1,1, 495, 325)
self.treeview = QtWidgets.QTreeView()
self.listview = QtWidgets.QListView()
self.layout.addWidget(self.treeview)
self.layout.addWidget(self.listview)
path = QtCore.QDir.homePath()
self.dirModel = QtGui.QFileSystemModel()
self.dirModel.setRootPath(path) #QDir.currentPath())
self.dirModel.setFilter(QtCore.QDir.NoDotAndDotDot | QtCore.QDir.AllDirs)
self.fileModel = QtGui.QFileSystemModel()
self.fileModel.setFilter(QtCore.QDir.NoDotAndDotDot | QtCore.QDir.Files)
filter = ['*.vels', '*.act','*.tran','*.dat']
self.fileModel.setNameFilters(filter)
self.treeview.setModel(self.dirModel)
self.listview.setModel(self.fileModel)
self.treeview.setFont(font)
self.listview.setFont(font)
self.treeview.hideColumn(1)
self.treeview.hideColumn(2)
self.treeview.hideColumn(3)
self.treeview.setRootIndex(self.dirModel.index(path))
self.listview.setRootIndex(self.fileModel.index(path))
self.treeview.clicked.connect(self.on_clicked)
# self.cancel_button = QtGui.QPushButton('Close', self)
# self.layout.addWidget(self.cancel_button)
# self.cancel_button.clicked.connect(self.close)
#self.Ok_button.clicked.connect(self.get_radio)
def on_clicked(self, index):
path = self.dirModel.fileInfo(index).absoluteFilePath()
self.listview.setRootIndex(self.fileModel.setRootPath(path))
return
if __name__ == '__main__':

    app = QtWidgets.QApplication(sys.argv)
    w = datafiles_window()
    w.show()
    sys.exit(app.exec())
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@[email protected]_END.py
|
{
"filename": "README.md",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/deploy/gke-marketplace-app/trt-engine/README.md",
"type": "Markdown"
}
|
<!--
# Copyright (c) 2021-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-->
# Instructions to create BERT engine for each Triton update
## Description
```
docker run --gpus all -it --network host \
--shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 \
-v ~:/scripts nvcr.io/nvidia/tensorrt:24.11-py3
pip install onnx six torch tf2onnx tensorflow
git clone -b main https://github.com/NVIDIA/TensorRT.git
cd TensorRT
git submodule update --init --recursive
export TRT_OSSPATH=/workspace/TensorRT
export TRT_LIBPATH=/lib/x86_64-linux-gnu
pushd /usr/local/bin && wget https://ngc.nvidia.com/downloads/ngccli_cat_linux.zip && unzip ngccli_cat_linux.zip && chmod u+x ngc-cli/ngc && rm ngccli_cat_linux.zip ngc-cli.md5 && ln -s ngc-cli/ngc ngc && echo "no-apikey\nascii\n" | ngc config set
popd
cd /workspace/TensorRT/demo/BERT
bash ./scripts/download_squad.sh
bash ./scripts/download_model.sh large 128
# bash ./scripts/download_model.sh large 384
mkdir -p engines
python3 builder.py -m models/fine-tuned/bert_tf_ckpt_large_qa_squad2_amp_128_v19.03.1/model.ckpt -o engines/bert_large_int8_bs1_s128.engine -b 1 -s 128 -c models/fine-tuned/bert_tf_ckpt_large_qa_squad2_amp_128_v19.03.1/ -v models/fine-tuned/bert_tf_ckpt_large_qa_squad2_amp_128_v19.03.1/vocab.txt --int8 --fp16 --strict --calib-num 1 -iln -imh
gsutil cp bert_large_int8_bs1_s128.engine gs://triton_sample_models/24.11/bert/1/model.plan
```
For each Triton upgrade, the container version used to generate the model and the model path in GCS (`gs://triton_sample_models/24.11/`) should be updated to the correct version.
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@deploy@gke-marketplace-app@[email protected]@.PATH_END.py
|