text stringlengths 12–1.05M | repo_name stringlengths 5–86 | path stringlengths 4–191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12–1.05M | keyword sequencelengths 1–23 | text_hash stringlengths 64–64 |
---|---|---|---|---|---|---|---|
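# The rows below follow the schema above. A minimal sketch of loading and filtering
# such rows by keyword; the file name "code_rows.jsonl" is a hypothetical export of
# this table, not something shipped with the dataset:
import json

with open("code_rows.jsonl") as fh:
    rows = [json.loads(line) for line in fh]

# Keep only the rows tagged with the MOOSE keyword and print their provenance.
moose_rows = [r for r in rows if "MOOSE" in r["keyword"]]
for r in moose_rows:
    print(r["repo_name"], r["path"], r["license"], r["size"])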
########################################################################
# This program is copyright (c) Upinder S. Bhalla, NCBS, 2015.
# It is licenced under the GPL 2.1 or higher.
# There is no warranty of any kind. You are welcome to make copies under
# the provisions of the GPL.
# This programme illustrates building a panel of multiscale models to
# test neuronal plasticity in different contexts.
########################################################################
try:
import moogli
except Exception as e:
print( "[INFO ] Could not import moogli. Quitting..." )
quit()
import numpy
import time
import pylab
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import matplotlib.pyplot as plt
import sys
import os
from moose.neuroml.ChannelML import ChannelML
sys.path.append('../../../Demos/util')
import rdesigneur as rd
PI = 3.14159265359
useGssa = True
combineSegments = True
# Pick your favourite cell here.
#elecFileName = "ca1_minimal.p"
## Cell morphology from Bannister and Larkman J Neurophys 2015/NeuroMorpho
elecFileName = "h10.CNG.swc"
#elecFileName = "CA1.morph.xml"
#elecFileName = "VHC-neuron.CNG.swc"
synSpineList = []
synDendList = []
probeInterval = 0.1
probeAmplitude = 1.0
tetanusFrequency = 100.0
tetanusAmplitude = 1000
tetanusAmplitudeForSpines = 1000
frameRunTime = 1e-3 # 1 ms
baselineTime = 0.05
tetTime = 0.01
postTetTime = 0.01
runtime = baselineTime + tetTime + postTetTime
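# With the values above, the total simulated time is 0.05 + 0.01 + 0.01 = 0.07 s,
# advanced in 1 ms frames (frameRunTime) by the 3-D viewers defined below.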
def buildRdesigneur():
'''
##################################################################
# Here we define which prototypes are to be loaded in to the system.
# Each specification has the format
# source [localName]
# source can be any of
# filename.extension, # Identify type of file by extension, load it.
# function(), # func( name ) builds object of specified name
# file.py:function() , # load Python file, run function(name) in it.
# moose.Classname # Make obj moose.Classname, assign to name.
# path # Already loaded into library or on path.
# After loading the prototypes, there should be an object called 'name'
# in the library.
##################################################################
'''
cellProto = [ [ "./cells/" + elecFileName, "elec" ] ]
chanProto = [
['./chans/hd.xml'], \
['./chans/kap.xml'], \
['./chans/kad.xml'], \
['./chans/kdr.xml'], \
['./chans/na3.xml'], \
['./chans/nax.xml'], \
['./chans/CaConc.xml'], \
['./chans/Ca.xml'], \
['./chans/NMDA.xml'], \
['./chans/Glu.xml'] \
]
spineProto = [ \
['makeSpineProto()', 'spine' ]
]
chemProto = []
##################################################################
# Here we define what goes where, and any parameters. Each distribution
# has the format
# protoName, path, field, expr, [field, expr]...
# where
# protoName identifies the prototype to be placed on the cell
# path is a MOOSE wildcard path specifying where to put things
# field is the field to assign.
# expr is a math expression to define field value. This uses the
# muParser. Built-in variables are:
# p, g, L, len, dia, maxP, maxG, maxL.
# where
# p = path distance from soma, threaded along dendrite
# g = geometrical distance from soma (shortest distance)
# L = electrotonic distance from soma: number of length constants
# len = length of dendritic compartment
# dia = diameter of dendritic compartment
# maxP = maximal value of 'p' for the cell
# maxG = maximal value of 'g' for the cell
# maxL = maximal value of 'L' for the cell
#
# The muParser provides most math functions, and the Heaviside
# function H(x) = 1 for x > 0 is also provided.
##################################################################
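# Worked example of one such expression: "5e-2*(1+(p*3e4))", used for the "hd"
# channel below, evaluates to 0.05 at the soma (p = 0) and to
# 0.05*(1 + 100e-6*3e4) = 0.2 at p = 100 microns, i.e. the channel density
# grows linearly with path distance from the soma.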
passiveDistrib = [
[ ".", "#", "RM", "2.8", "CM", "0.01", "RA", "1.5", \
"Em", "-58e-3", "initVm", "-65e-3" ], \
[ ".", "#axon#", "RA", "0.5" ] \
]
chanDistrib = [ \
["hd", "#dend#,#apical#", "Gbar", "5e-2*(1+(p*3e4))" ], \
["kdr", "#", "Gbar", "p < 50e-6 ? 500 : 100" ], \
["na3", "#soma#,#dend#,#apical#", "Gbar", "250" ], \
["nax", "#soma#,#axon#", "Gbar", "1250" ], \
["kap", "#axon#,#soma#", "Gbar", "300" ], \
["kap", "#dend#,#apical#", "Gbar", \
"300*(H(100-p*1e6)) * (1+(p*1e4))" ], \
["Ca_conc", "#soma#,#dend#,#apical#", "tau", "0.0133" ], \
["kad", "#soma#,#dend#,#apical#", "Gbar", \
"300*H(p - 100e-6)*(1+p*1e4)" ], \
["Ca", "#dend#,#apical#", "Gbar", "p<160e-6? 10+ p*0.25e-6 : 50" ], \
["Ca", "#soma#", "Gbar", "10" ], \
["glu", "#dend#,#apical#", "Gbar", "200*H(p-200e-6)" ], \
["NMDA", "#dend#,#apical#", "Gbar", "2*H(p-200e-6)" ] \
]
'''
spineDistrib = [ \
["spine", '#apical#', "spineSpacing", "20e-6", \
"spineSpacingDistrib", "2e-6", \
"angle", "0", \
"angleDistrib", str( 2*PI ), \
"size", "1", \
"sizeDistrib", "0.5" ] \
]
'''
spineDistrib = [
["spine", '#apical#',
"20e-6", "2e-6",
"1", "0.5",
"0", str( 2*PI ) ]
]
chemDistrib = []
spineProto = [['makeActiveSpine()', 'spine']]
######################################################################
# Here we define the mappings across scales. Format:
# sourceObj sourceField destObj destField offset scale
# where the coupling expression is anything a muParser can evaluate,
# using the input variable x. For example: 8e-5 + 300*x
# For now, let's use existing adaptors which take an offset and scale.
######################################################################
adaptorList = []
######################################################################
# Having defined everything, now to create the rdesigneur and proceed
# with creating the model.
######################################################################
#rd.addSpineProto() # This adds a version with an LCa channel by default.
rdes = rd.rdesigneur(
useGssa = useGssa,
combineSegments = combineSegments,
stealCellFromLibrary = True,
passiveDistrib = passiveDistrib,
spineDistrib = spineDistrib,
chanDistrib = chanDistrib,
chemDistrib = chemDistrib,
cellProto = cellProto,
chanProto = chanProto,
chemProto = chemProto,
spineProto = spineProto,
adaptorList = adaptorList
)
#spineProto = spineProto, \
return rdes
def buildPlots( rdes ):
graphs = moose.Neutral( '/graphs' )
vtab = moose.Table( '/graphs/VmTab' )
moose.connect( vtab, 'requestOut', rdes.soma, 'getVm' )
def displayPlots():
pylab.figure(1, figsize = (8,10 ) )
pylab.subplot( 1,1,1)
for i in moose.wildcardFind( "/graphs/#VmTab" ):
t = numpy.arange( 0, i.vector.size, 1 ) * i.dt
pylab.plot( t, i.vector, label = i.name )
pylab.xlabel( "Time (s)" )
pylab.legend()
pylab.title( 'Vm' )
pylab.figure(2, figsize= (8,10))
ax = pylab.subplot( 1,1,1 )
neuron = moose.element( '/model/elec' )
comptDistance = dict( list(zip( neuron.compartments, neuron.pathDistanceFromSoma ) ))
for i in moose.wildcardFind( '/library/#[ISA=ChanBase]' ):
chans = moose.wildcardFind( '/model/elec/#/' + i.name )
print ( i.name, len( chans ) )
p = [ 1e6*comptDistance.get( j.parent, 0) for j in chans ]
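# p is the path distance in microns (the 1e6 factor converts from metres); the
# next line converts each channel's absolute conductance to a density by dividing
# by the lateral membrane area of its parent compartment, PI * diameter * length.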
Gbar = [ j.Gbar/(j.parent.length * j.parent.diameter * PI) for j in chans ]
if len( p ) > 2:
pylab.plot( p, Gbar, linestyle = 'None', marker = ".", label = i.name )
sortedGbar = sorted(zip(p, Gbar), key=lambda x: x[0])
ax.set_yscale( 'log' )
pylab.xlabel( "Distance from soma (microns)" )
pylab.ylabel( "Channel density (Seimens/sq mtr)" )
pylab.legend()
pylab.title( 'Channel distribution' )
pylab.show()
def create_vm_viewer(rdes):
network = moogli.extensions.moose.read(rdes.elecid.path)
normalizer = moogli.utilities.normalizer(-0.08,
0.02,
clipleft=True,
clipright=True)
colormap = moogli.colors.UniformColorMap([moogli.colors.Color(0.0,
0.0,
1.0,
1.0),
moogli.colors.Color(1.0,
1.0,
0.0,
0.1)])
mapper = moogli.utilities.mapper(colormap, normalizer)
vms = [moose.element(x).Vm for x in list(network.shapes.keys())]
network.set("color", vms, mapper)
def prelude(view):
view.pitch(PI/2)
view.zoom(0.4)
def interlude(view):
moose.start(frameRunTime)
vms = [moose.element(x).Vm for x in list(network.shapes.keys())]
network.set("color", vms, mapper)
view.yaw(0.01)
currTime = moose.element('/clock').currentTime
if currTime < runtime:
deliverStim(currTime)
else:
view.stop()
def postlude(view):
displayPlots()
viewer = moogli.Viewer("vm-viewer")
viewer.attach_shapes(list(network.shapes.values()))
view = moogli.View("vm-view",
prelude=prelude,
interlude=interlude,
postlude=postlude)
viewer.attach_view(view)
return viewer
def create_ca_viewer(rdes):
network = moogli.extensions.moose.read(rdes.elecid.path)
ca_elements = []
for compartment_path in list(network.shapes.keys()):
if moose.exists(compartment_path + '/Ca_conc'):
ca_elements.append(moose.element(compartment_path + '/Ca_conc'))
else:
ca_elements.append(moose.element('/library/Ca_conc'))
normalizer = moogli.utilities.normalizer(0.0,
0.002,
clipleft=True,
clipright=True)
colormap = moogli.colors.UniformColorMap([moogli.colors.Color(1.0,
0.0,
0.0,
1.0),
moogli.colors.Color(0.0,
1.0,
1.0,
0.1)])
mapper = moogli.utilities.mapper(colormap, normalizer)
cas = [element.Ca for element in ca_elements]
network.set("color", cas, mapper)
def prelude(view):
view.pitch(PI/2)
view.zoom(0.4)
def interlude(view):
moose.start(frameRunTime)
cas = [element.Ca for element in ca_elements]
network.set("color", cas, mapper)
view.yaw(0.01)
currTime = moose.element('/clock').currentTime
if currTime < runtime:
deliverStim(currTime)
else:
view.stop()
viewer = moogli.Viewer("ca-viewer")
viewer.attach_shapes(list(network.shapes.values()))
view = moogli.View("ca-view",
prelude=prelude,
interlude=interlude)
viewer.attach_view(view)
return viewer
def build3dDisplay(rdes):
print (("building 3d Display"))
app = QtGui.QApplication(sys.argv)
vm_viewer = create_vm_viewer(rdes)
vm_viewer.resize(700, 900)
vm_viewer.show()
vm_viewer.start()
ca_viewer = create_ca_viewer(rdes)
ca_viewer.resize(700, 900)
ca_viewer.show()
ca_viewer.start()
return app.exec_()
def deliverStim( currTime ):
if currTime > baselineTime and currTime < baselineTime + tetTime:
# deliver tet stim
step = int ( (currTime - baselineTime) / frameRunTime )
tetStep = int( 1.0 / (tetanusFrequency * frameRunTime ) )
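# With tetanusFrequency = 100 Hz and frameRunTime = 1 ms, tetStep = 10, so the
# tetanic stimulus is delivered on every 10th frame, i.e. at 100 Hz.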
if step % tetStep == 0:
for i in synDendList:
i.activation( tetanusAmplitude )
for i in synSpineList:
i.activation( tetanusAmplitudeForSpines )
else:
# deliver probe stim
step = int (currTime / frameRunTime )
probeStep = int( probeInterval / frameRunTime )
if step % probeStep == 0:
print (("Doing probe Stim at ", currTime))
for i in synSpineList:
i.activation( probeAmplitude )
def main():
global synSpineList
global synDendList
numpy.random.seed( 1234 )
rdes = buildRdesigneur()
rdes.buildModel( '/model' )
assert( moose.exists( '/model' ) )
synSpineList = moose.wildcardFind( "/model/elec/#head#/glu,/model/elec/#head#/NMDA" )
temp = set( moose.wildcardFind( "/model/elec/#/glu,/model/elec/#/NMDA" ) )
synDendList = list( temp - set( synSpineList ) )
print (("num spine, dend syns = ", len( synSpineList ), len( synDendList )))
moose.reinit()
#for i in moose.wildcardFind( '/model/elec/#apical#/#[ISA=CaConcBase]' ):
#print i.path, i.length, i.diameter, i.parent.length, i.parent.diameter
buildPlots(rdes)
# Run for baseline, tetanus, and post-tetanic settling time
t1 = time.time()
build3dDisplay(rdes)
print (('real time = ', time.time() - t1))
if __name__ == '__main__':
main()
| BhallaLab/moose | moose-examples/paper-2015/Fig2_elecModels/Fig2C.py | Python | gpl-3.0 | 14,223 | ["MOOSE", "NEURON"] | 5eb6a5a439a675762a02c12cdff996e6a0d98f6ee874773cba2951727562aac5 |
# creates: N.LDA
import os
from gpaw.test import gen
gen('N')
os.system('cp N.LDA ../_build')
| qsnake/gpaw | doc/setups/N.py | Python | gpl-3.0 | 94 | ["GPAW"] | ad7d53917d97406476db3321deeeb0fb89711b3341fa301373e89d7cf3800a42 |
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import numpy as np
import os
from neon.backends import gen_backend
from neon.data import DataIterator, load_mnist, load_text, Text
from neon.initializers import Gaussian, Constant
from neon.layers import GeneralizedCost, Affine, BatchNorm
from neon.layers import Dropout, Conv, Pooling, MergeConcat, Recurrent
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary
from neon.util.persist import save_obj
def test_model_get_outputs_rnn(backend):
data_path = load_text('ptb-valid')
data_set = Text(time_steps=50, path=data_path)
# weight initialization
init = Constant(0.08)
# model initialization
layers = [
Recurrent(150, init, Logistic()),
Affine(len(data_set.vocab), init, bias=init, activation=Rectlin())
]
model = Model(layers=layers)
output = model.get_outputs(data_set)
assert output.shape == (data_set.ndata, data_set.seq_length, data_set.nclass)
def test_model_get_outputs(backend):
(X_train, y_train), (X_test, y_test), nclass = load_mnist()
train_set = DataIterator(X_train[:backend.bsz * 3])
init_norm = Gaussian(loc=0.0, scale=0.1)
layers = [Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
mlp = Model(layers=layers)
out_list = []
for x, t in train_set:
x = mlp.fprop(x)
out_list.append(x.get().T.copy())
ref_output = np.vstack(out_list)
train_set.reset()
output = mlp.get_outputs(train_set)
assert np.allclose(output, ref_output)
def test_model_serialize(backend):
(X_train, y_train), (X_test, y_test), nclass = load_mnist()
train_set = DataIterator([X_train, X_train], y_train, nclass=nclass, lshape=(1, 28, 28))
init_norm = Gaussian(loc=0.0, scale=0.01)
# initialize model
path1 = [Conv((5, 5, 16), init=init_norm, bias=Constant(0), activation=Rectlin()),
Pooling(2),
Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())]
path2 = [Dropout(keep=0.5),
Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())]
layers = [MergeConcat([path1, path2]),
Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
BatchNorm(),
Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
tmp_save = 'test_model_serialize_tmp_save.pickle'
mlp = Model(layers=layers)
mlp.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
mlp.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
n_test = 3
num_epochs = 3
# Train model for num_epochs and n_test batches
for epoch in range(num_epochs):
for i, (x, t) in enumerate(train_set):
x = mlp.fprop(x)
delta = mlp.cost.get_errors(x, t)
mlp.bprop(delta)
mlp.optimizer.optimize(mlp.layers_to_optimize, epoch=epoch)
if i > n_test:
break
# Get expected outputs of n_test batches and states of all layers
outputs_exp = []
pdicts_exp = [l.get_params_serialize() for l in mlp.layers_to_optimize]
for i, (x, t) in enumerate(train_set):
outputs_exp.append(mlp.fprop(x, inference=True))
if i > n_test:
break
# Serialize model
save_obj(mlp.serialize(keep_states=True), tmp_save)
# Load model
mlp = Model(layers=layers)
mlp.load_weights(tmp_save)
outputs = []
pdicts = [l.get_params_serialize() for l in mlp.layers_to_optimize]
for i, (x, t) in enumerate(train_set):
outputs.append(mlp.fprop(x, inference=True))
if i > n_test:
break
# Check outputs, states, and params are the same
for output, output_exp in zip(outputs, outputs_exp):
assert np.allclose(output.get(), output_exp.get())
for pd, pd_exp in zip(pdicts, pdicts_exp):
for s, s_e in zip(pd['states'], pd_exp['states']):
if isinstance(s, list): # this is the batch norm case
for _s, _s_e in zip(s, s_e):
assert np.allclose(_s, _s_e)
else:
assert np.allclose(s, s_e)
for p, p_e in zip(pd['params'], pd_exp['params']):
if isinstance(p, list): # this is the batch norm case
for _p, _p_e in zip(p, p_e):
assert np.allclose(_p, _p_e)
else:
assert np.allclose(p, p_e)
os.remove(tmp_save)
if __name__ == '__main__':
be = gen_backend(backend='gpu', batch_size=50)
test_model_get_outputs_rnn(be)
| misko/neon | tests/test_model.py | Python | apache-2.0 | 5,468 | ["Gaussian"] | 3bfd6fb19f3b714563f6e85de7e32ae6cf3194700cb2bc8edfd82d289f9d24bc |
#!/usr/bin/env python
"""Extract read start from BAM files to Wig format for PAUSE.
Usage:
bam_to_wiggle.py <BAM file>
"""
import os
import tempfile
from contextlib import contextmanager
import pysam
import subprocess
import argparse
@contextmanager
def indexed_bam(bam_file):
if not os.path.exists(bam_file.name + ".bai"):
pysam.index(bam_file.name)
sam_reader = pysam.Samfile(bam_file.name, "rb")
yield sam_reader
sam_reader.close()
def gen_header(bam_file, suffix):
track_name = "name=%s_%s" % (
os.path.splitext(os.path.split(bam_file)[-1])[0],
suffix,
)
return "track type=wiggle_0 %s visibility=full\n" % track_name
def convert_to_bigwig(wig_file, chr_sizes, bw_file):
# This will be fine under Galaxy, but could use temp folder?
size_file = "%s-sizes.txt" % (os.path.splitext(bw_file)[0])
with open(size_file, "w") as out_handle:
for chrom, size in chr_sizes:
out_handle.write("%s\t%s\n" % (chrom, size))
try:
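# wigToBigWig is the external UCSC conversion utility; it is invoked as a
# subprocess, so it must be available on the PATH.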
cl = ["wigToBigWig", wig_file, size_file, bw_file]
subprocess.check_call(cl)
finally:
os.unlink(size_file)
return bw_file
def start_data(bam_file, starts_f=None, starts_r=None):
with indexed_bam(bam_file) as work_bam:
starts_f_wig = tempfile.NamedTemporaryFile(delete=False)
starts_r_wig = tempfile.NamedTemporaryFile(delete=False)
sizes = zip(work_bam.references, work_bam.lengths)
regions = [(name, 0, length) for name, length in sizes]
for chrom, start, end in regions:
if end is None and chrom in work_bam.references:
end = work_bam.lengths[work_bam.references.index(chrom)]
assert end is not None, "Could not find %s in header" % chrom
# Since the file is sorted, we could actually optimise this bit
# out...currently fails cost benefit analysis so will wait until
# memory issues are reported.
start_map_f = {}
start_map_r = {}
for col in work_bam.fetch(chrom, start, end):
# print " ".join(map(str, [col.qstart, col.qend, col.rlen, col.aend, col.alen, col.pos]))
# qstart qend rlen aend alen pos
# 0 145 145 13537 143 13394
# reverse strand
# start is 13395
# end is 13537
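# i.e. for the reverse-strand read above the recorded start is col.aend (13537),
# while for a forward-strand read it is col.pos + 1.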
if col.is_reverse:
rstart = col.aend
if rstart in start_map_r:
start_map_r[rstart] += 1
else:
start_map_r[rstart] = 1
else:
rstart = col.pos + 1
if rstart in start_map_f:
start_map_f[rstart] += 1
else:
start_map_f[rstart] = 1
# Write to file
starts_f_wig.write(gen_header(bam_file.name, "f"))
starts_f_wig.write("variableStep chrom=%s\n" % chrom)
for i in range(start + 1, end + 1):
if i in start_map_f:
starts_f_wig.write("%s %.1f\n" % (i, start_map_f[i]))
else:
starts_f_wig.write("%s 0.0\n" % i)
starts_r_wig.write(gen_header(bam_file.name, "r"))
starts_r_wig.write("variableStep chrom=%s\n" % chrom)
for i in range(start + 1, end + 1):
if i in start_map_r:
starts_r_wig.write("%s %.1f\n" % (i, start_map_r[i]))
else:
starts_r_wig.write("%s 0.0\n" % i)
starts_f_wig.close()
starts_r_wig.close()
try:
convert_to_bigwig(starts_f_wig.name, sizes, starts_f.name)
convert_to_bigwig(starts_r_wig.name, sizes, starts_r.name)
finally:
os.unlink(starts_f_wig.name)
os.unlink(starts_r_wig.name)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Extract starts from BAM as BigWig")
parser.add_argument("bam_file", type=argparse.FileType("r"), help="Bam file")
parser.add_argument(
"--starts_f",
type=argparse.FileType("wb"),
default="starts.f.bw",
help="Sense Starts File",
)
parser.add_argument(
"--starts_r",
type=argparse.FileType("wb"),
default="starts.r.bw",
help="Antisense Starts File",
)
parser.add_argument("--version", action="version", version="0.1")
args = parser.parse_args()
start_data(**vars(args))
| TAMU-CPT/galaxy-tools | tools/pause/pause_starts_to_wiggle.py | Python | gpl-3.0 | 4,610 | ["Galaxy", "pysam"] | 7a17a731153d43766a00672d66cbc22da6041df4aad39283a34c65b81a35440d |
#!/usr/bin/env python
""" check_pseudo.py calculates energy for 7 alat points near SIESTA equilibrium to fine tune the delta-factor.
"""
import os
import sys
import uuid
import glob
import numpy as np
import shutil
import matplotlib.pyplot as plt
from generate import PGInputFile, PTInputFile
from get_energies import read_energy
from calc_delta import BM, read_ref_data, calcDelta, get_alats, get_volumes
def check_pseudo(settings, data_dir):
""" Checks pseudopotential for delta factor calculation
Arguments:
settings {[type]} -- [description]
data_dir {[type]} -- [description]
"""
cwd = os.getcwd()
element = settings.calc["element"]
x, y = [], []
os.chdir(data_dir)
pseudo_file = glob.glob("*.psf")[0]
for root, dirs, _ in os.walk(os.getcwd()):
if "check" in root: continue
for dir_i in dirs:
try:
alat = float(dir_i)
except:
continue
energies = read_energy(element, alat)
if energies is not None:
x_i, y_i = energies
x.append(x_i)
y.append(y_i)
x = np.array(x) / settings.calc["nat"]
y = np.array(y) / settings.calc["nat"]
p = np.polyfit(x, y, 2)
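# Quadratic fit of per-atom energy vs. volume; evaluated below to supply
# 7 points when the raw data does not already contain exactly 7.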
# make 7 points out of existing data
if len(x) == 7:
x_p = x
y_p = y
else:
x_p = get_volumes(7, settings.calc) / settings.calc["nat"]
y_p = np.poly1d(p)(x_p)
# get check directory
if not os.path.exists("check"):
os.makedirs("check")
shutil.copy(pseudo_file, "check")
os.chdir("check")
# write original data
np.savetxt("energies_original.txt", np.vstack((x, y)).T)
vol, bulk_mod, bulk_deriv, _ = BM(np.vstack((x_p, y_p)).T)
np.savetxt("energies_BM.txt", np.vstack((x_p, y_p)).T)
our_data = np.core.records.fromrecords([(element, vol, bulk_mod, bulk_deriv), ], names=('element', 'V0', 'B0', 'BP'))
ref_data = read_ref_data(os.path.join(cwd, "delta", "WIEN2k.txt"))
ref_data_el = ref_data[ref_data['element'] == element]
delta, delta_rel, _ = calcDelta(our_data, ref_data_el, useasymm=False)
with open("BP.dat", "w") as f:
f.write("Our data: {}\n".format(our_data))
f.write("Reference data: {}\n".format(ref_data_el))
f.write("Delta factor: {} {}\n".format(delta, delta_rel))
| ansobolev/PseudoGenerator | pseudogen/check_pseudo.py | Python | mit | 2,386 | ["SIESTA", "WIEN2k"] | 2286a65136ae498e930e31d1f7c6bfcf92c0cc82d6b4540635ee0de03e12cad9 |
from copy import deepcopy as dc
from itertools import combinations
import ase.io as aseio
import numpy as np
from ase.atoms import Atoms as AAtoms
from pyiid.asa import calculate_asa, get_neighbor_list, get_coordination
__author__ = 'christopher'
def convert_stru_to_atoms(stru):
symbols = []
xyz = []
tags = []
for d_atom in stru:
symbols.append(d_atom.element)
xyz.append(d_atom.xyz)
tags.append(d_atom.label)
atoms = AAtoms(symbols, np.array(xyz), tags=tags)
return atoms
def build_sphere_np(file_name, radius):
"""
Build a spherical nanoparticle
:param file_name: ASE loadable atomic positions
:param radius: Radius of particle in Angstroms
:return:
"""
atoms = aseio.read(file_name)
cell_dist = atoms.get_cell()
multiple = np.ceil(2 * radius / cell_dist.diagonal()).astype(int)
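# Repeat the unit cell enough times along each axis to span the sphere's full
# diameter before carving the spherical particle out of the block.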
atoms = atoms.repeat(multiple)
com = atoms.get_center_of_mass()
atoms.translate(-com)
del atoms[[atom.index for atom in atoms
if np.sqrt(np.dot(atom.position, atom.position)) >=
np.sqrt(radius ** 2)]]
atoms.center()
atoms.set_pbc((False, False, False))
return atoms
def tag_surface_atoms(atoms, tag=1, probe=1.4, cutoff=None):
"""
Find which are the surface atoms in a nanoparticle.
Parameters
----------
atoms: ase.atoms object
The atomic configuration
tag: int
The number with which to tag the surface atoms
probe: float, optional
Radius of the probe molecule, default is 1.4 A the radius of water
cutoff: float
Bond cutoff, defaults to van der Waals radius
"""
calculate_asa(atoms, probe, tag=tag, cutoff=cutoff)
def add_ligands(ligand, surface, distance, coverage, head, tail):
atoms = dc(surface)
tag_surface_atoms(atoms)
for atom in atoms:
if atom.tag == 1 and np.random.random() < coverage:
pos = atom.position
com = surface.get_center_of_mass()
disp = pos - com
norm_disp = disp / np.sqrt(np.dot(disp, disp))
l_length = ligand[tail].position - ligand[head].position
norm_l_length = l_length / np.sqrt(np.dot(l_length, l_length))
ads = dc(ligand)
ads.rotate(norm_l_length, a=norm_disp)
ads.translate(-ads[head].position)
ads.translate(pos + distance * norm_disp)
atoms += ads
return atoms
def get_angle_list(atoms, cutoff, element=None, tag=None):
"""
Get all the angles in the NP
Parameters
----------
atoms: ase.Atoms object
The atomic configuration
cutoff: float
Bond length cutoff
element: str, optional
Limit the list to only this element
tag: int
Limit the list to only this tag
Returns
-------
ndarray:
The list of bond angles in degrees
"""
n_list = list(get_neighbor_list(cutoff, atoms))
angles = []
for i in range(len(atoms)):
z = list(combinations(n_list[i], 2))
for a in z:
if (element is not None and atoms[i].symbol != element) or \
(tag is not None and atoms[i].tag != tag):
break
angles.append(np.rad2deg(atoms.get_angle([a[0], i, a[1]])))
return np.nan_to_num(np.asarray(angles))
def get_coord_list(atoms, cutoff, element=None, tag=None):
"""
Get the coordination numbers in the NP
Parameters
----------
atoms: ase.Atoms object
The atomic configuration
cutoff: float
Bond length cutoff
element: str, optional
Limit the list to only this element
tag: int
Limit the list to only this tag
Returns
-------
ndarray:
The list of coordination numbers
"""
if isinstance(atoms, list):
coord_l = []
for atms in atoms:
a = get_coordination(cutoff, atms)
if element is not None and tag is not None:
coord_l.append(
a[(np.asarray(atoms.get_chemical_symbols()) == element) &
(atoms.get_tags() == tag)])
elif element is not None:
coord_l.append(
a[np.asarray(atoms.get_chemical_symbols()) == element])
elif tag is not None:
coord_l.append(a[atoms.get_tags() == tag])
else:
coord_l.append(a)
c = np.asarray(coord_l)
return np.average(c, axis=0), np.std(c, axis=0)
else:
a = get_coordination(cutoff, atoms)
if element is not None and tag is not None:
return a[(np.asarray(atoms.get_chemical_symbols()) == element) &
(atoms.get_tags() == tag)]
elif element is not None:
return a[np.asarray(atoms.get_chemical_symbols()) == element]
elif tag is not None:
return a[atoms.get_tags() == tag]
else:
return a
def get_bond_dist_list(atoms, cutoff, element=None, tag=None):
"""
Get all the bond distances in the NP
Parameters
----------
atoms: ase.Atoms object
The atomic configuration
cutoff: float
Bond length cutoff
element: str, optional
Limit the list to only this element
tag: int
Limit the list to only this tag
Returns
-------
ndarray:
The list of bond distances
"""
n_list = list(get_neighbor_list(cutoff, atoms))
bonds = []
for i in range(len(atoms)):
for a in n_list[i]:
if (element is not None and atoms[i].symbol != element) or \
(tag is not None and atoms[i].tag != tag):
break
bonds.append(atoms.get_distance(i, a))
return np.nan_to_num(np.asarray(bonds))
| CJ-Wright/pyIID | pyiid/utils.py | Python | bsd-3-clause | 5,803 | ["ASE"] | 5e32988f1ea4991d436343938a03c8967054e4336fc3660a3273e5bdda9ddf19 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
An app configuration defines the user-tunable parameters of the application and of the quality evaluation, such as:
* Amazon Mechanical Turk HIT description, pricing, keywords, etc.
* The description and instructions of the task
* The configuration of the type of test (e.g 'mushra' or 'pairwise')
* The definition of the quality scales
* The paths to the audio stimuli
* Which components of the evaluation are active (e.g. pre-test survey, post-test survey, hearing screening, etc.)
This subpackage contains a base configuration with overridable defaults, as well as pre-defined testing
configurations for common audio quality evaluation scenarios. Before you run a test, make sure you at least
change the stimuli and the ``SERVER_ADDRESS`` variable.
.. seealso:: :doc:`../test_configurations`
"""
import os
try:
from secret_keys import CSRF_SECRET_KEY, SESSION_KEY
except ImportError:
try:
CSRF_SECRET_KEY = os.environ['CSRF_SECRET_KEY']
SESSION_KEY = os.environ['SESSION_KEY']
except KeyError:
raise KeyError('No keys found. Either define a secret_keys.py file (using generate_key_files.py) or set the '
'keys using environment variables.')
# Get the application mode from the environment variable APP_MODE
APP_MODE = os.getenv('APP_MODE')
# HEARING TEST CONSTANTS
MIN_HEARING_TEST_AUDIO_TONES = 2
MAX_HEARING_TEST_AUDIO_TONES = 8
HEARING_TEST_AUDIO_FILES_PER_TONES = 4
MIN_HEARING_TEST_AUDIO_INDEX = HEARING_TEST_AUDIO_FILES_PER_TONES * MIN_HEARING_TEST_AUDIO_TONES
MAX_HEARING_TEST_AUDIO_INDEX = HEARING_TEST_AUDIO_FILES_PER_TONES * MAX_HEARING_TEST_AUDIO_TONES
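# With the constants above these evaluate to 4 * 2 = 8 and 4 * 8 = 32.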
# HEARING RESPONSE ESTIMATION CONSTANTS
HEARING_RESPONSE_NFREQS = 8 # number of different frequencies
HEARING_RESPONSE_NADD = 3 # number of max additional tones (60 for 10dB, 3 for 20dB Spacing)
class BaseConfig(object):
"""
The base application configuration.
Attributes
----------
DEBUG : bool
Enable/disable debug mode (see Flask docs) (default is False)
TESTING : bool
Enable/disable testing mode (see Flask docs) (default is False)
SECRET_KEY : str
If a secret key is set, cryptographic components can use this to sign cookies and other things. Set this to a
complex random value when you want to use the secure cookie for instance. Set via `generate_key_file.py` or
using environment variable 'SECRET_KEY'. (see Flask Docs)
CSRF_SESSION_KEY : str
A Cross-site Request Forgery (CSRF) secret key for signing data. Set via `generate_key_file.py` or
using environment variable 'CSRF_SESSION_KEY'. (see Flask docs)
CSRF_ENABLED : bool
Enable/disable protection against *Cross-site Request Forgery (CSRF)* (see Flask docs) (default is True)
SERVER_ADDRESS : str
The name and port number of the server. Do not include 'http'. (e.g.: 'caqe.local:5000') (see Flask docs)
Can be set via environment variable 'SERVER_ADDRESS'. (default is 'caqe.local:5000')
SQLALCHEMY_DATABASE_URI : str
The database URI that should be used for the connection (see Flask-SQLAlchemy docs). Examples:
* sqlite:////tmp/test.db
* mysql://username:password@server/db
Can be set via environment variable 'DATABASE_URL'. (default is'sqlite:////~/caqe.db')
PREFERRED_URL_SCHEME : str
The URL scheme that should be used for URL generation if no URL scheme is available. 'http' or 'https'
(default is 'https')
AUDIO_FILE_DIRECTORY : str
Relative directory path to testing audio stimuli. (default is 'static/audio')
ENCRYPT_AUDIO_STIMULI_URLS : bool
Enable/disable encryption of the URLs so that users can't game consistency. (default is True)
TEST_TYPE : str
The test type (limited to 'pairwise' or 'mushra' for now). (default is None)
ANONYMOUS_PARTICIPANTS_ENABLED : bool
Enable/disable participant entry through the '/anonymous' entry point. (default is False)
IP_COLLECTION_ENABLED : bool
Enable/disable collection of participants' IP addresses. (default is True)
OBTAIN_CONSENT : bool
If True, obtain consent from each participant (see consent.html) (default is True)
PRE_TEST_SURVEY_ENABLED : bool
If True, ask participants a survey before evaluation (see pre_test_survey.html). (default is True)
PRE_TEST_SURVEY_INCLUSION_CRITERIA : list of str
Pre-test survey inclusion criteria.
(default is ["int(survey['age']) >= 18", "survey['hearing_disorder'] == 'No'"])
POST_TEST_SURVEY_ENABLED : bool
If True, ask participants a survey after evaluation (see post_test_survey.html) (default is True)
HEARING_RESPONSE_ESTIMATION_ENABLED : bool
If enabled, ask participants to complete the in-situ hearing response estimation. (default is True)
CONDITIONS_PER_EVALUATION : int
The number of conditions to present to a participant in a single visit to '/evaluate'.
Note that currently evaluation is limited to one condition group. So if this value is more than 1, there must
be at least as many conditions per group as there are conditions per evaluation for this to have an effect.
It is also recommended that the number of conditions per group be an integer multiple of
`CONDITIONS_PER_EVALUATION`. For example, if there are 28 conditions in a group, set `CONDITIONS_PER_EVALUATION`
to 14 or 7.
(default is 1)
TRIALS_PER_CONDITION : int
The number of trials we should collect per condition (with distinct participants). (default is 20)
LIMIT_SUBJECT_TO_ONE_TASK_TYPE : bool
If True, each subject is limited to one type of Test. (default is True)
TEST_CONDITION_ORDER_RANDOMIZED : bool
Randomize the condition order per test for each participant. (default is True)
TEST_CONDITION_GROUP_ORDER_RANDOMIZED : bool
Randomize the condition group order for each participant. (default is False)
STIMULUS_ORDER_RANDOMIZED : bool
Randomize the stimulus order for each condition. (default is True)
HEARING_SCREENING_TEST_ENABLED : bool
Set to True if you want the participants to be required to take a hearing screening test. (default is True)
HEARING_TEST_EXPIRATION_HOURS : int
The number of hours their hearing test is valid for (they must retake after this time has passed)
(default is 24)
MAX_HEARING_TEST_ATTEMPTS : int
The number of attempts one has before they are sent away (they must wait `hearing_test_expiration_hours`
to take it again) (default is 2)
HEARING_TEST_REJECTION_ENABLED : bool
If this is set to False, then we still test the users, but we don't reject them. (default is True)
HEARING_RESPONSE_NOPTIONS : int
Max number of frequencies for user to respond with in hearing response estimation. (default is 20)
MTURK_HOST : str
Amazon Mechanical Turk host location. By default set it to the sandbox, and configure it via an environment
variable (so, it can be easily modified when deploying and testing using Heroku).
Can be set via environment variable 'MTURK_HOST'. (default is 'mechanicalturk.sandbox.amazonaws.com')
MTURK_QUESTION_URL : str
Entry point URL. (default is 'https://%s/mturk' % SERVER_ADDRESS)
MTURK_REWARD : float
This is the reward given to each worker for an approved assignment (in USD)
(note that Amazon takes their Mechanical Turk Fee on top of this. See https://requester.mturk.com/pricing)
(default is 0.50)
MTURK_FIRST_HIT_BONUS : float
The default bonus reward in USD that is optionally given (using ``turk_admin_cli.py``) to participants that
completed the first assignment, which may have additional testing (e.g. survey, hearing tests, etc.)
(default is 0.30)
MTURK_MAX_CONSISTENCY_BONUS : float
The default maximum bonus reward in USD for pairwise consistency. This optional bonus is given using
``turk_admin_cli.py``. (default is 0.25)
MTURK_MIN_CONSISTENCY_THRESHOLD_FOR_BONUS : float
The minimum pairwise consistency required to receive the optional bonus (given through ``turk_admin_cli.py``.)
(default is 0.7)
MTURK_NUMBER_HITS_APPROVED_REQUIREMENT : int
MTurk worker must have this many approved HITs to accept task. (default is 1000)
MTURK_PERCENT_ASSIGNMENTS_APPROVED_REQUIREMENT : int
MTurk worker must have this percentage of approved assignments to accept task. (default is 97)
MTURK_TITLE : str
Title of MTurk HIT (default is 'Critical audio listening task. Listen to audio recordings and rate them on
various scales of quality.')
MTURK_DESCRIPTION : str
Description of MTurk HIT.
(default is 'This listening test aims to rate the quality of a set of signals in comparison to a reference
signal. Note that while the maximum number of assignments a worker can do is 10, it's possible that fewer than
10 may be available to you. \*\*CHROME ONLY\*\* \*\*BONUS AVAILABLE\*\*')
MTURK_KEYWORDS : str
Keywords for MTurk HIT. (default is 'audio, sound, music, listening, research')
MTURK_ASSIGNMENT_DURATION_IN_SECONDS : int
Accepted MTurk assignments must be completed within this duration or they will be released to other workers
(default is 60 * 30, i.e. 30 minutes)
MTURK_LIFETIME_IN_SECONDS : int
HITs expire (no one can accept them) after this duration since being posted.
(default is 60 * 60 * 24 * 7, i.e 1 week)
MTURK_FRAME_HEIGHT : int
The size of the Mechanical Turk browser frame (default is 1200)
ACCEPTABLE_BROWSERS : list of str
The set of acceptable browsers. set as None to disable browser rejection. (default is ['chrome',])
BEGIN_BUTTON_ENABLED : bool
If true, participants will have to click a button that launches a new window. This is useful in order to
delay condition assignment until the user is engaged in the task, and allows a new window to be launched
that is bigger than the Mechanical Turk frame for instance. (default is True)
POPUP_WIDTH : int
The width of the window launched when participants press the "begin button" the task. (default is 1200)
POPUP_HEIGHT : int
The height of the window launched when participants press the "begin button" the task. (default is 1200)
TEST_TIMEOUT_SEC : float
The participant must spend at least this amount of time on the evaluation task before submission.
(default is 60.)
REQUIRE_LISTENING_TO_ALL_TRAINING_SOUNDS : bool
If True, the participant must listen to all of the training sounds before proceeding to the evaluation task.
(default is True)
PREVIEW_HTML : str
The HTML content of the preview page. This will be the same for all conditions, regardless of test since
conditions are assigned on the fly (so we can have complete control over condition assignment).
(default is None)
MIN_RATING_VALUE : int
The minimum rating value on the MUSHRA slider. (default is 0)
MAX_RATING_VALUE : int
The maximum rating value on the MUSHRA slider. (default is 99)
DEFAULT_RATING_VALUE : int
The default rating value on the MUSHRA slider. (default is 50)
TESTS : list of dict
The test and condition-specific configuration variables.
Note that if 'evaluation_instructions_html' is not None in the condition, it will override the instructions
defined in the test.
Note also that reference keys must be alphanumeric and stimulus keys must begin with 'S' followed by a number,
e.g. 'S29'.
The dicts are of the form::
{'test_config_variables':
{'test_title': '...', # The test title that is displayed on the evaluation page
'first_task_introduction_html': '...', # Content of the intro page the first time they do a task
'introduction_html': '...', # Content of the intro page (after the first time they perform the task)
'training_instructions_html': '...', # The HTML content of the training instructions
'evaluation_instructions_html': '...'}, # The HTML content of the evaluation instructions
'references' : (('<reference_name>', '<reference_description>'),), # Reference names and descriptions
'reference_example_dict':
{'<reference_name}': url_for('static', filename='audio/<reference_filename>.wav'), ... },
'quality_example_dict':
{'<example_type0>': [url_for('static', filename='audio/<example0_filename>.wav'),
url_for('static', filename='audio/<example1_filename>.wav'),],
'<example_type1>': [url_for('static', filename='audio/<example3_filename>),]}},
'condition_groups' :
[{'reference_files': {<reference_name>: '<reference_filename>.wav',},
{'stimulus_files': {'S1': '<S1_filename>.wav',
'S2': '<S2_filename>,wav',}},
{'conditions': [{'reference_keys': [<reference_name>,],
'stimulus_keys': ['S1','S2','S7', ... ],
'evaluation_instructions_html': <condition_specific_evaluation_instructions>},]},]}
(default is [])
Note
----
For testing, add: ::
0.0.0.0 caqe.local
to /etc/hosts
We need to set the SERVER_ADDRESS to resolve ``url_for`` definitions when constructing the database, but we can't simply
use `localhost` because the secure sessions are not compatible with that.
"""
# ---------------------------------------------------------------------------------------------
# BACKEND VARIABLES
TESTING = False
DEBUG = False
SECRET_KEY = CSRF_SECRET_KEY
CSRF_SESSION_KEY = SESSION_KEY
CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:////%s' % os.path.expanduser('~/caqe.db'))
SERVER_ADDRESS = os.getenv('SERVER_ADDRESS', 'caqe.local:5000')
PREFERRED_URL_SCHEME = 'https'
AUDIO_FILE_DIRECTORY = os.getenv('AUDIO_FILE_DIRECTORY', 'static/audio')
AUDIO_CODEC = 'wav'
ENCRYPT_AUDIO_STIMULI_URLS = True
EXTERNAL_FILE_HOST = False
BEGIN_TITLE = 'Audio Quality Evaluation'
# ---------------------------------------------------------------------------------------------
# TESTING VARIABLES
TEST_TYPE = None
ANONYMOUS_PARTICIPANTS_ENABLED = False
IP_COLLECTION_ENABLED = True
OBTAIN_CONSENT = False
PRE_TEST_SURVEY_ENABLED = True
PRE_TEST_SURVEY_INCLUSION_CRITERIA = ["int(survey['age']) >= 18",
"survey['hearing_disorder'] == 'No'"]
POST_TEST_SURVEY_ENABLED = True
HEARING_RESPONSE_ESTIMATION_ENABLED = True
CONDITIONS_PER_EVALUATION = 1
TRIALS_PER_CONDITION = 20
LIMIT_SUBJECT_TO_ONE_TASK_TYPE = True
TEST_CONDITION_ORDER_RANDOMIZED = True
TEST_CONDITION_GROUP_ORDER_RANDOMIZED = False
STIMULUS_ORDER_RANDOMIZED = True
# ---------------------------------------------------------------------------------------------
# HEARING SCREENING VARIABLES
HEARING_SCREENING_TEST_ENABLED = True
HEARING_TEST_EXPIRATION_HOURS = 12
MAX_HEARING_TEST_ATTEMPTS = 2
HEARING_TEST_REJECTION_ENABLED = True
# ---------------------------------------------------------------------------------------------
# HEARING RESPONSE ESTIMATION VARIABLES
HEARING_RESPONSE_NOPTIONS = 20
# ---------------------------------------------------------------------------------------------
# MECHANICAL TURK VARIABLES
MTURK_HOST = os.getenv('MTURK_HOST', 'mechanicalturk.sandbox.amazonaws.com')
MTURK_QUESTION_URL = 'https://%s/mturk' % SERVER_ADDRESS
MTURK_REWARD = 0.50
MTURK_FIRST_HIT_BONUS = 0.30
MTURK_MAX_CONSISTENCY_BONUS = 0.25
MTURK_MIN_CONSISTENCY_THRESHOLD_FOR_BONUS = 0.7
MTURK_NUMBER_HITS_APPROVED_REQUIREMENT = 1000
MTURK_PERCENT_ASSIGNMENTS_APPROVED_REQUIREMENT = 97
MTURK_TITLE = 'Critical audio listening task. Listen to audio recordings and rate them on various ' \
'scales of quality.'
MTURK_DESCRIPTION = 'This listening test aims to rate the quality of a set of signals in comparison to a reference ' \
'signal. Note that while the maximum number of assignments a worker can do is 10, it\'s possible that ' \
'fewer than 10 may be available to you. **CHROME ONLY** **BONUS AVAILABLE**'
MTURK_KEYWORDS = 'audio, sound, music, listening, research'
MTURK_ASSIGNMENT_DURATION_IN_SECONDS = 60 * 30
MTURK_LIFETIME_IN_SECONDS = 60 * 60 * 24 * 7
MTURK_MAX_ASSIGNMENTS = 200
MTURK_AUTO_APPROVAL_DELAY_IN_SECONDS = 60 * 60 * 24 * 1 # 1 day
MTURK_FRAME_HEIGHT = 1200
# ---------------------------------------------------------------------------------------------
# FRONT-END VARIABLES
ACCEPTABLE_BROWSERS = ['chrome']
BEGIN_BUTTON_ENABLED = True
POPUP_WIDTH = 1200
POPUP_HEIGHT = 1200
TEST_TIMEOUT_SEC = 60.
REQUIRE_LISTENING_TO_ALL_TRAINING_SOUNDS = True
PREVIEW_HTML = None
MIN_RATING_VALUE = 0
MAX_RATING_VALUE = 99
DEFAULT_RATING_VALUE = 50
# ---------------------------------------------------------------------------------------------
# DEFAULT CONDITION AND TEST-SPECIFIC VARIABLES
# (These will be configured for each condition and saved in the database)
TESTS = []
class TestingOverrideConfig(object):
"""
Override config for testing.
Note
----
To enable these parameters set environment variable ``APP_MODE`` to 'TESTING'. In Linux: ::
$ export APP_MODE=TESTING
"""
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
SERVER_ADDRESS = 'caqe.local:5000'
MTURK_QUESTION_URL = 'https://%s/mturk' % SERVER_ADDRESS
PREFERRED_URL_SCHEME = 'http'
class DevelopmentOverrideConfig(object):
"""
Override config for development.
Note
----
To enable these parameters set environment variable ``APP_MODE`` to 'DEVELOPMENT'. In Linux: ::
$ export APP_MODE=DEVELOPMENT
"""
DEBUG = True
SERVER_ADDRESS = 'caqe.local:5000'
MTURK_QUESTION_URL = 'https://%s/mturk' % SERVER_ADDRESS
HEARING_TEST_REJECTION_ENABLED = False
PREFERRED_URL_SCHEME = 'http'
REQUIRE_LISTENING_TO_ALL_TRAINING_SOUNDS = False
class ProductionOverrideConfig(object):
"""
Override config for production.
Note
----
To enable these parameters set environment variable ``APP_MODE`` to 'PRODUCTION'. In Linux: ::
$ export APP_MODE=PRODUCTION
"""
TESTING = False
DEBUG = False
class EvaluationDevOverrideConfig(object):
"""
Override config for evaluation task development.
Note
----
To enable these parameters set environment variable ``APP_MODE`` to 'EVALUATION'. In Linux: ::
$ export APP_MODE=EVALUATION
"""
DEBUG = True
SERVER_ADDRESS = 'caqe.local:5000'
MTURK_QUESTION_URL = 'https://%s/mturk' % SERVER_ADDRESS
HEARING_TEST_REJECTION_ENABLED = False
HEARING_SCREENING_TEST_ENABLED = False
HEARING_RESPONSE_ESTIMATION_ENABLED = False
PREFERRED_URL_SCHEME = 'http'
REQUIRE_LISTENING_TO_ALL_TRAINING_SOUNDS = False
PRE_TEST_SURVEY_ENABLED = False
POST_TEST_SURVEY_ENABLED = False
| mcartwright/CAQE | src/caqe/configuration.py | Python | mit | 19,785 | ["VisIt"] | 60f1965a4f5b55df7d2bb1ddb9a6d553291e0b68e9e279e55f56f6f2698d3754 |
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime is invoked with the right options.
# Adapted from https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/1.16.16/peptide-shaker.py (accessed June 21st, 2019).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'GeMoMa-1.7.1.jar'
default_jvm_mem_opts = ['-Xms1g', '-Xmx2g']
original_string = "java -jar "+jar_file+" CLI"
wrapper_string = "GeMoMa"
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
# it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
def main():
java = java_executable()
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
jar_dir = real_dirname(sys.argv[0])
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
cli = 'CLI'
cmd = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + [cli] + pass_args
# print('wrapper script translating:')
# print(sys.argv)
# print('to:')
# print(cmd)
# print('=======================================================================================================================\n')
# print(original_string)
# print(wrapper_string)
#sys.exit(subprocess.call(cmd))
p = subprocess.Popen(cmd,stderr=subprocess.PIPE);
for line in iter(p.stderr.readline,b''):
tomod = line.decode("utf-8")
tomod = tomod.replace(original_string,wrapper_string)
print(tomod,end='',file=sys.stderr)
exit(p.wait())
if __name__ == '__main__':
main()
| cokelaer/bioconda-recipes | recipes/gemoma/GeMoMa.py | Python | mit | 3,169 | ["Bioconda"] | 018ca2619f82a0002e2334d695e8fe532aec2293d4d5bda0711ecab68d30118d |
# sql/elements.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`.ClauseElement`,
:class:`.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
from .. import util, exc, inspection
from . import type_api
from . import operators
from .visitors import Visitable, cloned_traverse, traverse
from .annotation import Annotated
import itertools
from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG
import re
import operator
def _clone(element, **kw):
return element._clone()
def collate(expression, collation):
"""Return the clause ``expression COLLATE collation``.
e.g.::
collate(mycolumn, 'utf8_bin')
produces::
mycolumn COLLATE utf8_bin
"""
expr = _literal_as_binds(expression)
return BinaryExpression(
expr,
_literal_as_text(collation),
operators.collate, type_=expr.type)
def between(ctest, cleft, cright):
"""Return a ``BETWEEN`` predicate clause.
Equivalent of SQL ``clausetest BETWEEN clauseleft AND clauseright``.
The :func:`between()` method on all
:class:`.ColumnElement` subclasses provides
similar functionality.
"""
ctest = _literal_as_binds(ctest)
return ctest.between(cleft, cright)
def literal(value, type_=None):
"""Return a literal clause, bound to a bind parameter.
Literal clauses are created automatically when non- :class:`.ClauseElement`
objects (such as strings, ints, dates, etc.) are used in a comparison
operation with a :class:`.ColumnElement`
subclass, such as a :class:`~sqlalchemy.schema.Column` object.
Use this function to force the
generation of a literal clause, which will be created as a
:class:`BindParameter` with a bound value.
:param value: the value to be bound. Can be any Python object supported by
the underlying DB-API, or is translatable via the given type argument.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
will provide bind-parameter translation for this literal.
"""
return BindParameter(None, value, type_=type_, unique=True)
def type_coerce(expr, type_):
"""Coerce the given expression into the given type,
on the Python side only.
:func:`.type_coerce` is roughly similar to :func:`.cast`, except no
"CAST" expression is rendered - the given type is only applied towards
expression typing and against received result values.
e.g.::
from sqlalchemy.types import TypeDecorator
import uuid
class AsGuid(TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
if value is not None:
return str(value)
else:
return None
def process_result_value(self, value, dialect):
if value is not None:
return uuid.UUID(value)
else:
return None
conn.execute(
select([type_coerce(mytable.c.ident, AsGuid)]).\\
where(
type_coerce(mytable.c.ident, AsGuid) ==
uuid.uuid3(uuid.NAMESPACE_URL, 'bar')
)
)
"""
type_ = type_api.to_instance(type_)
if hasattr(expr, '__clause_expr__'):
return type_coerce(expr.__clause_expr__())
elif isinstance(expr, BindParameter):
bp = expr._clone()
bp.type = type_
return bp
elif not isinstance(expr, Visitable):
if expr is None:
return Null()
else:
return literal(expr, type_=type_)
else:
return Label(None, expr, type_=type_)
def outparam(key, type_=None):
"""Create an 'OUT' parameter for usage in functions (stored procedures),
for databases which support them.
The ``outparam`` can be used like a regular function parameter.
The "output" value will be available from the
:class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
attribute, which returns a dictionary containing the values.
"""
return BindParameter(
key, None, type_=type_, unique=False, isoutparam=True)
def and_(*clauses):
"""Join a list of clauses together using the ``AND`` operator.
The ``&`` operator is also overloaded on all :class:`.ColumnElement`
subclasses to produce the
same result.
"""
if len(clauses) == 1:
return clauses[0]
return BooleanClauseList(operator=operators.and_, *clauses)
def or_(*clauses):
"""Join a list of clauses together using the ``OR`` operator.
The ``|`` operator is also overloaded on all
:class:`.ColumnElement` subclasses to produce the
same result.
"""
if len(clauses) == 1:
return clauses[0]
return BooleanClauseList(operator=operators.or_, *clauses)
def not_(clause):
"""Return a negation of the given clause, i.e. ``NOT(clause)``.
The ``~`` operator is also overloaded on all
:class:`.ColumnElement` subclasses to produce the
same result.
"""
return operators.inv(_literal_as_binds(clause))
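# Usage sketch (illustrative only, assuming a hypothetical `users` table):
#
#   and_(users.c.name == 'ed', or_(users.c.id > 5, not_(users.c.name == 'jack')))
#
# renders roughly as
#   users.name = :name_1 AND (users.id > :id_1 OR users.name != :name_2)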
@inspection._self_inspects
class ClauseElement(Visitable):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = 'clause'
_annotations = {}
supports_execution = False
_from_objects = []
bind = None
_is_clone_of = None
is_selectable = False
is_clause_element = True
_order_by_label_element = None
def _clone(self):
"""Create a shallow copy of this ClauseElement.
This method may be used by a generative API. It's also used as
part of the "deep" copy afforded by a traversal that combines
the _copy_internals() method.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
ClauseElement._cloned_set._reset(c)
ColumnElement.comparator._reset(c)
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = self
return c
@property
def _constructor(self):
"""return the 'constructor' for this ClauseElement.
This is for the purpose of creating a new object of
this type. Usually, it's just the element's __class__.
However, the "Annotated" version of the object overrides
to return the class of its proxied element.
"""
return self.__class__
@util.memoized_property
def _cloned_set(self):
Return the set consisting of all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
while f is not None:
s.add(f)
f = f._is_clone_of
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_is_clone_of', None)
return d
def _annotate(self, values):
"""return a copy of this ClauseElement with annotations
updated by the given dictionary.
"""
return Annotated(self, values)
def _with_annotations(self, values):
"""return a copy of this ClauseElement with annotations
replaced by the given dictionary.
"""
return Annotated(self, values)
def _deannotate(self, values=None, clone=False):
"""return a copy of this :class:`.ClauseElement` with annotations
removed.
:param values: optional tuple of individual values
to remove.
"""
if clone:
# clone is used when we are also copying
# the expression for a deep deannotation
return self._clone()
else:
# if no clone, since we have no annotations we return
# self
return self
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Same functionality as ``params()``, except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Returns a copy of this ClauseElement with :func:`bindparam()`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print clause.compile().params
{'foo':None}
>>> print clause.params({'foo':7}).compile().params
{'foo':7}
"""
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument")
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
bind.required = False
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
def compare(self, other, **kw):
"""Compare this ClauseElement to the given ClauseElement.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass compare() methods and
may be used to modify the criteria for comparison.
(see :class:`.ColumnElement`)
"""
return self is other
def _copy_internals(self, clone=_clone, **kw):
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
pass
def get_children(self, **kwargs):
"""Return immediate child elements of this :class:`.ClauseElement`.
This is used for visit traversal.
\**kwargs may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
return []
def self_group(self, against=None):
"""Apply a 'grouping' to this :class:`.ClauseElement`.
This method is overridden by subclasses to return a
"grouping" construct, i.e. parenthesis. In particular
it's used by "binary" expressions to provide a grouping
around themselves when placed into a larger expression,
as well as by :func:`.select` constructs when placed into
the FROM clause of another :func:`.select`. (Note that
subqueries should normally be created using the
:func:`.Select.alias` method, as many platforms require
nested SELECT statements to be named).
As expressions are composed together, the application of
:meth:`self_group` is automatic - end-user code should never
need to use this method directly. Note that SQLAlchemy's
clause constructs take operator precedence into account -
so parentheses might not be needed, for example, in
an expression like ``x OR (y AND z)`` - AND takes precedence
over OR.
The base :meth:`self_group` method of :class:`.ClauseElement`
just returns self.
"""
return self
@util.dependencies("sqlalchemy.engine.default")
def compile(self, default, bind=None, dialect=None, **kw):
"""Compile this SQL expression.
The return value is a :class:`~.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An ``Engine`` or ``Connection`` from which a
``Compiled`` will be acquired. This argument takes precedence over
this :class:`.ClauseElement`'s bound engine, if any.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
:param dialect: A ``Dialect`` instance from which a ``Compiled``
will be acquired. This argument takes precedence over the `bind`
argument as well as this :class:`.ClauseElement`'s bound engine, if
any.
:param inline: Used for INSERT statements, for a dialect which does
not support inline retrieval of newly generated primary key
columns, will force the expression used to create the new primary
key value to be rendered inline within the INSERT statement's
VALUES clause. This typically refers to Sequence execution but may
also refer to any server-side default generation function
associated with a primary key `Column`.
"""
if not dialect:
if bind:
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
bind = self.bind
else:
dialect = default.DefaultDialect()
return self._compiler(dialect, bind=bind, **kw)
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
if util.py3k:
return str(self.compile())
else:
return unicode(self.compile()).encode('ascii', 'backslashreplace')
def __and__(self, other):
return and_(self, other)
def __or__(self, other):
return or_(self, other)
def __invert__(self):
return self._negate()
def __bool__(self):
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
def _negate(self):
if hasattr(self, 'negation_clause'):
return self.negation_clause
else:
return UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
negate=None)
def __repr__(self):
friendly = getattr(self, 'description', None)
if friendly is None:
return object.__repr__(self)
else:
return '<%s.%s at 0x%x; %s>' % (
self.__module__, self.__class__.__name__, id(self), friendly)
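# [Editor's note: illustrative sketch, not part of the original source.]
# Compiling a clause against an explicit dialect rather than a bound engine;
# the postgresql dialect import is standard SQLAlchemy API, the column names
# are made up:
#
#     from sqlalchemy.sql import column
#     from sqlalchemy.dialects import postgresql
#     expr = column('a') == column('b')
#     print(expr.compile(dialect=postgresql.dialect()))   # a = b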
class ColumnElement(ClauseElement, operators.ColumnOperators):
"""Represent a column-oriented SQL expression suitable for usage in the
"columns" clause, WHERE clause etc. of a statement.
While the most familiar kind of :class:`.ColumnElement` is the
:class:`.Column` object, :class:`.ColumnElement` serves as the basis
for any unit that may be present in a SQL expression, including
the expressions themselves, SQL functions, bound parameters,
literal expressions, keywords such as ``NULL``, etc.
:class:`.ColumnElement` is the ultimate base class for all such elements.
A :class:`.ColumnElement` provides the ability to generate new
:class:`.ColumnElement`
objects using Python expressions. This means that Python operators
such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
and allow the instantiation of further :class:`.ColumnElement` instances
which are composed from other, more fundamental :class:`.ColumnElement`
objects. For example, two :class:`.ColumnClause` objects can be added
together with the addition operator ``+`` to produce
a :class:`.BinaryExpression`.
Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
of :class:`.ColumnElement`::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print column('a') + column('b')
a + b
:class:`.ColumnElement` supports the ability to be a *proxy* element,
which indicates that the :class:`.ColumnElement` may be associated with
a :class:`.Selectable` which was derived from another :class:`.Selectable`.
An example of a "derived" :class:`.Selectable` is an :class:`.Alias` of a
:class:`~sqlalchemy.schema.Table`. For the ambitious, an in-depth
discussion of this concept can be found at
`Expression Transformations <http://techspot.zzzeek.org/2008/01/23/expression-transformations/>`_.
"""
__visit_name__ = 'column'
primary_key = False
foreign_keys = []
_label = None
_key_label = None
_alt_names = ()
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@util.memoized_property
def comparator(self):
return self.type.comparator_factory(self)
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object has an attribute %r' % (
type(self).__name__,
type(self.comparator).__name__,
key)
)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def _bind_param(self, operator, obj):
return BindParameter(None, obj,
_compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
@property
def expression(self):
"""Return a column expression.
Part of the inspection interface; returns self.
"""
return self
@property
def _select_iterable(self):
return (self, )
@util.memoized_property
def base_columns(self):
return util.column_set(c for c in self.proxy_set
if not hasattr(c, '_proxies'))
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
if hasattr(self, '_proxies'):
for c in self._proxies:
s.update(c.proxy_set)
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`.ColumnElement`
has a common ancestor to this :class:`.ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _compare_name_for_result(self, other):
"""Return True if the given column element compares to this one
when targeting within a result row."""
return hasattr(other, 'name') and hasattr(self, 'name') and \
other.name == self.name
def _make_proxy(self, selectable, name=None, name_is_truncatable=False, **kw):
"""Create a new :class:`.ColumnElement` representing this
:class:`.ColumnElement` as it appears in the select list of a
descending selectable.
"""
if name is None:
name = self.anon_label
try:
key = str(self)
except exc.UnsupportedCompilationError:
key = self.anon_label
else:
key = name
co = ColumnClause(
_as_truncated(name) if name_is_truncatable else name,
type_=getattr(self, 'type', None),
_selectable=selectable
)
co._proxies = [self]
if selectable._is_clone_of is not None:
co._is_clone_of = \
selectable._is_clone_of.columns.get(key)
selectable._columns[key] = co
return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
"""Compare this ColumnElement to another.
Special arguments understood:
:param use_proxies: when True, consider two columns that
share a common base column as equivalent (i.e. shares_lineage())
:param equivalents: a dictionary of columns as keys mapped to sets
of columns. If the given "other" column is present in this
dictionary, if any of the columns in the corresponding set() pass the
comparison test, the result is True. This is used to expand the
comparison to other columns that may be known to be equivalent to
this one via foreign key or other criterion.
"""
to_compare = (other, )
if equivalents and other in equivalents:
to_compare = equivalents[other].union(to_compare)
for oth in to_compare:
if use_proxies and self.shares_lineage(oth):
return True
elif hash(oth) == hash(self):
return True
else:
return False
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
This is a shortcut to the :func:`~.expression.label` function.
if 'name' is None, an anonymous label name will be generated.
"""
return Label(name, self, self.type)
@util.memoized_property
def anon_label(self):
"""provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time anon_label is called so
that expressions can reference anon_label multiple times, producing
the same label name at compile time.
the compiler uses this function automatically at compile time
for expressions that are known to be 'unnamed' like binary
expressions and function calls.
"""
return _anonymous_label('%%(%d %s)s' % (id(self), getattr(self,
'name', 'anon')))
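# [Editor's note: illustrative sketch, not part of the original source.]
# proxy_set / shares_lineage() in action: a column proxied through an alias
# still reports a common ancestor with the original column:
#
#     from sqlalchemy.sql import table, column
#     t = table('t', column('q'))
#     a = t.alias()
#     a.c.q.shares_lineage(t.c.q)   # True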
class BindParameter(ColumnElement):
"""Represent a bound parameter value.
"""
__visit_name__ = 'bindparam'
_is_crud = False
def __init__(self, key, value=NO_ARG, type_=None,
unique=False, required=NO_ARG,
quote=None, callable_=None,
isoutparam=False,
_compared_to_operator=None,
_compared_to_type=None):
"""Construct a new :class:`.BindParameter`.
:param key:
the key for this bind param. Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`BindParameter` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. This value may be
overridden by the dictionary of parameters sent to statement
compilation/execution.
Defaults to ``None``; however, if neither ``value`` nor
``callable`` is passed explicitly, the ``required`` flag will be
set to ``True``, which has the effect of requiring a value to be present
when the statement is actually executed.
.. versionchanged:: 0.8 The ``required`` flag is set to ``True``
automatically if ``value`` or ``callable`` is not passed.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A ``TypeEngine`` object that will be used to pre-process the
value corresponding to this :class:`BindParameter` at
execution time.
:param unique:
if True, the key name of this BindParamClause will be
modified if another :class:`BindParameter` of the same name
already has been located within the containing
:class:`.ClauseElement`.
:param required:
If ``True``, a value is required at execution time. If not passed,
it is set to ``True`` or ``False`` based on whether or not
one of ``value`` or ``callable`` was passed.
.. versionchanged:: 0.8 If the ``required`` flag is not specified,
it will be set automatically to ``True`` or ``False`` depending
on whether or not the ``value`` or ``callable`` parameters
were specified.
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter.
.. seealso::
:func:`.outparam`
"""
if isinstance(key, ColumnClause):
type_ = key.type
key = key.name
if required is NO_ARG:
required = (value is NO_ARG and callable_ is None)
if value is NO_ARG:
value = None
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
or 'param'))
else:
self.key = key or _anonymous_label('%%(%d param)s'
% id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or 'param'
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
if type_ is None:
if _compared_to_type is not None:
self.type = \
_compared_to_type.coerce_compared_value(
_compared_to_operator, value)
else:
self.type = type_api._type_map.get(type(value),
type_api.NULLTYPE)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
@property
def effective_value(self):
"""Return the value of this bound parameter,
taking into account if the ``callable`` parameter
was set.
The ``callable`` value will be evaluated
and returned if present, else ``value``.
"""
if self.callable:
return self.callable()
else:
return self.value
def _clone(self):
c = ClauseElement._clone(self)
if self.unique:
c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
or 'param'))
return c
def _convert_to_unique(self):
if not self.unique:
self.unique = True
self.key = _anonymous_label('%%(%d %s)s' % (id(self),
self._orig_key or 'param'))
def compare(self, other, **kw):
"""Compare this :class:`BindParameter` to the given
clause."""
return isinstance(other, BindParameter) \
and self.type._compare_type_affinity(other.type) \
and self.value == other.value
def __getstate__(self):
"""execute a deferred value for serialization purposes."""
d = self.__dict__.copy()
v = self.value
if self.callable:
v = self.callable()
d['callable'] = None
d['value'] = v
return d
def __repr__(self):
return 'BindParameter(%r, %r, type_=%r)' % (self.key,
self.value, self.type)
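# [Editor's note: illustrative sketch, not part of the original source.]
# effective_value with a deferred callable_; bindparam() is the public
# constructor for BindParameter, and the key name here is arbitrary:
#
#     from sqlalchemy.sql import bindparam
#     bp = bindparam('ts', callable_=lambda: 42)
#     bp.effective_value   # 42 (the callable is invoked rather than a stored value)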
class TypeClause(ClauseElement):
"""Handle a type keyword in a SQL statement.
Used by the ``Case`` statement.
"""
__visit_name__ = 'typeclause'
def __init__(self, type):
self.type = type
class TextClause(Executable, ClauseElement):
"""Represent a literal SQL text fragment.
Public constructor is the :func:`text()` function.
"""
__visit_name__ = 'textclause'
_bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
_execution_options = \
Executable._execution_options.union(
{'autocommit': PARSE_AUTOCOMMIT})
@property
def _select_iterable(self):
return (self,)
@property
def selectable(self):
return self
_hide_froms = []
def __init__(
self,
text='',
bind=None,
bindparams=None,
typemap=None,
autocommit=None):
"""Construct a new :class:`.TextClause` clause.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The advantages :func:`.text` provides over a plain string are
backend-neutral support for bind parameters, per-statement
execution options, as well as
bind parameter and result-column typing behavior, allowing
SQLAlchemy type constructs to play a role when executing
a statement that is specified literally.
Bind parameters are specified by name, using the format ``:name``.
E.g.::
t = text("SELECT * FROM users WHERE id=:user_id")
result = connection.execute(t, user_id=12)
To invoke SQLAlchemy typing logic for bind parameters, the
``bindparams`` list allows specification of :func:`bindparam`
constructs which specify the type for a given name::
t = text("SELECT id FROM users WHERE updated_at>:updated",
bindparams=[bindparam('updated', DateTime())]
)
Typing during result row processing is also an important concern.
Result column types
are specified using the ``typemap`` dictionary, where the keys
match the names of columns. These names are taken from what
the DBAPI returns as ``cursor.description``::
t = text("SELECT id, name FROM users",
typemap={
'id':Integer,
'name':Unicode
}
)
The :func:`text` construct is used internally for most cases when
a literal string is specified for part of a larger query, such as
within :func:`select()`, :func:`update()`,
:func:`insert()` or :func:`delete()`. In those cases, the same
bind parameter syntax is applied::
s = select([users.c.id, users.c.name]).where("id=:user_id")
result = connection.execute(s, user_id=12)
Using :func:`text` explicitly usually implies the construction
of a full, standalone statement. As such, SQLAlchemy refers
to it as an :class:`.Executable` object, and it supports
the :meth:`Executable.execution_options` method. For example,
a :func:`text` construct that should be subject to "autocommit"
can be set explicitly using the ``autocommit`` option::
t = text("EXEC my_procedural_thing()").\\
execution_options(autocommit=True)
Note that SQLAlchemy's usual "autocommit" behavior applies to
:func:`text` constructs - that is, statements which begin
with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
or a variety of other phrases specific to certain backends, will
be eligible for autocommit if no transaction is in progress.
:param text:
the text of the SQL statement to be created. Use ``:<param>``
to specify bind parameters; they will be compiled to their
engine-specific format.
:param autocommit:
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
:param bind:
an optional connection or engine to be used for this text query.
:param bindparams:
a list of :func:`bindparam()` instances which can be used to define
the types and/or initial values for the bind parameters within
the textual statement; the keynames of the bindparams must match
those within the text of the statement. The types will be used
for pre-processing on bind values.
:param typemap:
a dictionary mapping the names of columns represented in the
columns clause of a ``SELECT`` statement to type objects,
which will be used to perform post-processing on columns within
the result set. This argument applies to any expression
that returns result sets.
"""
self._bind = bind
self.bindparams = {}
self.typemap = typemap
if autocommit is not None:
util.warn_deprecated('autocommit on text() is deprecated. '
'Use .execution_options(autocommit=Tru'
'e)')
self._execution_options = \
self._execution_options.union(
{'autocommit': autocommit})
if typemap is not None:
for key in typemap:
typemap[key] = type_api.to_instance(typemap[key])
def repl(m):
self.bindparams[m.group(1)] = BindParameter(m.group(1))
return ':%s' % m.group(1)
# scan the string and search for bind parameter names, add them
# to the list of bindparams
self.text = self._bind_params_regex.sub(repl, text)
if bindparams is not None:
for b in bindparams:
self.bindparams[b.key] = b
@property
def type(self):
if self.typemap is not None and len(self.typemap) == 1:
return list(self.typemap)[0]
else:
return type_api.NULLTYPE
@property
def comparator(self):
return self.type.comparator_factory(self)
def self_group(self, against=None):
if against is operators.in_op:
return Grouping(self)
else:
return self
def _copy_internals(self, clone=_clone, **kw):
self.bindparams = dict((b.key, clone(b, **kw))
for b in self.bindparams.values())
def get_children(self, **kwargs):
return list(self.bindparams.values())
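# [Editor's note: illustrative sketch, not part of the original source.]
# A TextClause with a typed bind parameter, matching the 0.9-era signature
# documented above; ``engine`` is an assumed, pre-existing Engine:
#
#     from sqlalchemy import text, bindparam, DateTime
#     t = text("SELECT id FROM users WHERE updated_at > :updated",
#              bindparams=[bindparam('updated', type_=DateTime())])
#     # result = engine.execute(t, updated=some_datetime)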
class Null(ColumnElement):
"""Represent the NULL keyword in a SQL statement.
"""
__visit_name__ = 'null'
def __init__(self):
"""Return a :class:`Null` object, which compiles to ``NULL``.
"""
self.type = type_api.NULLTYPE
def compare(self, other):
return isinstance(other, Null)
class False_(ColumnElement):
"""Represent the ``false`` keyword in a SQL statement.
"""
__visit_name__ = 'false'
def __init__(self):
"""Return a :class:`False_` object.
"""
self.type = type_api.BOOLEANTYPE
def compare(self, other):
return isinstance(other, False_)
class True_(ColumnElement):
"""Represent the ``true`` keyword in a SQL statement.
"""
__visit_name__ = 'true'
def __init__(self):
"""Return a :class:`True_` object.
"""
self.type = type_api.BOOLEANTYPE
def compare(self, other):
return isinstance(other, True_)
class ClauseList(ClauseElement):
"""Describe a list of clauses, separated by an operator.
By default, is comma-separated, such as a column listing.
"""
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
self.operator = kwargs.pop('operator', operators.comma_op)
self.group = kwargs.pop('group', True)
self.group_contents = kwargs.pop('group_contents', True)
if self.group_contents:
self.clauses = [
_literal_as_text(clause).self_group(against=self.operator)
for clause in clauses if clause is not None]
else:
self.clauses = [
_literal_as_text(clause)
for clause in clauses if clause is not None]
def __iter__(self):
return iter(self.clauses)
def __len__(self):
return len(self.clauses)
@property
def _select_iterable(self):
return iter(self)
def append(self, clause):
# TODO: not sure if i like the 'group_contents' flag. need to
# define the difference between a ClauseList of ClauseLists,
# and a "flattened" ClauseList of ClauseLists. flatten()
# method ?
if self.group_contents:
self.clauses.append(_literal_as_text(clause).\
self_group(against=self.operator))
else:
self.clauses.append(_literal_as_text(clause))
def _copy_internals(self, clone=_clone, **kw):
self.clauses = [clone(clause, **kw) for clause in self.clauses]
def get_children(self, **kwargs):
return self.clauses
@property
def _from_objects(self):
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
if self.group and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def compare(self, other, **kw):
"""Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
including a comparison of all the clause items.
"""
if not isinstance(other, ClauseList) and len(self.clauses) == 1:
return self.clauses[0].compare(other, **kw)
elif isinstance(other, ClauseList) and \
len(self.clauses) == len(other.clauses):
for i in range(0, len(self.clauses)):
if not self.clauses[i].compare(other.clauses[i], **kw):
return False
else:
return self.operator == other.operator
else:
return False
class BooleanClauseList(ClauseList, ColumnElement):
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
super(BooleanClauseList, self).__init__(*clauses, **kwargs)
self.type = type_api.to_instance(kwargs.get('type_',
type_api.BOOLEANTYPE))
@property
def _select_iterable(self):
return (self, )
def self_group(self, against=None):
if not self.clauses:
return self
else:
return super(BooleanClauseList, self).self_group(against=against)
class Tuple(ClauseList, ColumnElement):
"""Represent a SQL tuple."""
def __init__(self, *clauses, **kw):
"""Return a :class:`.Tuple`.
Main usage is to produce a composite IN construct::
from sqlalchemy import tuple_
tuple_(table.c.col1, table.c.col2).in_(
[(1, 2), (5, 12), (10, 19)]
)
.. warning::
The composite IN construct is not supported by all backends,
and is currently known to work on Postgresql and MySQL,
but not SQLite. Unsupported backends will raise
a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
an expression is invoked.
"""
clauses = [_literal_as_binds(c) for c in clauses]
self.type = kw.pop('type_', None)
if self.type is None:
self.type = _type_from_args(clauses)
super(Tuple, self).__init__(*clauses, **kw)
@property
def _select_iterable(self):
return (self, )
def _bind_param(self, operator, obj):
return Tuple(*[
BindParameter(None, o, _compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
for o in obj
]).self_group()
class Case(ColumnElement):
"""Represent a SQL ``CASE`` construct.
"""
__visit_name__ = 'case'
def __init__(self, whens, value=None, else_=None):
"""Produce a :class:`.Case` object.
:param whens: A sequence of pairs, or alternatively a dict,
to be translated into "WHEN / THEN" clauses.
:param value: Optional for simple case statements, produces
a column expression as in "CASE <expr> WHEN ..."
:param else\_: Optional as well, for case defaults produces
the "ELSE" portion of the "CASE" statement.
The expressions used for THEN and ELSE,
when specified as strings, will be interpreted
as bound values. To specify textual SQL expressions
for these, use the :func:`literal_column`
construct.
The expressions used for the WHEN criterion
may only be literal strings when "value" is
present, i.e. CASE table.somecol WHEN "x" THEN "y".
Otherwise, literal strings are not accepted
in this position, and either the text(<string>)
or literal(<string>) constructs must be used to
interpret raw string values.
Usage examples::
case([(orderline.c.qty > 100, item.c.specialprice),
(orderline.c.qty > 10, item.c.bulkprice)
], else_=item.c.regularprice)
case(value=emp.c.type, whens={
'engineer': emp.c.salary * 1.1,
'manager': emp.c.salary * 3,
})
Using :func:`.literal_column()` allows for databases that
do not support bind parameters in the ``then`` clause. The type
can be specified, which determines the type of the :func:`case()` construct
overall::
case([(orderline.c.qty > 100,
literal_column("'greaterthan100'", String)),
(orderline.c.qty > 10, literal_column("'greaterthan10'",
String))
], else_=literal_column("'lethan10'", String))
"""
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
if value is not None:
whenlist = [
(_literal_as_binds(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
else:
whenlist = [
(_no_literals(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = _literal_as_binds(value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = _literal_as_binds(else_)
else:
self.else_ = None
def _copy_internals(self, clone=_clone, **kw):
if self.value is not None:
self.value = clone(self.value, **kw)
self.whens = [(clone(x, **kw), clone(y, **kw))
for x, y in self.whens]
if self.else_ is not None:
self.else_ = clone(self.else_, **kw)
def get_children(self, **kwargs):
if self.value is not None:
yield self.value
for x, y in self.whens:
yield x
yield y
if self.else_ is not None:
yield self.else_
@property
def _from_objects(self):
return list(itertools.chain(*[x._from_objects for x in
self.get_children()]))
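# [Editor's note: illustrative sketch, not part of the original source.]
# A minimal CASE expression; string THEN/ELSE values become bound
# parameters, per the docstring above:
#
#     from sqlalchemy import case
#     from sqlalchemy.sql import column
#     expr = case([(column('qty') > 100, 'bulk')], else_='retail')
#     # str(expr) renders roughly:
#     # CASE WHEN qty > :qty_1 THEN :param_1 ELSE :param_2 END
#     # (parameter names may vary)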
def literal_column(text, type_=None):
"""Return a textual column expression, as would be in the columns
clause of a ``SELECT`` statement.
The object returned supports further expressions in the same way as any
other column object, including comparison, math and string operations.
The type\_ parameter is important for determining proper expression behavior
(such as whether '+' means string concatenation or numerical addition based on
the type).
:param text: the text of the expression; can be any SQL expression.
Quoting rules will not be applied. To specify a column-name expression
which should be subject to quoting rules, use the :func:`column`
function.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
object which will
provide result-set translation and additional expression semantics for
this column. If left as None the type will be NullType.
"""
return ColumnClause(text, type_=type_, is_literal=True)
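# [Editor's note: illustrative sketch, not part of the original source.]
# column() treats its text as a name (subject to quoting), while
# literal_column() emits the text verbatim:
#
#     from sqlalchemy.sql import column, literal_column
#     str(column('user name'))         # "user name"  (quoted as a name)
#     str(literal_column('count(*)'))  # count(*)     (emitted verbatim)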
class Cast(ColumnElement):
"""Represent the SQL ``CAST`` construct."""
__visit_name__ = 'cast'
def __init__(self, clause, totype, **kwargs):
"""Return a :class:`.Cast` object.
Equivalent of SQL ``CAST(clause AS totype)``.
Use with a :class:`~sqlalchemy.types.TypeEngine` subclass, i.e::
cast(table.c.unit_price * table.c.qty, Numeric(10,4))
or::
cast(table.c.timestamp, DATE)
:class:`.Cast` is available using :func:`.cast` or alternatively
``func.cast`` from the :data:`.func` namespace.
"""
self.type = type_api.to_instance(totype)
self.clause = _literal_as_binds(clause, None)
self.typeclause = TypeClause(self.type)
def _copy_internals(self, clone=_clone, **kw):
self.clause = clone(self.clause, **kw)
self.typeclause = clone(self.typeclause, **kw)
def get_children(self, **kwargs):
return self.clause, self.typeclause
@property
def _from_objects(self):
return self.clause._from_objects
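# [Editor's note: illustrative sketch, not part of the original source.]
# CAST of a column expression to a NUMERIC type:
#
#     from sqlalchemy import cast, Numeric
#     from sqlalchemy.sql import column
#     str(cast(column('unit_price'), Numeric(10, 4)))
#     # CAST(unit_price AS NUMERIC(10, 4))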
class Extract(ColumnElement):
"""Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
__visit_name__ = 'extract'
def __init__(self, field, expr, **kwargs):
"""Return a :class:`.Extract` construct.
This is typically available as :func:`.extract`
as well as ``func.extract`` from the
:data:`.func` namespace.
"""
self.type = type_api.INTEGERTYPE
self.field = field
self.expr = _literal_as_binds(expr, None)
def _copy_internals(self, clone=_clone, **kw):
self.expr = clone(self.expr, **kw)
def get_children(self, **kwargs):
return self.expr,
@property
def _from_objects(self):
return self.expr._from_objects
class UnaryExpression(ColumnElement):
"""Define a 'unary' expression.
A unary expression has a single column expression
and an operator. The operator can be placed on the left
(where it is called the 'operator') or right (where it is called the
'modifier') of the column expression.
"""
__visit_name__ = 'unary'
def __init__(self, element, operator=None, modifier=None,
type_=None, negate=None):
self.operator = operator
self.modifier = modifier
self.element = _literal_as_text(element).\
self_group(against=self.operator or self.modifier)
self.type = type_api.to_instance(type_)
self.negate = negate
@classmethod
def _create_nullsfirst(cls, column):
"""Return a NULLS FIRST ``ORDER BY`` clause element.
e.g.::
someselect.order_by(desc(table1.mycol).nullsfirst())
produces::
ORDER BY mycol DESC NULLS FIRST
"""
return UnaryExpression(column, modifier=operators.nullsfirst_op)
@classmethod
def _create_nullslast(cls, column):
"""Return a NULLS LAST ``ORDER BY`` clause element.
e.g.::
someselect.order_by(desc(table1.mycol).nullslast())
produces::
ORDER BY mycol DESC NULLS LAST
"""
return UnaryExpression(column, modifier=operators.nullslast_op)
@classmethod
def _create_desc(cls, column):
"""Return a descending ``ORDER BY`` clause element.
e.g.::
someselect.order_by(desc(table1.mycol))
produces::
ORDER BY mycol DESC
"""
return UnaryExpression(column, modifier=operators.desc_op)
@classmethod
def _create_asc(cls, column):
"""Return an ascending ``ORDER BY`` clause element.
e.g.::
someselect.order_by(asc(table1.mycol))
produces::
ORDER BY mycol ASC
"""
return UnaryExpression(column, modifier=operators.asc_op)
@classmethod
def _create_distinct(cls, expr):
"""Return a ``DISTINCT`` clause.
e.g.::
distinct(a)
renders::
DISTINCT a
"""
expr = _literal_as_binds(expr)
return UnaryExpression(expr,
operator=operators.distinct_op, type_=expr.type)
@util.memoized_property
def _order_by_label_element(self):
if self.modifier in (operators.desc_op, operators.asc_op):
return self.element._order_by_label_element
else:
return None
@property
def _from_objects(self):
return self.element._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
def compare(self, other, **kw):
"""Compare this :class:`UnaryExpression` against the given
:class:`.ClauseElement`."""
return (
isinstance(other, UnaryExpression) and
self.operator == other.operator and
self.modifier == other.modifier and
self.element.compare(other.element, **kw)
)
def _negate(self):
if self.negate is not None:
return UnaryExpression(
self.element,
operator=self.negate,
negate=self.operator,
modifier=self.modifier,
type_=self.type)
else:
return super(UnaryExpression, self)._negate()
def self_group(self, against=None):
if self.operator and operators.is_precedent(self.operator,
against):
return Grouping(self)
else:
return self
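# [Editor's note: illustrative sketch, not part of the original source.]
# Stacking the unary ORDER BY modifiers defined above:
#
#     from sqlalchemy import desc, nullslast
#     from sqlalchemy.sql import column
#     str(nullslast(desc(column('mycol'))))
#     # mycol DESC NULLS LAST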
class BinaryExpression(ColumnElement):
"""Represent an expression that is ``LEFT <operator> RIGHT``.
A :class:`.BinaryExpression` is generated automatically
whenever two column expressions are used in a Python binary expression::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print column('a') + column('b')
a + b
"""
__visit_name__ = 'binary'
def __init__(self, left, right, operator, type_=None,
negate=None, modifiers=None):
# allow compatibility with libraries that
# refer to BinaryExpression directly and pass strings
if isinstance(operator, util.string_types):
operator = operators.custom_op(operator)
self._orig = (left, right)
self.left = _literal_as_text(left).self_group(against=operator)
self.right = _literal_as_text(right).self_group(against=operator)
self.operator = operator
self.type = type_api.to_instance(type_)
self.negate = negate
if modifiers is None:
self.modifiers = {}
else:
self.modifiers = modifiers
def __bool__(self):
if self.operator in (operator.eq, operator.ne):
return self.operator(hash(self._orig[0]), hash(self._orig[1]))
else:
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
@property
def is_comparison(self):
return operators.is_comparison(self.operator)
@property
def _from_objects(self):
return self.left._from_objects + self.right._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.left = clone(self.left, **kw)
self.right = clone(self.right, **kw)
def get_children(self, **kwargs):
return self.left, self.right
def compare(self, other, **kw):
"""Compare this :class:`BinaryExpression` against the
given :class:`BinaryExpression`."""
return (
isinstance(other, BinaryExpression) and
self.operator == other.operator and
(
self.left.compare(other.left, **kw) and
self.right.compare(other.right, **kw) or
(
operators.is_commutative(self.operator) and
self.left.compare(other.right, **kw) and
self.right.compare(other.left, **kw)
)
)
)
def self_group(self, against=None):
if operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def _negate(self):
if self.negate is not None:
return BinaryExpression(
self.left,
self.right,
self.negate,
negate=self.operator,
type_=type_api.BOOLEANTYPE,
modifiers=self.modifiers)
else:
return super(BinaryExpression, self)._negate()
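# [Editor's note: illustrative sketch, not part of the original source.]
# BinaryExpression negation via the ``~`` operator, which calls _negate():
#
#     from sqlalchemy.sql import column
#     str(column('a') == column('b'))      # a = b
#     str(~(column('a') == column('b')))   # a != b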
class Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
self.type = getattr(element, 'type', type_api.NULLTYPE)
@property
def _label(self):
return getattr(self.element, '_label', None) or self.anon_label
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {'element': self.element, 'type': self.type}
def __setstate__(self, state):
self.element = state['element']
self.type = state['type']
def compare(self, other, **kw):
return isinstance(other, Grouping) and \
self.element.compare(other.element)
class Over(ColumnElement):
"""Represent an OVER clause.
This is a special operator against a so-called
"window" function, as well as any aggregate function,
which produces results relative to the result set
itself. It's supported only by certain database
backends.
"""
__visit_name__ = 'over'
order_by = None
partition_by = None
def __init__(self, func, partition_by=None, order_by=None):
"""Produce an :class:`.Over` object against a function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
E.g.::
from sqlalchemy import over
over(func.row_number(), order_by='x')
Would produce "ROW_NUMBER() OVER(ORDER BY x)".
:param func: a :class:`.FunctionElement` construct, typically
generated by :data:`~.expression.func`.
:param partition_by: a column element or string, or a list
of such, that will be used as the PARTITION BY clause
of the OVER construct.
:param order_by: a column element or string, or a list
of such, that will be used as the ORDER BY clause
of the OVER construct.
This function is also available from the :data:`~.expression.func`
construct itself via the :meth:`.FunctionElement.over` method.
.. versionadded:: 0.7
"""
self.func = func
if order_by is not None:
self.order_by = ClauseList(*util.to_list(order_by))
if partition_by is not None:
self.partition_by = ClauseList(*util.to_list(partition_by))
@util.memoized_property
def type(self):
return self.func.type
def get_children(self, **kwargs):
return [c for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.func = clone(self.func, **kw)
if self.partition_by is not None:
self.partition_by = clone(self.partition_by, **kw)
if self.order_by is not None:
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return list(itertools.chain(
*[c._from_objects for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
))
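# [Editor's note: illustrative sketch, not part of the original source.]
# A window function with both PARTITION BY and ORDER BY; the column
# names are made up:
#
#     from sqlalchemy import func, over
#     str(over(func.row_number(), partition_by='dept', order_by='x'))
#     # row_number() OVER (PARTITION BY dept ORDER BY x)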
class Label(ColumnElement):
"""Represents a column label (AS).
Represent a label, as typically applied to any column-level
element using the ``AS`` sql keyword.
"""
__visit_name__ = 'label'
def __init__(self, name, element, type_=None):
"""Return a :class:`Label` object for the
given :class:`.ColumnElement`.
A label changes the name of an element in the columns clause of a
``SELECT`` statement, typically via the ``AS`` SQL keyword.
This functionality is more conveniently available via the
:meth:`.ColumnElement.label` method on :class:`.ColumnElement`.
:param name: label name
:param element: a :class:`.ColumnElement`.
"""
while isinstance(element, Label):
element = element.element
if name:
self.name = name
else:
self.name = _anonymous_label('%%(%d %s)s' % (id(self),
getattr(element, 'name', 'anon')))
self.key = self._label = self._key_label = self.name
self._element = element
self._type = type_
self._proxies = [element]
@util.memoized_property
def _order_by_label_element(self):
return self
@util.memoized_property
def type(self):
return type_api.to_instance(
self._type or getattr(self._element, 'type', None)
)
@util.memoized_property
def element(self):
return self._element.self_group(against=operators.as_)
def self_group(self, against=None):
sub_element = self._element.self_group(against=against)
if sub_element is not self._element:
return Label(self.name,
sub_element,
type_=self._type)
else:
return self
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
@property
def _from_objects(self):
return self.element._from_objects
def _make_proxy(self, selectable, name=None, **kw):
e = self.element._make_proxy(selectable,
name=name if name else self.name)
e._proxies.append(self)
if self._type is not None:
e.type = self._type
return e
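# [Editor's note: illustrative sketch, not part of the original source.]
# A Label gains its ``AS`` rendering inside a SELECT's columns clause;
# the list-style select() call matches this 0.9-era API:
#
#     from sqlalchemy.sql import column, select
#     total = (column('a') + column('b')).label('total')
#     str(select([total]))   # SELECT a + b AS total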
class ColumnClause(Immutable, ColumnElement):
"""Represents a generic column expression from any textual string.
This includes columns associated with tables, aliases and select
statements, but also any arbitrary text. May or may not be bound
to an underlying :class:`.Selectable`.
:class:`.ColumnClause` is constructed by itself typically via
the :func:`~.expression.column` function. It may be placed directly
into constructs such as :func:`.select` constructs::
from sqlalchemy.sql import column, select
c1, c2 = column("c1"), column("c2")
s = select([c1, c2]).where(c1==5)
There is also a variant on :func:`~.expression.column` known
as :func:`~.expression.literal_column` - the difference is that
in the latter case, the string value is assumed to be an exact
expression, rather than a column name, so that no quoting rules
or similar are applied::
from sqlalchemy.sql import literal_column, select
s = select([literal_column("5 + 7")])
:class:`.ColumnClause` can also be used in a table-like
fashion by combining the :func:`~.expression.column` function
with the :func:`~.expression.table` function, to produce
a "lightweight" form of table metadata::
from sqlalchemy.sql import table, column
user = table("user",
column("id"),
column("name"),
column("description"),
)
The above construct can be created in an ad-hoc fashion and is
not associated with any :class:`.schema.MetaData`, unlike its
more full-fledged :class:`.schema.Table` counterpart.
"""
__visit_name__ = 'column'
onupdate = default = server_default = server_onupdate = None
_memoized_property = util.group_expirable_memoized_property()
def __init__(self, text, type_=None, is_literal=False, _selectable=None):
"""Construct a :class:`.ColumnClause` object.
:param text: the name of the column. Quoting rules will be applied
to the clause like any other column name. For textual column constructs
that are not to be quoted, use the :func:`literal_column` function.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` object
which will provide result-set translation and associate this
:class:`.ColumnClause` with a type.
:param is_literal: if True, the :class:`.ColumnClause` is assumed to
be an exact expression that will be delivered to the output with no
quoting rules applied regardless of case-sensitivity settings. The
:func:`literal_column()` function is usually used to create such a
:class:`.ColumnClause`.
"""
self.key = self.name = text
self.table = _selectable
self.type = type_api.to_instance(type_)
self.is_literal = is_literal
def _compare_name_for_result(self, other):
if self.is_literal or \
self.table is None or \
not hasattr(other, 'proxy_set') or (
isinstance(other, ColumnClause) and other.is_literal
):
return super(ColumnClause, self).\
_compare_name_for_result(other)
else:
return other.proxy_set.intersection(self.proxy_set)
def _get_table(self):
return self.__dict__['table']
def _set_table(self, table):
self._memoized_property.expire_instance(self)
self.__dict__['table'] = table
table = property(_get_table, _set_table)
@_memoized_property
def _from_objects(self):
t = self.table
if t is not None:
return [t]
else:
return []
@util.memoized_property
def description(self):
if util.py3k:
return self.name
else:
return self.name.encode('ascii', 'backslashreplace')
@_memoized_property
def _key_label(self):
if self.key != self.name:
return self._gen_label(self.key)
else:
return self._label
@_memoized_property
def _label(self):
return self._gen_label(self.name)
def _gen_label(self, name):
t = self.table
if self.is_literal:
return None
elif t is not None and t.named_with_column:
if getattr(t, 'schema', None):
label = t.schema.replace('.', '_') + "_" + \
t.name + "_" + name
else:
label = t.name + "_" + name
# propagate name quoting rules for labels.
if getattr(name, "quote", None) is not None:
label = quoted_name(label, name.quote)
elif getattr(t.name, "quote", None) is not None:
label = quoted_name(label, t.name.quote)
# ensure the label name doesn't conflict with that
# of an existing column
if label in t.c:
_label = label
counter = 1
while _label in t.c:
_label = label + "_" + str(counter)
counter += 1
label = _label
return _as_truncated(label)
else:
return name
def _bind_param(self, operator, obj):
return BindParameter(self.name, obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
unique=True)
def _make_proxy(self, selectable, name=None, attach=True,
name_is_truncatable=False, **kw):
# propagate the "is_literal" flag only if we are keeping our name,
# otherwise it's considered to be a label
is_literal = self.is_literal and (name is None or name == self.name)
c = self._constructor(
_as_truncated(name or self.name) if \
name_is_truncatable else \
(name or self.name),
type_=self.type,
_selectable=selectable,
is_literal=is_literal
)
if name is None:
c.key = self.key
c._proxies = [self]
if selectable._is_clone_of is not None:
c._is_clone_of = \
selectable._is_clone_of.columns.get(c.key)
if attach:
selectable._columns[c.key] = c
return c
class _IdentifiedClause(Executable, ClauseElement):
__visit_name__ = 'identified'
_execution_options = \
Executable._execution_options.union({'autocommit': False})
def __init__(self, ident):
self.ident = ident
class SavepointClause(_IdentifiedClause):
__visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
__visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
__visit_name__ = 'release_savepoint'
class quoted_name(util.text_type):
"""Represent a SQL identifier combined with quoting preferences.
:class:`.quoted_name` is a Python unicode/str subclass which
represents a particular identifier name along with a
``quote`` flag. This ``quote`` flag, when set to
``True`` or ``False``, overrides automatic quoting behavior
for this identifier in order to either unconditionally quote
or to not quote the name. If left at its default of ``None``,
quoting behavior is applied to the identifier on a per-backend basis
based on an examination of the token itself.
A :class:`.quoted_name` object with ``quote=True`` is also
prevented from being modified in the case of a so-called
"name normalize" option. Certain database backends, such as
Oracle, Firebird, and DB2 "normalize" case-insensitive names
as uppercase. The SQLAlchemy dialects for these backends
convert from SQLAlchemy's lower-case-means-insensitive convention
to the upper-case-means-insensitive conventions of those backends.
The ``quote=True`` flag here will prevent this conversion from occurring
to support an identifier that's quoted as all lower case against
such a backend.
The :class:`.quoted_name` object is normally created automatically
when specifying the name for key schema constructs such as :class:`.Table`,
:class:`.Column`, and others. The class can also be passed explicitly
as the name to any function that receives a name which can be quoted,
such as when using the :meth:`.Engine.has_table` method with an unconditionally
quoted name::
from sqlalchemy import create_engine
from sqlalchemy.sql.elements import quoted_name
engine = create_engine("oracle+cx_oracle://some_dsn")
engine.has_table(quoted_name("some_table", True))
The above logic will run the "has table" logic against the Oracle backend,
passing the name exactly as ``"some_table"`` without converting to
upper case.
.. versionadded:: 0.9.0
"""
#def __new__(cls, value, quote, sprcls=False):
def __new__(cls, value, quote):
if value is None:
return None
# experimental - don't bother with quoted_name
# if quote flag is None. doesn't seem to make any dent
# in performance however
# elif not sprcls and quote is None:
# return value
elif isinstance(value, cls) and (
quote is None or value.quote == quote
):
return value
self = super(quoted_name, cls).__new__(cls, value)
self.quote = quote
return self
def __reduce__(self):
return quoted_name, (util.text_type(self), self.quote)
@util.memoized_instancemethod
def lower(self):
if self.quote:
return self
else:
return util.text_type(self).lower()
@util.memoized_instancemethod
def upper(self):
if self.quote:
return self
else:
return util.text_type(self).upper()
def __repr__(self):
return "'%s'" % self
class _truncated_label(quoted_name):
"""A unicode subclass used to identify symbolic "
"names that may require truncation."""
def __new__(cls, value, quote=None):
quote = getattr(value, "quote", quote)
#return super(_truncated_label, cls).__new__(cls, value, quote, True)
return super(_truncated_label, cls).__new__(cls, value, quote)
def __reduce__(self):
return self.__class__, (util.text_type(self), self.quote)
def apply_map(self, map_):
return self
# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
"""A unicode subclass used to identify anonymously
generated names."""
def __add__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(self, util.text_type(other)),
self.quote)
)
def __radd__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(util.text_type(other), self),
self.quote)
)
def apply_map(self, map_):
if self.quote is not None:
# preserve quoting only if necessary
return quoted_name(self % map_, self.quote)
else:
# else skip the constructor call
return self % map_
def _as_truncated(value):
"""coerce the given value to :class:`._truncated_label`.
Existing :class:`._truncated_label` and
:class:`._anonymous_label` objects are passed
unchanged.
"""
if isinstance(value, _truncated_label):
return value
else:
return _truncated_label(value)
def _string_or_unprintable(element):
if isinstance(element, util.string_types):
return element
else:
try:
return str(element)
except:
return "unprintable element %r" % element
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if all_overlap.intersection(elem._cloned_set))
def _cloned_difference(a, b):
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if not all_overlap.intersection(elem._cloned_set))
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _is_column(col):
"""True if ``col`` is an instance of :class:`.ColumnElement`."""
return isinstance(col, ColumnElement)
def _find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
traverse(clause, {}, {'column': cols.add})
return cols
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). it's only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
if isinstance(element, util.string_types):
return element
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
try:
return element.key
except AttributeError:
return None
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_text(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif isinstance(element, util.string_types):
return TextClause(util.text_type(element))
elif isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
else:
raise exc.ArgumentError(
"SQL expression object or string expected."
)
def _no_literals(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
"function to indicate a SQL expression "
"literal, or 'literal()' to indicate a "
"bound value." % element)
else:
return element
def _is_literal(element):
return not isinstance(element, Visitable) and \
not hasattr(element, '__clause_element__')
def _only_column_elements_or_none(element, name):
if element is None:
return None
else:
return _only_column_elements(element, name)
def _only_column_elements(element, name):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, ColumnElement):
raise exc.ArgumentError(
"Column-based expression object expected for argument "
"'%s'; got: '%s', type %s" % (name, element, type(element)))
return element
def _literal_as_binds(element, name=None, type_=None):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
if element is None:
return Null()
else:
return BindParameter(name, element, type_=type_, unique=True)
else:
return element
def _interpret_as_column_or_from(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
insp = inspection.inspect(element, raiseerr=False)
if insp is None:
if isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
elif hasattr(insp, "selectable"):
return insp.selectable
return ColumnClause(str(element), is_literal=True)
def _const_expr(element):
if isinstance(element, (Null, False_, True_)):
return element
elif element is None:
return Null()
elif element is False:
return False_()
elif element is True:
return True_()
else:
raise exc.ArgumentError(
"Expected None, False, or True"
)
def _type_from_args(args):
for a in args:
if not a.type._isnull:
return a.type
else:
return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column,
require_embedded=False):
c = fromclause.corresponding_column(column,
require_embedded=require_embedded)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
%
(column,
getattr(column, 'table', None),
fromclause.description)
)
return c
class AnnotatedColumnElement(Annotated):
def __init__(self, element, values):
Annotated.__init__(self, element, values)
ColumnElement.comparator._reset(self)
for attr in ('name', 'key'):
if self.__dict__.get(attr, False) is None:
self.__dict__.pop(attr)
def _with_annotations(self, values):
clone = super(AnnotatedColumnElement, self)._with_annotations(values)
ColumnElement.comparator._reset(clone)
return clone
@util.memoized_property
def name(self):
"""pull 'name' from parent, if not present"""
return self._Annotated__element.name
@util.memoized_property
def key(self):
"""pull 'key' from parent, if not present"""
return self._Annotated__element.key
@util.memoized_property
def info(self):
return self._Annotated__element.info
| alex/sqlalchemy | lib/sqlalchemy/sql/elements.py | Python | mit | 80,420 | [
"VisIt"
] | 86bfc65f9d734ee04a7c2773fb927f89f54190ec9301257a444d40b24eadaa09 |
from .base import *
class session(object):
"""
Cytoscape session interface as shown in CyREST's Swagger documentation for 'session'.
:param url: an url of the type 'http://' + host + ':' + str(port) + '/' + version + '/'.
"""
def __init__(self, url):
self.__url = url + 'commands/session'
self.___url = url
def new(self, verbose=False):
"""
Destroys the current session and creates a new, empty one.
:param wid: Window ID
:param verbose: print more
"""
response=api(url=self.__url+"/new", verbose=verbose)
return response
def open(self, session_file=None,session_url=None, verbose=False):
"""
Opens a session from a local file or URL.
:param session_file: The path to the session file (.cys) to be loaded.
:param session_url: A URL that provides a session file.
:param verbose: print more
"""
PARAMS=set_param(["file", "url"],[session_file, session_url])
response=api(url=self.__url+"/open", PARAMS=PARAMS, verbose=verbose)
return response
def save(self, session_file, verbose=False):
"""
Saves the current session to an existing file, which will be replaced.
If this is a new session that has not been saved yet, use 'save as'
instead.
:param session_file: The path to the file where the current session
must be saved to.
:param verbose: print more
"""
PARAMS={"file":session_file}
response=api(url=self.__url+"/save", PARAMS=PARAMS, verbose=verbose)
return response
def save_as(self, session_file, verbose=False):
"""
Saves the current session as a new file.
:param session_file: The path to the file where the current session
must be saved to.
:param verbose: print more
"""
PARAMS={"file":session_file}
response=api(url=self.__url+"/save as", PARAMS=PARAMS, verbose=verbose)
return response
def createSessionFile(self, file, verbose=None):
"""
Saves the current session to a file. If successful, the session file location will be returned.
:param file: Session file location as an absolute path
:param verbose: print more
:returns: 200: successful operation
"""
PARAMS=set_param(['file'],[file])
response=api(url=self.___url+'session', PARAMS=PARAMS, method="POST", verbose=verbose)
return response
def deleteSession(self, verbose=None):
"""
This deletes the current session and initializes a new one. A message is returned to indicate the success of the deletion.
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'session', method="DELETE", verbose=verbose)
return response
def getSessionFromFile(self, file, verbose=None):
"""
Loads a session from a local file and returns the session file name
:param file: Session file location as an absolute path
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'session', PARAMS={'file':file}, method="GET", verbose=verbose, parse_params=False)
return response
def getSessionName(self, verbose=None):
"""
Returns the file name for the current Cytoscape session.
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'session/name', method="GET", verbose=verbose, parse_params=False)
return response
def runGarbageCollection(self, verbose=None):
"""
Manually call Java's System.gc() to free up unused memory. This process happens automatically, but may be useful to call explicitly for testing or evaluation purposes.
:param verbose: print more
:returns: 204: Successful Garbage Collection
"""
response=api(url=self.___url+'gc', method="GET", verbose=verbose, parse_params=False)
return response
| idekerlab/py2cytoscape | py2cytoscape/cyrest/session.py | Python | mit | 4,191 | ["Cytoscape"] | fe26253e7102c00f30e59407705c422f04c4aea74d370ec2e61a6ff1b43b3e24 |
#!/usr/bin/env python3
from abc import ABC, abstractproperty
import torch
from .. import settings
from ..distributions import Delta, MultivariateNormal
from ..module import Module
from ..utils.broadcasting import _mul_broadcast_shape
from ..utils.memoize import cached, clear_cache_hook
class _VariationalStrategy(Module, ABC):
"""
Abstract base class for all Variational Strategies.
"""
def __init__(self, model, inducing_points, variational_distribution, learn_inducing_locations=True):
super().__init__()
# Model
object.__setattr__(self, "model", model)
# Inducing points
inducing_points = inducing_points.clone()
if inducing_points.dim() == 1:
inducing_points = inducing_points.unsqueeze(-1)
if learn_inducing_locations:
self.register_parameter(name="inducing_points", parameter=torch.nn.Parameter(inducing_points))
else:
self.register_buffer("inducing_points", inducing_points)
# Variational distribution
self._variational_distribution = variational_distribution
self.register_buffer("variational_params_initialized", torch.tensor(0))
def _expand_inputs(self, x, inducing_points):
"""
Pre-processing step in __call__ to make x the same batch_shape as the inducing points
"""
batch_shape = _mul_broadcast_shape(inducing_points.shape[:-2], x.shape[:-2])
inducing_points = inducing_points.expand(*batch_shape, *inducing_points.shape[-2:])
x = x.expand(*batch_shape, *x.shape[-2:])
return x, inducing_points
@abstractproperty
@cached(name="prior_distribution_memo")
def prior_distribution(self):
r"""
The :func:`~gpytorch.variational.VariationalStrategy.prior_distribution` method determines how to compute the
GP prior distribution of the inducing points, e.g. :math:`p(u) \sim N(\mu(X_u), K(X_u, X_u))`. Most commonly,
this is done simply by calling the user defined GP prior on the inducing point data directly.
:rtype: :obj:`~gpytorch.distributions.MultivariateNormal`
:return: The distribution :math:`p( \mathbf u)`
"""
raise NotImplementedError
@property
@cached(name="variational_distribution_memo")
def variational_distribution(self):
return self._variational_distribution()
def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
r"""
The :func:`~gpytorch.variational.VariationalStrategy.forward` method determines how to marginalize out the
inducing point function values. Specifically, forward defines how to transform a variational distribution
over the inducing point values, :math:`q(u)`, in to a variational distribution over the function values at
specified locations x, :math:`q(f|x)`, by integrating :math:`\int p(f|x, u)q(u)du`
:param torch.Tensor x: Locations :math:`\mathbf X` to get the
variational posterior of the function values at.
:param torch.Tensor inducing_points: Locations :math:`\mathbf Z` of the inducing points
:param torch.Tensor inducing_values: Samples of the inducing function values :math:`\mathbf u`
(or the mean of the distribution :math:`q(\mathbf u)` if q is a Gaussian).
:param ~gpytorch.lazy.LazyTensor variational_inducing_covar: If the distribution :math:`q(\mathbf u)`
is Gaussian, then this variable is the covariance matrix of that Gaussian. Otherwise, it will be
:attr:`None`.
:rtype: :obj:`~gpytorch.distributions.MultivariateNormal`
:return: The distribution :math:`q( \mathbf f(\mathbf X))`
"""
raise NotImplementedError
def kl_divergence(self):
r"""
Compute the KL divergence between the variational inducing distribution :math:`q(\mathbf u)`
and the prior inducing distribution :math:`p(\mathbf u)`.
:rtype: torch.Tensor
"""
with settings.max_preconditioner_size(0):
kl_divergence = torch.distributions.kl.kl_divergence(self.variational_distribution, self.prior_distribution)
return kl_divergence
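# (Editor's note, not in the original file: this KL term is the regulariser
# that variational objectives such as gpytorch.mlls.VariationalELBO subtract
# from the expected log likelihood.)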
def train(self, mode=True):
# Make sure we are clearing the cache if we change modes
if (self.training and not mode) or mode:
clear_cache_hook(self)
return super().train(mode=mode)
def __call__(self, x, prior=False):
# If we're in prior mode, then we're done!
if prior:
return self.model.forward(x)
# Delete previously cached items from the training distribution
if self.training:
clear_cache_hook(self)
# (Maybe) initialize variational distribution
if not self.variational_params_initialized.item():
prior_dist = self.prior_distribution
self._variational_distribution.initialize_variational_distribution(prior_dist)
self.variational_params_initialized.fill_(1)
# Ensure inducing_points and x are the same size
inducing_points = self.inducing_points
if inducing_points.shape[:-2] != x.shape[:-2]:
x, inducing_points = self._expand_inputs(x, inducing_points)
# Get p(u)/q(u)
variational_dist_u = self.variational_distribution
# Get q(f)
if isinstance(variational_dist_u, MultivariateNormal):
return super().__call__(
x,
inducing_points,
inducing_values=variational_dist_u.mean,
variational_inducing_covar=variational_dist_u.lazy_covariance_matrix,
)
elif isinstance(variational_dist_u, Delta):
return super().__call__(
x, inducing_points, inducing_values=variational_dist_u.mean, variational_inducing_covar=None
)
else:
raise RuntimeError(
f"Invalid variational distribuition ({type(variational_dist_u)}). "
"Expected a multivariate normal or a delta distribution."
)
| jrg365/gpytorch | gpytorch/variational/_variational_strategy.py | Python | mit | 6,122 | ["Gaussian"] | cbf329ff3ac64378b8e2456fbfd4a4611c6f179ada0ab8216307b67e4a26bc48 |
from __future__ import division, unicode_literals
import warnings
import matplotlib
matplotlib.use('pdf')
import unittest as unittest
import numpy as np
from pymatgen import Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.analysis.phase_diagram import PhaseDiagram, \
GrandPotentialPhaseDiagram
from pymatgen.analysis.reaction_calculator import Reaction
from pymatgen.analysis.interface_reactions import InterfacialReactivity
class InterfaceReactionTest(unittest.TestCase):
def setUp(self):
self.entries = [ComputedEntry(Composition('Li'), 0),
ComputedEntry(Composition('Mn'), 0),
ComputedEntry(Composition('O2'), 0),
ComputedEntry(Composition('MnO2'), -10),
ComputedEntry(Composition('Mn2O4'), -60),
ComputedEntry(Composition('MnO3'), 20),
ComputedEntry(Composition('Li2O'), -10),
ComputedEntry(Composition('Li2O2'), -8),
ComputedEntry(Composition('LiMnO2'), -30)
]
self.pd = PhaseDiagram(self.entries)
chempots = {'Li': -3}
self.gpd = GrandPotentialPhaseDiagram(self.entries, chempots)
self.ir = []
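# Editor's note: self.ir collects eleven InterfacialReactivity fixtures that
# the tests below index into -- they span grand vs. non-grand phase diagrams,
# normalized vs. raw energies, and the use_hull_energy variants.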
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'),
self.pd, norm=0, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=False))
self.ir.append(
InterfacialReactivity(Composition('MnO2'), Composition('Mn'),
self.gpd, norm=0, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('O2'),
self.gpd, norm=1, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
self.ir.append(
InterfacialReactivity(Composition('Li2O'), Composition('Mn'),
self.gpd, norm=0, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('O2'),
self.gpd, norm=1, include_no_mixing_energy=0,
pd_non_grand=self.pd, use_hull_energy=False))
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('Li2O'),
self.gpd, norm=1, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
self.pd, norm=0, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=True))
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
self.pd, norm=0, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=False))
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('MnO2'),
self.gpd, norm=0, include_no_mixing_energy=0,
pd_non_grand=self.pd, use_hull_energy=True))
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('MnO2'),
self.gpd, norm=0, include_no_mixing_energy=0,
pd_non_grand=self.pd, use_hull_energy=False))
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'),
self.pd, norm=1, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=False))
with self.assertRaises(Exception) as context1:
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
self.pd, norm=0,
include_no_mixing_energy=1,
pd_non_grand=None))
self.assertTrue(
'Please provide grand phase diagram '
'to compute no_mixing_energy!' == str(context1.exception))
with self.assertRaises(Exception) as context2:
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'),
self.gpd, norm=0,
include_no_mixing_energy=1,
pd_non_grand=None))
self.assertTrue(
'Please provide non-grand phase diagram '
'to compute no_mixing_energy!' == str(context2.exception))
def test_get_entry_energy(self):
# Test warning message.
comp = Composition('MnO3')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
energy = InterfacialReactivity._get_entry_energy(self.pd, comp)
self.assertTrue(len(w) == 1)
self.assertTrue("The reactant MnO3 has no matching entry with"
" negative formation energy, instead convex "
"hull energy for this composition will be used"
" for reaction energy calculation."
in str(w[-1].message))
test1 = np.isclose(energy, -30, atol=1e-03)
self.assertTrue(test1,
'_get_entry_energy: energy for {} is wrong!'.format(
comp.reduced_formula))
# Test normal functionality
comp = Composition('MnO2')
test2 = np.isclose(InterfacialReactivity._get_entry_energy(self.pd, comp), -30,
atol=1e-03)
self.assertTrue(test2,
'_get_entry_energy: energy for {} is wrong!'.format(
comp.reduced_formula))
def test_get_grand_potential(self):
comp = Composition('LiMnO2')
# Test non-normalized case
test1 = np.isclose(self.ir[1]._get_grand_potential(comp), -27,
atol=1e-03)
self.assertTrue(test1,
'_get_grand_potential: '
'Non-normalized case gets error!')
# Test normalized case
test2 = np.isclose(self.ir[2]._get_grand_potential(comp), -9,
atol=1e-03)
self.assertTrue(test2,
'_get_grand_potential: '
'Normalized case gets error!')
comp2 = Composition('Li2O2')
# Test use_hull_energy option.
test3 = np.isclose(self.ir[8]._get_grand_potential(comp2), -4,
atol=1e-03)
self.assertTrue(test3,
'_get_grand_potential: '
'get hull energy gets error!')
test4 = np.isclose(self.ir[9]._get_grand_potential(comp2), -2,
atol=1e-03)
self.assertTrue(test4,
'_get_grand_potential: '
'gets error for {}!'.format(comp2.reduced_formula))
def test_get_energy(self):
test1 = (np.isclose(self.ir[0]._get_energy(0.5), -15, atol=1e-03))
self.assertTrue(test1, '_get_energy: phase diagram gets error!')
test2 = (
np.isclose(self.ir[3]._get_energy(0.6666666), -7.333333, atol=1e-03))
self.assertTrue(test2,
'_get_energy: '
'grand canonical phase diagram gets error!')
test3 = (
np.isclose(self.ir[6]._get_energy(0.3333333), -3.333333, atol=1e-03))
self.assertTrue(test3,
'_get_energy: convex hull energy gets error. ')
test4 = (
np.isclose(self.ir[7]._get_energy(0.3333333), -4, atol=1e-03))
self.assertTrue(test4,
'_get_energy: gets error. ')
def test_get_reaction(self):
test1 = str(self.ir[0]._get_reaction(0.5)) == 'O2 + Mn -> MnO2'
self.assertTrue(test1,
'_get_reaction: '
'reaction not involving chempots species gets error!')
test2 = str(self.ir[3]._get_reaction(0.666666)) \
== 'Mn + Li2O -> 2 Li + 0.5 MnO2 + 0.5 Mn' \
or str(self.ir[3]._get_reaction(0.666666)) \
== 'Mn + Li2O -> 2 Li + 0.5 Mn + 0.5 MnO2'
self.assertTrue(test2,
'_get_reaction: '
'reaction involving chempots species gets error!')
def test_get_get_elmt_amt_in_rxt(self):
rxt1 = Reaction(
[Composition('Mn'), Composition('O2'), Composition('Li')],
[Composition('LiMnO2')])
test1 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt1), 3)
self.assertTrue(test1,
'_get_get_elmt_amt_in_rxt: '
'gpd elements amounts gets error!')
rxt2 = rxt1
rxt2.normalize_to(Composition('Li'), 0.5)
test2 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt2), 1.5)
self.assertTrue(test2,
'_get_get_elmt_amt_in_rxt: '
'gpd elements amounts gets error!')
rxt3 = Reaction([Composition('O2'), Composition('Li')],
[Composition('Li2O')])
# Li is not counted
test3 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt3), 1)
self.assertTrue(test3,
'_get_get_elmt_amt_in_rxt: '
'gpd elements amounts gets error!')
# Li is counted
test4 = np.isclose(self.ir[6]._get_elmt_amt_in_rxt(rxt3), 3)
self.assertTrue(test4,
'_get_get_elmt_amt_in_rxt: '
'pd elements amounts gets error!')
def test_convert(self):
test_array = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
result = [InterfacialReactivity._convert(x, f1, f2) for x, f1, f2 in test_array]
answer = [0.75, 0.5, 0, 1]
self.assertTrue(np.allclose(result, answer),
'_convert: conversion gets error! {0} expected,'
' but gets {1}'.format(answer, result))
def test_reverse_convert(self):
test_array = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
result = [InterfacialReactivity._reverse_convert(x, f1, f2) for x, f1, f2 in
test_array]
answer = [0.25, 0.3076923, 0, 1]
self.assertTrue(np.allclose(result, answer),
'_convert: conversion gets error! {0} expected,'
' but gets {1}'.format(answer, result))
def test_get_products(self):
test1 = sorted(self.ir[0].get_products()) == sorted(
['MnO2', 'O2', 'Mn'])
self.assertTrue(test1,
'get_products: decomposition products gets error '
'for reaction not involving chempots species!')
test2 = sorted(self.ir[3].get_products()) == sorted(
['Li', 'MnO2', 'Mn', 'Li2O'])
self.assertTrue(test2,
'get_products: decomposition products gets error '
'for reaction involving chempots species!')
def test_get_kinks(self):
ir = self.ir[0]
lst = list(self.ir[0].get_kinks())
index = [i[0] for i in lst]
x_kink = [i[1] for i in lst]
energy_kink = [i[2] for i in lst]
react_kink = [str(i[3]) for i in lst]
energy_per_rxt_kink = [i[4] for i in lst]
test1 = index == [1, 2, 3]
self.assertTrue(test1, 'get_kinks:index gets error!')
test2 = np.allclose(x_kink, [0, 0.5, 1])
self.assertTrue(test2, 'get_kinks:x kinks gets error!')
test3 = np.allclose(energy_kink, [0, -15, 0])
self.assertTrue(test3, 'get_kinks:energy kinks gets error!')
test4 = react_kink == ['Mn -> Mn', 'O2 + Mn -> MnO2', 'O2 -> O2']
self.assertTrue(test4,
'get_kinks: reaction kinks '
'gets error for {0} and {1} reaction!'.format(
ir.c1_original.reduced_formula,
ir.c2_original.reduced_formula))
test5 = np.allclose(energy_per_rxt_kink,
[0,
-30 * InterfacialReactivity.EV_TO_KJ_PER_MOL,
0])
self.assertTrue(test5, 'get_kinks: energy_per_rxt_kinks gets error!')
lst = list(self.ir[10].get_kinks())
index = [i[0] for i in lst]
x_kink = [i[1] for i in lst]
energy_kink = [i[2] for i in lst]
react_kink = [str(i[3]) for i in lst]
energy_per_rxt_kink = [i[4] for i in lst]
test6 = index == [1, 2, 3]
self.assertTrue(test6, 'get_kinks:index gets error!')
test7 = np.allclose(x_kink, [0, 0.66667, 1])
self.assertTrue(test7, 'get_kinks:x kinks gets error!')
test8 = np.allclose(energy_kink, [0, -10, 0])
self.assertTrue(test8, 'get_kinks:energy kinks gets error!')
test9 = react_kink == ['Mn -> Mn', 'O2 + Mn -> MnO2', 'O2 -> O2']
self.assertTrue(test9,
'get_kinks:reaction kinks '
'gets error for {0} and {1} reaction!'.format(
ir.c1_original.reduced_formula,
ir.c2_original.reduced_formula))
test10 = np.allclose(energy_per_rxt_kink,
[0,
-30 * InterfacialReactivity.EV_TO_KJ_PER_MOL,
0])
self.assertTrue(test10, 'get_kinks:energy_per_rxt_kinks gets error!')
def test_labels(self):
ir = self.ir[0]
dict = ir.labels()
test1 = dict == {1: 'x= 0.0 energy in eV/atom = 0.0 Mn -> Mn',
2: 'x= 0.5 energy in eV/atom = -15.0 O2 + Mn -> MnO2',
3: 'x= 1.0 energy in eV/atom = 0.0 O2 -> O2'}
self.assertTrue(test1,
'labels:label does not match for interfacial system '
'with {0} and {1}.'.format(
ir.c1_original.reduced_formula,
ir.c2_original.reduced_formula))
def test_plot(self):
# Testing the plot output directly is hard; just call the plot function
# and make sure no error occurs.
for i in self.ir:
i.plot()
def test_minimum(self):
answer = [
(0.5, -15),
(0, 0),
(0.3333333, -10),
(0.6666666, -7.333333),
(0.3333333, -7.333333),
(0.1428571, -7.333333),
(0.3333333, -3.333333),
(0.3333333, -4.0),
]
for i, j in zip(self.ir, answer):
self.assertTrue(np.allclose(i.minimum(), j),
'minimum: the system with {0} and {1} '
'gets error!{2} expected, but gets {3}'.format(
i.c1_original.reduced_formula,
i.c2_original.reduced_formula, str(j),
str(i.minimum())))
def test_get_no_mixing_energy(self):
with self.assertRaises(Exception) as context1:
self.ir[0].get_no_mixing_energy()
self.assertTrue(
'Please provide grand potential phase diagram'
' for computing no_mixing_energy!' == str(context1.exception))
answer = [
[(u'MnO2 (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
[(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
[(u'Li2O (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
[(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
[(u'Mn (eV/atom)', 0.0), (u'Li2O (eV/atom)', 0.0)]
]
def name_lst(lst):
return (lst[0][0], lst[1][0])
def energy_lst(lst):
return (lst[0][1], lst[1][1])
result_info = [i.get_no_mixing_energy() for i in self.ir if i.grand]
for i, j in zip(result_info, answer):
self.assertTrue(name_lst(i) == name_lst(j),
'get_no_mixing_energy: names get error,'
' {0} expected but gets {1}'.format(
name_lst(j), name_lst(i)))
self.assertTrue(np.allclose(energy_lst(i), energy_lst(j)),
'get_no_mixing_energy: '
'no_mixing energies get error, '
'{0} expected but gets {1}'.format(
energy_lst(j), energy_lst(i)))
if __name__ == '__main__':
unittest.main()
| nisse3000/pymatgen | pymatgen/analysis/tests/test_interface_reactions.py | Python | mit | 17,218 | ["pymatgen"] | 5ab5543c3163c6a13a930820d2e2aad8e90291dea8fb5580e6fc7d826acf1d31 |
from __future__ import unicode_literals
import datetime
import requests
from requests_oauthlib import OAuth1
from oauthlib.oauth1 import (SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER,
SIGNATURE_HMAC)
from six.moves.urllib.parse import urlencode, parse_qs
from .constants import (XERO_BASE_URL, XERO_PARTNER_BASE_URL,
REQUEST_TOKEN_URL, AUTHORIZE_URL, ACCESS_TOKEN_URL)
from .exceptions import *
OAUTH_EXPIRY_SECONDS = 3600 # Default unless a response reports differently
class PrivateCredentials(object):
"""An object wrapping the 2-step OAuth process for Private Xero API access.
Usage:
1) Construct a PrivateCredentials() instance:
>>> from xero.auth import PrivateCredentials
>>> credentials = PrivateCredentials(<consumer_key>, <rsa_key>)
rsa_key should be a multi-line string, starting with:
-----BEGIN RSA PRIVATE KEY-----\n
2) Use the credentials:
>>> from xero import Xero
>>> xero = Xero(credentials)
>>> xero.contacts.all()
...
"""
def __init__(self, consumer_key, rsa_key):
self.consumer_key = consumer_key
self.rsa_key = rsa_key
self.base_url = XERO_BASE_URL
# Private API uses consumer key as the OAuth token.
self.oauth_token = consumer_key
self.oauth = OAuth1(
self.consumer_key,
resource_owner_key=self.oauth_token,
rsa_key=self.rsa_key,
signature_method=SIGNATURE_RSA,
signature_type=SIGNATURE_TYPE_AUTH_HEADER,
)
class PublicCredentials(object):
"""An object wrapping the 3-step OAuth process for Public Xero API access.
Usage:
1) Construct a PublicCredentials() instance:
>>> from xero import PublicCredentials
>>> credentials = PublicCredentials(<consumer_key>, <consumer_secret>)
2) Visit the authentication URL:
>>> credentials.url
If a callback URI was provided (e.g., https://example.com/oauth),
the user will be redirected to a URL of the form:
https://example.com/oauth?oauth_token=<token>&oauth_verifier=<verifier>&org=<organization ID>
from which the verifier can be extracted. If no callback URI is
provided, the verifier will be shown on the screen, and must be
manually entered by the user.
3) Verify the instance:
>>> credentials.verify(<verifier string>)
4) Use the credentials.
>>> from xero import Xero
>>> xero = Xero(credentials)
>>> xero.contacts.all()
...
"""
def __init__(self, consumer_key, consumer_secret,
callback_uri=None, verified=False,
oauth_token=None, oauth_token_secret=None,
oauth_expires_at=None, oauth_authorization_expires_at=None):
"""Construct the auth instance.
Must provide the consumer key and secret.
A callback URL may be provided as an option. If provided, the
Xero verification process will redirect to that URL when
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.callback_uri = callback_uri
self.verified = verified
self._oauth = None
self.oauth_expires_at = oauth_expires_at
self.oauth_authorization_expires_at = oauth_authorization_expires_at
self.base_url = XERO_BASE_URL
self._signature_method = SIGNATURE_HMAC
# These are not strictly used by Public Credentials, but
# are reserved for use by other credentials (i.e. Partner)
self.rsa_key = None
self.client_cert = None
self.oauth_session_handle = None
self._init_credentials(oauth_token, oauth_token_secret)
def _init_credentials(self, oauth_token, oauth_token_secret):
"Depending on the state passed in, get self._oauth up and running"
if oauth_token and oauth_token_secret:
if self.verified:
# If provided, this is a fully verified set of
# credentials. Store the oauth_token and secret
# and initialize OAuth around those
self._init_oauth(oauth_token, oauth_token_secret)
else:
# If provided, we are reconstructing an initialized
# (but non-verified) set of public credentials.
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
else:
# This is a brand new set of credentials - we need to generate
# an oauth token so it's available for the url property.
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
callback_uri=self.callback_uri,
rsa_key=self.rsa_key,
signature_method=self._signature_method
)
url = self.base_url + REQUEST_TOKEN_URL
response = requests.post(url=url, auth=oauth, cert=self.client_cert)
self._process_oauth_response(response)
def _init_oauth(self, oauth_token, oauth_token_secret):
"Store and initialize a verified set of OAuth credentials"
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
self._oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
rsa_key=self.rsa_key,
signature_method=self._signature_method
)
def _process_oauth_response(self, response):
"Extracts the fields from an oauth response"
if response.status_code == 200:
credentials = parse_qs(response.text)
# Initialize the oauth credentials
self._init_oauth(
credentials.get('oauth_token')[0],
credentials.get('oauth_token_secret')[0]
)
# If tokens are refreshable, we'll get a session handle
self.oauth_session_handle = credentials.get(
'oauth_session_handle', [None])[0]
# Calculate token/auth expiry
oauth_expires_in = credentials.get(
'oauth_expires_in',
[OAUTH_EXPIRY_SECONDS])[0]
oauth_authorisation_expires_in = credentials.get(
'oauth_authorization_expires_in',
[OAUTH_EXPIRY_SECONDS])[0]
self.oauth_expires_at = datetime.datetime.now() + \
datetime.timedelta(seconds=int(
oauth_expires_in))
self.oauth_authorization_expires_at = \
datetime.datetime.now() + \
datetime.timedelta(seconds=int(
oauth_authorisation_expires_in))
else:
self._handle_error_response(response)
def _handle_error_response(self, response):
if response.status_code == 400:
raise XeroBadRequest(response)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
@property
def state(self):
"""Obtain the useful state of this credentials object so that
we can reconstruct it independently.
"""
return dict(
(attr, getattr(self, attr))
for attr in (
'consumer_key', 'consumer_secret', 'callback_uri',
'verified', 'oauth_token', 'oauth_token_secret',
'oauth_session_handle', 'oauth_expires_at',
'oauth_authorization_expires_at'
)
if getattr(self, attr) is not None
)
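# --- Editor's sketch (not part of the original file) ---
# The state dict is meant to be stashed (e.g. in a web session store) and fed
# back into the matching credentials constructor later; names are hypothetical:
#
#   >>> saved = credentials.state
#   >>> credentials = PublicCredentials(**saved)   # restore later
#
# PartnerCredentials additionally needs rsa_key and client_cert, which are
# deliberately not part of the state dict.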
def verify(self, verifier):
"Verify an OAuth token"
# Construct the credentials for the verification request
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
verifier=verifier,
rsa_key=self.rsa_key,
signature_method=self._signature_method
)
# Make the verification request, getting back an access token
url = self.base_url + ACCESS_TOKEN_URL
response = requests.post(url=url, auth=oauth, cert=self.client_cert)
self._process_oauth_response(response)
self.verified = True
@property
def url(self):
"Returns the URL that can be visited to obtain a verifier code"
# The authorize url is always api.xero.com
url = XERO_BASE_URL + AUTHORIZE_URL + '?' + \
urlencode({'oauth_token': self.oauth_token})
return url
@property
def oauth(self):
"Returns the requests-compatible OAuth object"
if self._oauth is None:
raise XeroNotVerified("OAuth credentials haven't been verified")
return self._oauth
def expired(self, now=None):
if now is None:
now = datetime.datetime.now()
# Credentials states from older versions might not have
# oauth_expires_at available
if self.oauth_expires_at is None:
raise XeroException(None, "Expiry time is not available")
# Allow a bit of time for clock differences and round trip times
# to prevent false negatives. If users want the precise expiry,
# they can use self.oauth_expires_at
CONSERVATIVE_SECONDS = 30
return self.oauth_expires_at <= \
(now + datetime.timedelta(seconds=CONSERVATIVE_SECONDS))
class PartnerCredentials(PublicCredentials):
"""An object wrapping the 3-step OAuth process for Partner Xero API access.
Usage is very similar to Public Credentials with the following changes:
1) You'll need to pass the private key for your RSA certificate.
>>> rsa_key = "-----BEGIN RSA PRIVATE KEY----- ..."
2) You'll need to pass a tuple to the Entrust certificate pair.
>>> client_cert = ('/path/to/entrust-cert.pem',
'/path/to/entrust-private-nopass.pem')
3) Once a token has expired, you can refresh it to get another 30 mins
>>> credentials = PartnerCredentials(**state)
>>> if credentials.expired():
credentials.refresh()
4) Authorization expiry and token expiry become different things.
oauth_expires_at tells when the current token expires (~30 min window)
oauth_authorization_expires_at tells when the overall access
permissions expire (~10 year window)
"""
def __init__(self, consumer_key, consumer_secret, rsa_key, client_cert,
callback_uri=None, verified=False,
oauth_token=None, oauth_token_secret=None,
oauth_expires_at=None, oauth_authorization_expires_at=None,
oauth_session_handle=None):
"""Construct the auth instance.
Must provide the consumer key and secret.
A callback URL may be provided as an option. If provided, the
Xero verification process will redirect to that URL when verification is complete.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.callback_uri = callback_uri
self.verified = verified
self._oauth = None
self.oauth_expires_at = oauth_expires_at
self.oauth_authorization_expires_at = oauth_authorization_expires_at
self._signature_method = SIGNATURE_RSA
self.base_url = XERO_PARTNER_BASE_URL
self.rsa_key = rsa_key
self.client_cert = client_cert
self.oauth_session_handle = oauth_session_handle
self._init_credentials(oauth_token, oauth_token_secret)
def refresh(self):
"Refresh an expired token"
# Construct the credentials for the verification request
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
rsa_key=self.rsa_key,
signature_method=self._signature_method
)
# Make the verification request, getting back an access token
params = {'oauth_session_handle': self.oauth_session_handle}
response = requests.post(url=self.base_url + ACCESS_TOKEN_URL,
params=params, auth=oauth, cert=self.client_cert)
self._process_oauth_response(response)
| MJMortimer/pyxero | xero/auth.py | Python | bsd-3-clause | 13,625 | ["VisIt"] | e7c50eaf91b091a9ca538d2b45240df1a54ccca446f71eff0b782f19c8a6baa2 |
import ast
import collections
from ..visitor import ClassVisitor, handle
from . import Metric
class _TypeCountVisitor(ClassVisitor):
@handle(ast.AST)
def __visit_ast(self, node):
return (node.__class__,) + tuple(cls for name in node._fields for cls in self.visit(getattr(node, name)))
@handle(collections.Sequence)
def __visit_sequence(self, node):
return tuple(cls for entry in node for cls in self.visit(entry))
@handle(str)
def __visit_str(self, node):
return ()
def default(self, node):
return ()
@classmethod
def count(cls, node):
res = {}
for entry in cls().visit(node):
res.setdefault(entry, 0)
res[entry] += 1
return res
class _CyclomaticVisitor(ClassVisitor):
@handle(
ast.If,
ast.IfExp,
ast.For,
ast.While,
ast.TryExcept,
ast.TryFinally,
ast.Break,
ast.Continue,
ast.And,
ast.Or
)
def __visit_selected(self, node):
return 1 + self.__visit_ast(node)
@handle(ast.FunctionDef)
def __visit_function(self, node):
count = _TypeCountVisitor.count(node).get(ast.Return, 0)
if isinstance(node.body[-1], ast.Return):
count -= 1
return count + self.__visit_ast(node)
@handle(ast.AST)
def __visit_ast(self, node):
return sum(self.visit(getattr(node, name)) for name in node._fields)
@handle(collections.Sequence)
def __visit_sequence(self, node):
return sum(self.visit(entry) for entry in node)
@handle(str)
def __visit_str(self, node):
return 0
def default(self, node):
return 0
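# Editor's note (not part of the original module): _CyclomaticVisitor counts
# one for every If/IfExp/For/While/TryExcept/TryFinally/Break/Continue/And/Or
# node, plus each return statement that is not the trailing return of its
# function. A hedged usage sketch, assuming pydepend wraps a parsed module in
# a node whose .ast attribute is the ast.AST:
#
#   # score = CyclomaticComplexity().calculate(node)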
class CyclomaticComplexity(Metric):
def calculate(self, node):
return _CyclomaticVisitor().visit(node.ast)
def get_metric_name(self):
return 'Cyclomatic complexity'
| herczy/pydepend | pydepend/metric/cyclomatic.py | Python | bsd-3-clause | 1,908 | ["VisIt"] | c66a25e202655c7f073a823fb8d8dccc257ea7f48e319421947bec27c7206669 |
from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from .models import APIKey, Short, Visit
def _record_visit(request, short):
remote_addr = (
request.META.get('REMOTE_ADDR') or
request.META.get('HTTP_X_REAL_IP') or
request.META.get('HTTP_X_FORWARDED_FOR')
)
return Visit.objects.create(
short=short,
remote_addr=remote_addr,
user_agent=request.META.get('HTTP_USER_AGENT'),
referrer=request.META.get('HTTP_REFERER'),
)
def short_detail(request, short_key):
try:
short = Short.objects.get_for_key(short_key)
except Short.DoesNotExist as e:
raise Http404(e.message)
_record_visit(request, short)
if short.destination:
return redirect(short.destination)
return redirect(short.image.url)
def short_create(request):
url = request.GET.get('url')
api_key = request.GET.get('key')
user = APIKey.objects.get(key=api_key).user
short, __ = Short.objects.get_or_create(
destination=url,
created_by=user,
)
domain = get_current_site(request).domain
short_path = reverse('short_detail', kwargs={'short_key': short.key})
short_url = '{scheme}://{domain}{short_path}'.format(
scheme=settings.SHORT_SCHEME,
domain=domain,
short_path=short_path)
return HttpResponse(short_url, content_type='text/plain')
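# Editor's note (illustrative only, not part of the app): the reverse() call
# in short_create assumes a urlconf along these hypothetical lines, with a
# pattern named 'short_detail' capturing a short_key group:
#
#   # urlpatterns = [
#   #     url(r'^create/$', views.short_create, name='short_create'),
#   #     url(r'^(?P<short_key>[\w-]+)/$', views.short_detail, name='short_detail'),
#   # ]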
| sneeu/little | little/views.py | Python | mit | 1,560 | ["VisIt"] | 5044b35c3eb85a66e78dc6ba0307c40f432a7e54e2055aee67a8bee015916f5c |
# Author: Christian Brodbeck <[email protected]>
#
# License: BSD-3-Clause
import os
import os.path as op
import re
import shutil
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal
import pytest
import warnings
import mne
from mne.datasets import testing
from mne.io import read_info
from mne.io.kit.tests import data_dir as kit_data_dir
from mne.io.constants import FIFF
from mne.surface import dig_mri_distances
from mne.transforms import invert_transform
from mne.utils import requires_mayavi, traits_test, modified_env, get_config
from mne.channels import DigMontage
data_path = testing.data_path(download=False)
raw_path = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
kit_raw_path = op.join(kit_data_dir, 'test_bin_raw.fif')
subjects_dir = op.join(data_path, 'subjects')
fid_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-fiducials.fif')
ctf_raw_path = op.join(data_path, 'CTF', 'catch-alp-good-f.ds')
nirx_15_0_raw_path = op.join(data_path, 'NIRx', 'nirscout',
'nirx_15_0_recording', 'NIRS-2019-10-27_003.hdr')
nirsport2_raw_path = op.join(data_path, 'NIRx', 'nirsport_v2', 'aurora_2021_9',
'2021-10-01_002_config.hdr')
snirf_nirsport2_raw_path = op.join(data_path, 'SNIRF', 'NIRx', 'NIRSport2',
'1.0.3', '2021-05-05_001.snirf')
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_model_decimation(subjects_dir_tmp):
"""Test CoregModel decimation of high-res to low-res head."""
from mne.gui._coreg_gui import CoregModel
# This makes the test much faster
subject_dir = op.join(subjects_dir_tmp, 'sample')
shutil.move(op.join(subject_dir, 'bem', 'outer_skin.surf'),
op.join(subject_dir, 'surf', 'lh.seghead'))
for fname in ('sample-head.fif', 'sample-head-dense.fif'):
os.remove(op.join(subject_dir, 'bem', fname))
model = CoregModel(guess_mri_subject=False)
with pytest.warns(RuntimeWarning, match='No low-resolution'):
model.mri.subjects_dir = op.dirname(subject_dir)
assert model.mri.subject == 'sample' # already set by setting subjects_dir
assert model.mri.bem_low_res.file == ''
assert len(model.mri.bem_low_res.surf.rr) == 2562
assert len(model.mri.bem_high_res.surf.rr) == 2562 # because we moved it
@requires_mayavi
@traits_test
def test_coreg_model(subjects_dir_tmp):
"""Test CoregModel."""
from mne.gui._coreg_gui import CoregModel
trans_dst = op.join(subjects_dir_tmp, 'test-trans.fif')
# make it use MNI fiducials
os.remove(op.join(subjects_dir_tmp, 'sample', 'bem',
'sample-fiducials.fif'))
model = CoregModel()
with pytest.raises(RuntimeError, match='Not enough information for savin'):
model.save_trans('blah.fif')
model.mri.subjects_dir = subjects_dir_tmp
model.mri.subject = 'sample'
assert model.mri.fid_ok # automated using MNI fiducials
model.hsp.file = raw_path
assert_allclose(model.hsp.lpa, [[-7.137e-2, 0, 5.122e-9]], 1e-4)
assert_allclose(model.hsp.rpa, [[+7.527e-2, 0, 5.588e-9]], 1e-4)
assert_allclose(model.hsp.nasion, [[+3.725e-9, 1.026e-1, 4.191e-9]], 1e-4)
assert model.has_lpa_data
assert model.has_nasion_data
assert model.has_rpa_data
assert len(model.hsp.eeg_points) > 1
assert len(model.mri.bem_low_res.surf.rr) == 2562
assert len(model.mri.bem_high_res.surf.rr) == 267122
lpa_distance = model.lpa_distance
nasion_distance = model.nasion_distance
rpa_distance = model.rpa_distance
avg_point_distance = np.mean(model.point_distance)
model.nasion_weight = 1.
model.fit_fiducials(0)
old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
model.nasion_distance ** 2)
assert new_x < old_x
model.fit_icp(0)
new_dist = np.mean(model.point_distance)
assert new_dist < avg_point_distance
model.save_trans(trans_dst)
trans = mne.read_trans(trans_dst)
assert_allclose(trans['trans'], model.head_mri_t)
# test restoring trans
x, y, z = 100, 200, 50
rot_x, rot_y, rot_z = np.rad2deg([1.5, 0.1, -1.2])
model.trans_x = x
model.trans_y = y
model.trans_z = z
model.rot_x = rot_x
model.rot_y = rot_y
model.rot_z = rot_z
trans = model.mri_head_t
model.reset_traits(["trans_x", "trans_y", "trans_z", "rot_x", "rot_y",
"rot_z"])
assert model.trans_x == 0
model.set_trans(trans)
assert_array_almost_equal(model.trans_x, x)
assert_array_almost_equal(model.trans_y, y)
assert_array_almost_equal(model.trans_z, z)
assert_array_almost_equal(model.rot_x, rot_x)
assert_array_almost_equal(model.rot_y, rot_y)
assert_array_almost_equal(model.rot_z, rot_z)
# info
assert isinstance(model.fid_eval_str, str)
assert isinstance(model.points_eval_str, str)
# scaling job
assert not model.can_prepare_bem_model
model.n_scale_params = 1
assert model.can_prepare_bem_model
model.prepare_bem_model = True
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('sample2', False)
assert sdir == subjects_dir_tmp
assert sfrom == 'sample'
assert sto == 'sample2'
assert_allclose(scale, model.parameters[6:9])
assert skip_fiducials is False
# find BEM files
bems = set()
for fname in os.listdir(op.join(subjects_dir, 'sample', 'bem')):
match = re.match(r'sample-(.+-bem)\.fif', fname)
if match:
bems.add(match.group(1))
assert set(bemsol) == bems
model.prepare_bem_model = False
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('sample2', True)
assert bemsol == []
assert (skip_fiducials)
model.load_trans(fname_trans)
model.save_trans(trans_dst)
trans = mne.read_trans(trans_dst)
assert_allclose(trans['trans'], model.head_mri_t)
assert_allclose(invert_transform(trans)['trans'][:3, 3] * 1000.,
[model.trans_x, model.trans_y, model.trans_z])
@requires_mayavi
@traits_test
def test_coreg_gui_display(subjects_dir_tmp, check_gui_ci):
"""Test CoregFrame."""
from mayavi import mlab
from tvtk.api import tvtk
home_dir = subjects_dir_tmp
# Remove the two files that will make the fiducials okay via MNI estimation
os.remove(op.join(subjects_dir_tmp, 'sample', 'bem',
'sample-fiducials.fif'))
os.remove(op.join(subjects_dir_tmp, 'sample', 'mri', 'transforms',
'talairach.xfm'))
with modified_env(_MNE_GUI_TESTING_MODE='true',
_MNE_FAKE_HOME_DIR=home_dir):
with pytest.raises(ValueError, match='not a valid subject'):
mne.gui.coregistration(
subject='Elvis', subjects_dir=subjects_dir_tmp)
# avoid modal dialog if SUBJECTS_DIR is set to a directory that
# does not contain valid subjects
ui, frame = mne.gui.coregistration(subjects_dir='')
mlab.process_ui_events()
ui.dispose()
mlab.process_ui_events()
ui, frame = mne.gui.coregistration(subjects_dir=subjects_dir_tmp,
subject='sample')
mlab.process_ui_events()
assert not frame.model.mri.fid_ok
frame.model.mri.lpa = [[-0.06, 0, 0]]
frame.model.mri.nasion = [[0, 0.05, 0]]
frame.model.mri.rpa = [[0.08, 0, 0]]
assert frame.model.mri.fid_ok
frame.data_panel.raw_src.file = raw_path
assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
tvtk.SphereSource)
frame.data_panel.view_options_panel.eeg_obj.project_to_surface = True
assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
tvtk.CylinderSource)
mlab.process_ui_events()
# grow hair (faster for low-res)
assert frame.data_panel.view_options_panel.head_high_res
frame.data_panel.view_options_panel.head_high_res = False
frame.model.grow_hair = 40.
# scale
frame.coreg_panel.n_scale_params = 3
frame.coreg_panel.scale_x_inc = True
assert frame.model.scale_x == 101.
frame.coreg_panel.scale_y_dec = True
assert frame.model.scale_y == 99.
# reset parameters
frame.coreg_panel.reset_params = True
assert frame.model.grow_hair == 0
assert not frame.data_panel.view_options_panel.head_high_res
# configuration persistence
assert (frame.model.prepare_bem_model)
frame.model.prepare_bem_model = False
frame.save_config(home_dir)
ui.dispose()
mlab.process_ui_events()
ui, frame = mne.gui.coregistration(subjects_dir=subjects_dir_tmp)
assert not frame.model.prepare_bem_model
assert not frame.data_panel.view_options_panel.head_high_res
ui.dispose()
mlab.process_ui_events()
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_model_with_fsaverage(tmpdir):
"""Test CoregModel with the fsaverage brain data."""
tempdir = str(tmpdir)
from mne.gui._coreg_gui import CoregModel
mne.create_default_subject(subjects_dir=tempdir,
fs_home=op.join(subjects_dir, '..'))
model = CoregModel()
model.mri.subjects_dir = tempdir
model.mri.subject = 'fsaverage'
assert model.mri.fid_ok
model.hsp.file = raw_path
lpa_distance = model.lpa_distance
nasion_distance = model.nasion_distance
rpa_distance = model.rpa_distance
avg_point_distance = np.mean(model.point_distance)
# test hsp point omission
model.nasion_weight = 1.
model.trans_y = -0.008
model.fit_fiducials(0)
model.omit_hsp_points(0.02)
assert model.hsp.n_omitted == 1
model.omit_hsp_points(np.inf)
assert model.hsp.n_omitted == 0
model.omit_hsp_points(0.02)
assert model.hsp.n_omitted == 1
model.omit_hsp_points(0.01)
assert model.hsp.n_omitted == 4
model.omit_hsp_points(0.005)
assert model.hsp.n_omitted == 40
model.omit_hsp_points(0.01)
assert model.hsp.n_omitted == 4
model.omit_hsp_points(0.02)
assert model.hsp.n_omitted == 1
# scale with 1 parameter
model.n_scale_params = 1
model.fit_fiducials(1)
old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
model.nasion_distance ** 2)
assert (new_x < old_x)
model.fit_icp(1)
avg_point_distance_1param = np.mean(model.point_distance)
assert (avg_point_distance_1param < avg_point_distance)
# scaling job
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('scaled', False)
assert sdir == tempdir
assert sfrom == 'fsaverage'
assert sto == 'scaled'
assert_allclose(scale, model.parameters[6:9])
assert set(bemsol) == {'inner_skull-bem'}
model.prepare_bem_model = False
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('scaled', False)
assert bemsol == []
# scale with 3 parameters
model.n_scale_params = 3
model.fit_icp(3)
assert (np.mean(model.point_distance) < avg_point_distance_1param)
# test switching raw disables point omission
assert model.hsp.n_omitted == 1
model.hsp.file = kit_raw_path
assert model.hsp.n_omitted == 0
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_gui_automation():
"""Test that properties get properly updated."""
from mne.gui._file_traits import DigSource
from mne.gui._fiducials_gui import MRIHeadWithFiducialsModel
from mne.gui._coreg_gui import CoregModel
subject = 'sample'
hsp = DigSource()
hsp.file = raw_path
mri = MRIHeadWithFiducialsModel(subjects_dir=subjects_dir, subject=subject)
model = CoregModel(mri=mri, hsp=hsp)
# gh-7254
assert not (model.nearest_transformed_high_res_mri_idx_hsp == 0).all()
model.fit_fiducials()
model.icp_iterations = 2
model.nasion_weight = 2.
model.fit_icp()
model.omit_hsp_points(distance=5e-3)
model.icp_iterations = 2
model.fit_icp()
errs_icp = np.median(
model._get_point_distance())
assert 2e-3 < errs_icp < 3e-3
info = mne.io.read_info(raw_path)
errs_nearest = np.median(
dig_mri_distances(info, fname_trans, subject, subjects_dir))
assert 1e-3 < errs_nearest < 2e-3
class TstVTKPicker(object):
"""Class to test cell picking."""
def __init__(self, mesh, cell_id, event_pos):
self.mesh = mesh
self.cell_id = cell_id
self.point_id = None
self.event_pos = event_pos
def GetCellId(self):
"""Return the picked cell."""
return self.cell_id
def GetDataSet(self):
"""Return the picked mesh."""
return self.mesh
def GetPickPosition(self):
"""Return the picked position."""
vtk_cell = self.mesh.GetCell(self.cell_id)
cell = [vtk_cell.GetPointId(point_id) for point_id
in range(vtk_cell.GetNumberOfPoints())]
self.point_id = cell[0]
return self.mesh.points[self.point_id]
def GetEventPosition(self):
"""Return event position."""
return self.event_pos
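# Editor's note: TstVTKPicker only mimics the subset of the VTK cell-picker
# interface (GetCellId/GetDataSet/GetPickPosition/GetEventPosition) that the
# coregistration GUI callbacks touch, so the tests below can drive _on_pick()
# and friends without a real mouse event.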
@pytest.mark.slowtest
@testing.requires_testing_data
@pytest.mark.parametrize(
'inst_path', (raw_path, 'gen_montage', ctf_raw_path, nirx_15_0_raw_path,
nirsport2_raw_path, snirf_nirsport2_raw_path))
def test_coreg_gui_pyvista_file_support(inst_path, tmpdir,
renderer_interactive_pyvistaqt):
"""Test reading supported files."""
from mne.gui import coregistration
tempdir = str(tmpdir)
if inst_path == 'gen_montage':
# generate a montage fig to use as inst.
tmp_info = read_info(raw_path)
eeg_chans = []
for pt in tmp_info['dig']:
if pt['kind'] == FIFF.FIFFV_POINT_EEG:
eeg_chans.append(f"EEG {pt['ident']:03d}")
dig = DigMontage(dig=tmp_info['dig'],
ch_names=eeg_chans)
inst_path = op.join(tempdir, 'tmp-dig.fif')
dig.save(inst_path)
# Suppressing warnings here is not ideal.
# However ctf_raw_path (catch-alp-good-f.ds) is poorly formed and causes
# mne.io.read_raw to issue warning.
# XXX consider replacing ctf_raw_path and removing warning ignore filter.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
coregistration(inst=inst_path, subject='sample',
subjects_dir=subjects_dir)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_coreg_gui_pyvista(tmpdir, renderer_interactive_pyvistaqt):
"""Test that using CoregistrationUI matches mne coreg."""
from mne.gui import coregistration
tempdir = str(tmpdir)
config = get_config(home_dir=os.environ.get('_MNE_FAKE_HOME_DIR'))
tmp_trans = op.join(tempdir, 'tmp-trans.fif')
coreg = coregistration(subject='sample', subjects_dir=subjects_dir,
trans=fname_trans)
coreg._reset_fiducials()
coreg.close()
coreg = coregistration(inst=raw_path, subject='sample',
subjects_dir=subjects_dir)
coreg._set_fiducials_file(fid_fname)
assert coreg._fiducials_file == fid_fname
# picking
vtk_picker = TstVTKPicker(coreg._surfaces['head'], 0, (0, 0))
coreg._on_mouse_move(vtk_picker, None)
coreg._on_button_press(vtk_picker, None)
coreg._on_pick(vtk_picker, None)
coreg._on_button_release(vtk_picker, None)
coreg._set_lock_fids(True)
assert coreg._lock_fids
coreg._on_pick(vtk_picker, None) # also pick when locked
coreg._set_lock_fids(False)
assert not coreg._lock_fids
coreg._set_lock_fids(True)
assert coreg._lock_fids
assert coreg._nasion_weight == 10.
coreg._set_point_weight(11., 'nasion')
assert coreg._nasion_weight == 11.
coreg._fit_fiducials()
coreg._fit_icp()
assert coreg._coreg._extra_points_filter is None
coreg._omit_hsp()
assert coreg._coreg._extra_points_filter is not None
coreg._reset_omit_hsp_filter()
assert coreg._coreg._extra_points_filter is None
assert coreg._grow_hair == 0
coreg._set_grow_hair(0.1)
assert coreg._grow_hair == 0.1
assert coreg._orient_glyphs == \
(config.get('MNE_COREG_ORIENT_TO_SURFACE', '') == 'true')
assert coreg._hpi_coils
assert coreg._eeg_channels
assert coreg._head_shape_points
assert coreg._scale_mode == 'None'
assert coreg._icp_fid_match == 'nearest'
assert coreg._head_resolution == \
(config.get('MNE_COREG_HEAD_HIGH_RES', 'true') == 'true')
assert not coreg._head_transparency
coreg._set_head_transparency(True)
assert coreg._head_transparency
coreg._save_trans(tmp_trans)
assert op.isfile(tmp_trans)
coreg.close()
| bloyl/mne-python | mne/gui/tests/test_coreg_gui.py | Python | bsd-3-clause | 17,357 | ["Mayavi"] | 7b2eab80347728042def65e93368d029b3a5e05cf40ed46c7f88bc14362b4fb9 |
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the DeclareLaunchArgument action class."""
from launch import LaunchContext
from launch.actions import DeclareLaunchArgument
import pytest
def test_declare_launch_argument_constructors():
"""Test the constructors for DeclareLaunchArgument class."""
DeclareLaunchArgument('name')
DeclareLaunchArgument('name', default_value='default value')
DeclareLaunchArgument('name', default_value='default value', description='description')
DeclareLaunchArgument('name', default_value='val1', description='description',
choices=['val1', 'val2'])
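# Editor's sketch (not part of the original tests): in a launch file the action
# is normally added to a LaunchDescription; the names below are illustrative
# and assume the standard ROS 2 launch API:
#
#   # from launch import LaunchDescription
#   # ld = LaunchDescription([
#   #     DeclareLaunchArgument('use_sim_time', default_value='false',
#   #                           description='Use simulated clock if true'),
#   # ])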
def test_declare_launch_argument_methods():
"""Test the methods of the DeclareLaunchArgument class."""
dla1 = DeclareLaunchArgument('name', default_value='default value', description='description')
assert dla1.name == 'name'
assert isinstance(dla1.default_value, list)
assert dla1.description == 'description'
assert dla1.choices is None
assert 'DeclareLaunchArgument' in dla1.describe()
assert isinstance(dla1.describe_sub_entities(), list)
assert isinstance(dla1.describe_conditional_sub_entities(), list)
dla2 = DeclareLaunchArgument('name')
assert dla2.default_value is None
assert dla2.choices is None
assert dla2.description, 'description does not have a non-empty default value'
dla3 = DeclareLaunchArgument('name', description='description', choices=['var1', 'var2'])
assert dla3.default_value is None
assert dla3.choices == ['var1', 'var2']
assert str(dla3.choices) in dla3.description
with pytest.raises(RuntimeError) as excinfo:
DeclareLaunchArgument('name', description='description', choices=['var1', 'var2'],
default_value='invalid')
assert 'not in provided choices' in str(excinfo.value)
def test_declare_launch_argument_execute():
"""Test the execute (or visit) of the DeclareLaunchArgument class."""
action1 = DeclareLaunchArgument('name')
lc1 = LaunchContext()
with pytest.raises(RuntimeError) as excinfo:
action1.visit(lc1)
assert 'Required launch argument' in str(excinfo.value)
lc1.launch_configurations['name'] = 'value'
assert action1.visit(lc1) is None
action2 = DeclareLaunchArgument('name', default_value='value')
lc2 = LaunchContext()
assert action2.visit(lc2) is None
assert lc1.launch_configurations['name'] == 'value'
action3 = DeclareLaunchArgument('name', default_value='var1', choices=['var1', 'var2'])
lc3 = LaunchContext()
assert action3.visit(lc3) is None
lc3.launch_configurations['name'] = 'invalid_value'
with pytest.raises(RuntimeError) as excinfo:
action3.visit(lc3)
assert 'Valid options are: [var1, var2]' in str(excinfo.value)
lc3.launch_configurations['name'] = 'var1'
assert action3.visit(lc3) is None
lc3.launch_configurations['name'] = 'var2'
assert action3.visit(lc3) is None
| ros2/launch | launch/test/launch/actions/test_declare_launch_argument.py | Python | apache-2.0 | 3,551 | ["VisIt"] | 966797089121da9911a1feaea06c8d1935e69018bad76da622c74db14fbff0ed |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
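# Editor's note (illustrative, not from the project): titanium.xcconfig entries
# look roughly like the hypothetical lines below; $(...) tokens are expanded
# recursively by replace_vars() above:
#
#   # TITANIUM_SDK_VERSION = 1.8.2
#   # TITANIUM_SDK = /Library/Application Support/Titanium/mobilesdk/osx/$(TITANIUM_SDK_VERSION)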
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
sdk = config['TITANIUM_SDK']
support_dir = os.path.join(sdk,'module','support')
sys.path.append(support_dir)
import markdown
documentation = []
for file in os.listdir(docdir):
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','jp.msmc.gesturerecognizer.js')
if not os.path.exists(js_file): return
sdk = config['TITANIUM_SDK']
iphone_dir = os.path.join(sdk,'iphone')
sys.path.insert(0,iphone_dir)
from compiler import Compiler
path = os.path.basename(js_file)
metadata = Compiler.make_function_from_file(path,js_file)
method = metadata['method']
eq = path.replace('.','_')
method = ' return %s;' % method
f = os.path.join(cwd,'Classes','JpMsmcGesturerecognizerModuleAssets.m')
c = open(f).read()
idx = c.find('return ')
before = c[0:idx]
after = """
}
@end
"""
newc = before + method + after
if newc!=c:
x = open(f,'w')
x.write(newc)
x.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
if not os.path.exists(path): die("missing %s" % path)
f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README','jp.msmc.gesturerecognizer.js']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[]):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e)==2 and e[1]=='.pyc':continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
for dn in ('assets','example','platform'):
if os.path.exists(dn):
zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
zf.close()
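# Sketch of the archive layout produced above, with <version> as a placeholder:
#     modules/iphone/jp.msmc.gesturerecognizer/<version>/manifest
#     modules/iphone/jp.msmc.gesturerecognizer/<version>/libjp.msmc.gesturerecognizer.a
#     modules/iphone/jp.msmc.gesturerecognizer/<version>/documentation/*.html
#     modules/iphone/jp.msmc.gesturerecognizer/<version>/assets/..., example/..., platform/...
#     modules/iphone/jp.msmc.gesturerecognizer/<version>/LICENSE
#     modules/iphone/jp.msmc.gesturerecognizer/<version>/module.xcconfig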
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
| atsusy/Gesture-Recognizer | build.py | Python | mit | 5,858 | ["VisIt"] | 465200aeae16f4231749cbb7cb86141dc29e25b87dd2dec0229c02103ee5cc41 |
#!/usr/bin/python
#==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# \author Hans J. Johnson
#
# This script is designed to help change the copyright notices in all ITK files to a common format.
# For files that are .h, .cxx, .hxx, .c, if there is no other copyright information, add the itkCopyright.
from __future__ import print_function
import re
import sys
import os
## New license as specified on: https://itk.org/Wiki/ITK_Release_4/Licensing
NewITKCopyrightNotice="""/*=========================================================================
*
* Copyright NumFOCUS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
"""
NewVTKDependantCopyrightNotice="""/*=========================================================================
*
* Portions of this file are subject to the VTK Toolkit Version 3 copyright.
*
* Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
*
* For complete copyright, license and disclaimer of warranty information
* please refer to the NOTICE file at the top of the ITK source tree.
*
*=========================================================================*/
"""
## Patterns that match the old copyright notice sections
## ITK only copyright
ITKOnlyOldHeader=""" */\* *==.*Program:.*Insight Segmentation & Registration Toolkit.*Copyright .* Insight.*Consortium. All rights reserved.*See ITKCopyright.txt or https://www.itk.org/HTML/Copyright.htm for details.[\n\r ]*This software is distributed WITHOUT ANY WARRANTY; without even.*the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR.*PURPOSE. See the above copyright notices for more information.*=== *\*/[\n\r ]*"""
ITKOnlyOldRE=re.compile(ITKOnlyOldHeader,re.MULTILINE|re.DOTALL|re.IGNORECASE)
## Files that originated in VTK, and now have ITK also
ITKVTKOldHeader=""" */\* *==.*Program:.*Insight Segmentation & Registration Toolkit.*Copyright .* Insight Software Consortium. All rights reserved.*See ITKCopyright.txt or https://www.itk.org/HTML/Copyright.htm for details.[\n\r ]*.*VTKCopyright.txt.*This software is distributed WITHOUT ANY WARRANTY; without even.*the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR.*PURPOSE. See the above copyright notices for more information.*=== *\*/[\n\r ]*"""
ITKVTKOldRE=re.compile(ITKVTKOldHeader,re.MULTILINE|re.DOTALL|re.IGNORECASE)
## Looking for new files.
NewITKHeader=""" */\* *==.*http://www.apache.org/licenses/LICENSE-2.0.txt.*=== *\*/"""
NewITKHeaderRE=re.compile(NewITKHeader,re.MULTILINE|re.DOTALL|re.IGNORECASE)
eolSpaceRemove=re.compile(r' *$',re.MULTILINE)
## The exception list contains files that should not have the ITK copyright notices added.
ExclusionList=['Utilities','.git']
ExtensionsThatNeedCopyright=['.cxx','.c','.h','.hxx']
############
############
############
############
############
############
############
if len(sys.argv) != 2:
print("USAGE: {0} <Top of ITK tree to process>".format(sys.argv[0]))
sys.exit(-1)
HeadOfITKTree=sys.argv[1]
for top,directory,files in os.walk(HeadOfITKTree):
## First remove Excluded directories
    for dd in directory[:]: # iterate over a copy; removing from 'directory' while iterating it would skip entries
if dd[0] == '.': #Skip all directories that begin with '.'
directory.remove(dd)
continue
if dd in ExclusionList:
directory.remove(dd)
continue
## Now process each file
    for ff in files[:]: # iterate over a copy; removing from 'files' while iterating it would skip entries
if ff in ExclusionList:
files.remove(ff)
continue
if ff[0] == '.': #Skip all files that begin with '.'
files.remove(ff)
#print("@@@@@@@",ff)
continue
currFile=os.path.join(top,ff)
print(currFile)
infile=open(currFile,'r')
file_text=infile.read()
newstring=file_text # default output to input, just in case all search patterns fail
infile.close()
substitutionMade=0
testITKOnlySearch=ITKOnlyOldRE.search(file_text)
if testITKOnlySearch:
print("{0} is ITKOnlyHeader".format(currFile))
newstring=ITKOnlyOldRE.sub(NewITKCopyrightNotice,file_text)
newstring=eolSpaceRemove.sub("",newstring) ## a few files still have eol spaces
substitutionMade=1
testITKVTKSearch=ITKVTKOldRE.search(file_text)
if testITKVTKSearch:
print("{0} is VTKITKHeader".format(currFile))
newstring=ITKVTKOldRE.sub(NewITKCopyrightNotice+NewVTKDependantCopyrightNotice,file_text)
newstring=eolSpaceRemove.sub("",newstring) ## a few files still have eol spaces
substitutionMade=1
##Add new copyright if it had not already existed.
root,ext=os.path.splitext(currFile)
if ext in ExtensionsThatNeedCopyright:
testNewITKHeaderRE=NewITKHeaderRE.search(file_text) # see if new CopyRight notice already exists.
if testNewITKHeaderRE:
print("Already Processed {0}".format(currFile))
elif (substitutionMade == 0):
print("{0} needed copyright header.".format(currFile))
newstring=NewITKCopyrightNotice+file_text
newstring=eolSpaceRemove.sub("",newstring) ## a few files still have eol spaces
outfile=open(currFile,'w')
outfile.write(newstring)
outfile.close()
| richardbeare/ITK | Utilities/Maintenance/UpdateCopyrightStatementsInITK.py | Python | apache-2.0 | 6,453 | ["VTK"] | 70f5abe5d90fab68c9f4a9c60dde4a634c70587a826adbdb9b9060b21fbc4c56 |
import numpy as np
import itertools
import logging
import time
import traceback
import threading
from collections import Mapping
from distutils.version import StrictVersion
from ..conventions import cf_encoder
from ..core.utils import FrozenOrderedDict
from ..core.pycompat import iteritems, dask_array_type, OrderedDict
# Create a logger object, but don't add any handlers. Leave that to user code.
logger = logging.getLogger(__name__)
NONE_VAR_NAME = '__values__'
def _encode_variable_name(name):
if name is None:
name = NONE_VAR_NAME
return name
def _decode_variable_name(name):
if name == NONE_VAR_NAME:
name = None
return name
def is_trivial_index(var):
"""
    Determines if an index is 'trivial', meaning that it is
equivalent to np.arange(). This is determined by
checking if there are any attributes or encodings,
if ndims is one, dtype is int and finally by comparing
the actual values to np.arange()
"""
# if either attributes or encodings are defined
# the index is not trivial.
if len(var.attrs) or len(var.encoding):
return False
# if the index is not a 1d integer array
if var.ndim > 1 or not var.dtype.kind == 'i':
return False
arange = np.arange(var.size, dtype=var.dtype)
return np.all(var.values == arange)
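# Rough illustration (assuming an xarray-style Variable with dims, values,
# attrs and encoding; the cases below are for exposition only):
#     dims=('x',), values=[0, 1, 2], no attrs/encoding    -> trivial
#     dims=('x',), values=[0, 2, 3], no attrs/encoding    -> not trivial (values differ from arange)
#     dims=('x',), values=[0, 1, 2], attrs={'units': 'm'} -> not trivial (has attributes)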
def robust_getitem(array, key, catch=Exception, max_retries=6,
initial_delay=500):
"""
Robustly index an array, using retry logic with exponential backoff if any
of the errors ``catch`` are raised. The initial_delay is measured in ms.
With the default settings, the maximum delay will be in the range of 32-64
seconds.
"""
assert max_retries >= 0
for n in range(max_retries + 1):
try:
return array[key]
except catch:
if n == max_retries:
raise
base_delay = initial_delay * 2 ** n
next_delay = base_delay + np.random.randint(base_delay)
msg = ('getitem failed, waiting %s ms before trying again '
'(%s tries remaining). Full traceback: %s' %
(next_delay, max_retries - n, traceback.format_exc()))
logger.debug(msg)
time.sleep(1e-3 * next_delay)
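# Hedged usage sketch ('remote_array' is a placeholder for any indexable
# object, e.g. a flaky OPeNDAP-backed array; nothing here is specific to one
# backend):
#     data = robust_getitem(remote_array, (slice(None), 0), catch=RuntimeError)
# With the defaults this retries up to 6 times, sleeping roughly 0.5s, 1s, 2s,
# ... plus random jitter between attempts before re-raising the error.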
class AbstractDataStore(Mapping):
def __iter__(self):
return iter(self.variables)
def __getitem__(self, key):
return self.variables[key]
def __len__(self):
return len(self.variables)
def get_attrs(self): # pragma: no cover
raise NotImplementedError
def get_variables(self): # pragma: no cover
raise NotImplementedError
def load(self):
"""
This loads the variables and attributes simultaneously.
A centralized loading function makes it easier to create
data stores that do automatic encoding/decoding.
For example:
class SuffixAppendingDataStore(AbstractDataStore):
def load(self):
variables, attributes = AbstractDataStore.load(self)
variables = {'%s_suffix' % k: v
for k, v in iteritems(variables)}
attributes = {'%s_suffix' % k: v
for k, v in iteritems(attributes)}
return variables, attributes
This function will be called anytime variables or attributes
        are requested, so care should be taken to make sure it's fast.
"""
variables = FrozenOrderedDict((_decode_variable_name(k), v)
for k, v in iteritems(self.get_variables()))
attributes = FrozenOrderedDict(self.get_attrs())
return variables, attributes
@property
def variables(self):
# Because encoding/decoding might happen which may require both the
# attributes and the variables, and because a store may be updated
# we need to load both the attributes and variables
# anytime either one is requested.
variables, _ = self.load()
return variables
@property
def attrs(self):
# Because encoding/decoding might happen which may require both the
# attributes and the variables, and because a store may be updated
# we need to load both the attributes and variables
# anytime either one is requested.
_, attributes = self.load()
return attributes
@property
def dimensions(self):
return self.get_dimensions()
def close(self):
pass
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
class ArrayWriter(object):
def __init__(self):
self.sources = []
self.targets = []
def add(self, source, target):
if isinstance(source, dask_array_type):
self.sources.append(source)
self.targets.append(target)
else:
target[...] = source
def sync(self):
if self.sources:
import dask.array as da
import dask
if StrictVersion(dask.__version__) > StrictVersion('0.8.1'):
da.store(self.sources, self.targets, lock=threading.Lock())
else:
da.store(self.sources, self.targets)
self.sources = []
self.targets = []
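# Minimal usage sketch (writer-side plumbing; 'source' and 'target' are
# placeholders for an in-memory or dask array and a backend storage variable):
#     writer = ArrayWriter()
#     writer.add(source, target)   # written immediately unless source is a dask array
#     writer.sync()                # flushes any deferred dask writes in one da.store call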
class AbstractWritableDataStore(AbstractDataStore):
def __init__(self, writer=None):
if writer is None:
writer = ArrayWriter()
self.writer = writer
def set_dimension(self, d, l): # pragma: no cover
raise NotImplementedError
def set_attribute(self, k, v): # pragma: no cover
raise NotImplementedError
def set_variable(self, k, v): # pragma: no cover
raise NotImplementedError
def sync(self):
self.writer.sync()
def store_dataset(self, dataset):
# in stores variables are all variables AND coordinates
# in xarray.Dataset variables are variables NOT coordinates,
# so here we pass the whole dataset in instead of doing
# dataset.variables
self.store(dataset, dataset.attrs)
def store(self, variables, attributes, check_encoding_set=frozenset()):
self.set_attributes(attributes)
        necessary_dims = [v.dims for v in variables.values()]
        necessary_dims = set(itertools.chain(*necessary_dims))
        # set all non-indexes and any index which is not trivial.
        variables = OrderedDict((k, v) for k, v in iteritems(variables)
                                if not (k in necessary_dims and
                                        is_trivial_index(v)))
self.set_variables(variables, check_encoding_set)
def set_attributes(self, attributes):
for k, v in iteritems(attributes):
self.set_attribute(k, v)
def set_variables(self, variables, check_encoding_set):
for vn, v in iteritems(variables):
name = _encode_variable_name(vn)
check = vn in check_encoding_set
target, source = self.prepare_variable(name, v, check)
self.writer.add(source, target)
def set_necessary_dimensions(self, variable):
for d, l in zip(variable.dims, variable.shape):
if d not in self.dimensions:
self.set_dimension(d, l)
class WritableCFDataStore(AbstractWritableDataStore):
def store(self, variables, attributes, check_encoding_set=frozenset()):
# All NetCDF files get CF encoded by default, without this attempting
# to write times, for example, would fail.
cf_variables, cf_attrs = cf_encoder(variables, attributes)
AbstractWritableDataStore.store(self, cf_variables, cf_attrs,
check_encoding_set)
| NicWayand/xray | xarray/backends/common.py | Python | apache-2.0 | 7,876 | ["NetCDF"] | 6b70bb9f1a602b9f0e9932ff55ae5efcc049d59ab08b72b0865f4605bf34616c |
#!/usr/bin/python
#
# Copyright (C) 2014, Jaguar Land Rover
#
# This program is licensed under the terms and conditions of the
# Mozilla Public License, version 2.0. The full text of the
# Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
#
#
# Simple RVI service caller
#
import sys
from rvilib import RVI
import threading
import time
import getopt
def usage():
print "Usage:", sys.argv[0], "[-n RVI-node] service key=val ..."
print " RVI-node DNS name or IP of host running RVI. "
print " default: http://localhost:9001"
print " service Service to invoke in RVI."
print " key=val Named arguments to provide to service."
print
print "Example: ./callrvi.py -n http://rvi1.nginfotpdx.net:9001 \\"
print " jlr.com/vin/aaron/4711/test/ping \\"
print " arg1=val1 arg2=val2"
sys.exit(255)
#
# Check that we have the correct arguments
#
opts, args= getopt.getopt(sys.argv[1:], "n:")
rvi_node = "http://localhost:9001"
for o, a in opts:
if o == "-n":
rvi_node = a
else:
usage()
if len(args) < 1:
usage()
# Construct a dictionary of named arguments from the provided key=value pairs.
i = 0
service = args[0]
rvi_args = {}
for i in args[1:]:
print i
[k, v] = i.split('=')
rvi_args[k] = v
#
# Setup an outbound JSON-RPC connection to the backend RVI node
# Service Edge.
#
rvi = RVI(rvi_node)
print "RVI Node: ", rvi_node
print "Service: ", service
print "args: ", rvi_args
#
# Send the message.
#
rvi.message(service, rvi_args)
| magnusfeuer/rvi_core | python/rvi_call.py | Python | mpl-2.0 | 1,624 | ["Jaguar"] | 9d61ad24355832a2b36eb1e737da5748e50d47487823333b79eb1e452182e1ed |
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}',
'{{first_name_male}} {{last_name}}-{{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}',
'{{first_name_female}} {{last_name}}-{{last_name}}',
'{{prefix_male}} {{first_name_male}} {{last_name}}',
'{{prefix_female}} {{first_name_female}} {{last_name}}',
'{{prefix_male}} {{first_name_male}} {{last_name}}',
'{{prefix_female}} {{first_name_female}} {{last_name}}',
)
first_names_male = (
'Adam', 'Albert', 'Aksel', 'Alex', 'Alexander', 'Alf', 'Allan',
'Alvin', 'Anders', 'André', 'Andreas', 'Anton', 'Arne', 'Asger',
        'August', 'Benjamin', 'Benny', 'Bent', 'Bertil', 'Bertram', 'Birger',
'Bjarne', 'Bo', 'Bob', 'Bobby', 'Boe', 'Boris', 'Borris',
'Brian', 'Bruno', 'Bøje', 'Børge', 'Carl', 'Carlo', 'Carsten',
'Casper', 'Christian', 'Christoffer', 'Christopher', 'Claus', 'Clavs', 'Curt',
'Dan', 'Daniel', 'Danny', 'David', 'Dennis', 'Ebbe', 'Einar',
'Einer', 'Elias', 'Emil', 'Eric', 'Erik', 'Erling', 'Ernst',
'Esben', 'Finn', 'Flemming', 'Frank', 'Frans', 'Freddy', 'Frede',
'Frederik', 'Frode', 'Georg', 'George', 'Gert', 'Gorm', 'Gunnar',
'Gunner', 'Gustav', 'Hans', 'Helge', 'Henrik', 'Henry', 'Herbert',
'Herman', 'Hjalte', 'Holger', 'Hugo', 'Ib', 'Ivan', 'Iver',
'Jack', 'Jacob', 'Jakob', 'James', 'Jan', 'Jano', 'Jarl',
'Jean', 'Jens', 'Jeppe', 'Jesper', 'Jim', 'Jimmy', 'Joachim',
'Joakim', 'Johan', 'Johannes', 'John', 'Johnnie', 'Johnny', 'Jon',
'Jonas', 'Jonathan', 'Julius', 'Jørgen', 'Karl', 'Karlo', 'Karsten',
'Kaspar', 'Kasper', 'Keld', 'Ken', 'Kenn', 'Kenneth', 'Kenny',
'Kent', 'Kim', 'Kjeld', 'Klaus', 'Klavs', 'Kristian', 'Kurt',
'Kåre', 'Lars', 'Lasse', 'Laurits', 'Laus', 'Laust', 'Leif',
'Lennarth', 'Lucas', 'Ludvig', 'Mads', 'Magnus', 'Malthe', 'Marcus',
'Marius', 'Mark', 'Martin', 'Mathias', 'Matthias', 'Michael', 'Mik',
'Mikael', 'Mike', 'Mikkel', 'Mogens', 'Morten', 'Nick', 'Nicklas',
'Nicolai', 'Nicolaj', 'Niels', 'Nikolai', 'Nikolaj', 'Nils', 'Noah',
'Ole', 'Olfert', 'Oliver', 'Oscar', 'Oskar', 'Osvald', 'Otto',
'Ove', 'Palle', 'Patrick', 'Paw', 'Peder', 'Per', 'Pete',
'Peter', 'Paul', 'Philip', 'Poul', 'Preben', 'Ragnar', 'Ragner',
'Rasmus', 'René', 'Richard', 'Richardt', 'Robert', 'Robin', 'Rolf',
'Ron', 'Ronni', 'Ronnie', 'Ronny', 'Ruben', 'Rune', 'Sam',
'Sebastian', 'Silas', 'Simon', 'Simon', 'Sonny', 'Steen', 'Stefan',
'Sten', 'Stephan', 'Steve', 'Steven', 'Stig', 'Svenning', 'Søren',
'Tage', 'Tejs', 'Thomas', 'Tim', 'Timmy', 'Tobias', 'Tom',
'Tommy', 'Tonny', 'Torben', 'Troels', 'Uffe', 'Ulf', 'Ulrik',
'Vagn', 'Valdemar', 'Verner', 'Victor', 'Villads', 'Werner', 'William',
'Yan', 'Yannick', 'Yngve', 'Zacharias', 'Ziggy', 'Øivind', 'Øjvind',
'Ørni', 'Øvli', 'Øystein', 'Øyvind', 'Åbjørn', 'Aage', 'Åge',
)
first_names_female = (
'Abelone', 'Agnes', 'Agnete', 'Alberte', 'Alma', 'Amalie', 'Amanda',
'Andrea', 'Ane', 'Anette', 'Anna', 'Anne', 'Annemette', 'Annette',
'Asta', 'Astrid', 'Benedicte', 'Benedikte', 'Bente', 'Benthe', 'Berit',
'Berta', 'Beth', 'Bettina', 'Birgit', 'Birgitte', 'Birte', 'Birthe',
'Bitten', 'Bodil', 'Britt', 'Britta', 'Camilla', 'Carina', 'Carla',
'Caroline', 'Cathrine', 'Catrine', 'Cecilie', 'Charlotte', 'Christina', 'Christine',
'Cirkeline', 'Clara', 'Connie', 'Conny', 'Dagmar', 'Dagny', 'Daniella',
'Dina', 'Ditte', 'Doris', 'Dorte', 'Dorthe', 'Edith', 'Elin',
'Elisabeth', 'Ella', 'Ellen', 'Elna', 'Else', 'Elsebeth', 'Emilie',
'Emily', 'Emma', 'Erna', 'Esmarelda', 'Ester', 'Filippa', 'Frederikke',
'Freja', 'Frida', 'Gerda', 'Gertrud', 'Gitte', 'Grete', 'Grethe',
'Gundhild', 'Gunhild', 'Gurli', 'Gyda', 'Hannah', 'Hanne', 'Heidi',
'Helen', 'Helle', 'Henriette', 'Herdis', 'Iben', 'Ida', 'Inga',
'Inge', 'Ingelise', 'Inger', 'Ingrid', 'Irma', 'Isabella', 'Jacobine',
'Jacqueline', 'Janne', 'Janni', 'Jannie', 'Jasmin', 'Jean', 'Jenny',
'Joan', 'Johanne', 'Jonna', 'Josefine', 'Josephine', 'Julie', 'Justina',
'Jytte', 'Karen', 'Karin', 'Karina', 'Karla', 'Karoline', 'Katcha',
'Katja', 'Katrine', 'Kirsten', 'Kirstin', 'Kirstine', 'Klara', 'Kristina',
'Kristine', 'Laura', 'Lea', 'Lena', 'Lene', 'Leonora', 'Line',
'Liva', 'Lona', 'Lone', 'Lotte', 'Louise', 'Lærke', 'Maiken',
'Maja', 'Majken', 'Malene', 'Malou', 'Maren', 'Margit', 'Margrethe',
'Maria', 'Marianne', 'Marie', 'Marlene', 'Mathilde', 'Maya', 'Merete',
'Merethe', 'Mette', 'Mia', 'Michala', 'Michelle', 'Mie', 'Mille',
'Mimi', 'Minna', 'Nadia', 'Naja', 'Nana', 'Nanna', 'Nanni',
'Natasha', 'Natasja', 'Nete', 'Nicoline', 'Nina', 'Nora', 'Oda',
'Odeline', 'Odette', 'Ofelia', 'Olga', 'Olivia', 'Patricia', 'Paula',
'Paulina', 'Pernille', 'Pia', 'Ragna', 'Ragnhild', 'Randi', 'Rebecca',
'Regitse', 'Regitze', 'Rikke', 'Rita', 'Ritt', 'Ronja', 'Rosa',
'Ruth', 'Sabine', 'Sandra', 'Sanne', 'Sara', 'Sarah', 'Selma',
'Signe', 'Sigrid', 'Silje', 'Sille', 'Simone', 'Sine', 'Sofia',
'Sofie', 'Solveig', 'Solvej', 'Sonja', 'Sophie', 'Stina', 'Stine',
'Susanne', 'Sussanne', 'Sussie', 'Sys', 'Sørine', 'Søs', 'Tammy',
'Tanja', 'Thea', 'Tilde', 'Tina', 'Tine', 'Tove', 'Trine',
'Ulla', 'Ulrike', 'Ursula', 'Vera', 'Victoria', 'Viola', 'Vivian',
'Weena', 'Winni', 'Winnie', 'Xenia', 'Yasmin', 'Yda', 'Yrsa',
'Yvonne', 'Zahra', 'Zara', 'Zehnia', 'Zelma', 'Zenia', 'Åse',
)
first_names = first_names_male + first_names_female
last_names = (
'Jensen', 'Nielsen', 'Hansen', 'Pedersen', 'Andersen', 'Christensen', 'Larsen',
'Sørensen', 'Rasmussen', 'Petersen', 'Jørgensen', 'Madsen', 'Kristensen', 'Olsen',
'Christiansen', 'Thomsen', 'Poulsen', 'Johansen', 'Knudsen', 'Mortensen', 'Møller',
'Jacobsen', 'Jakobsen', 'Olesen', 'Frederiksen', 'Mikkelsen', 'Henriksen', 'Laursen',
'Lund', 'Schmidt', 'Eriksen', 'Holm', 'Kristiansen', 'Clausen', 'Simonsen',
'Svendsen', 'Andreasen', 'Iversen', 'Jeppesen', 'Mogensen', 'Jespersen', 'Nissen',
'Lauridsen', 'Frandsen', 'Østergaard', 'Jepsen', 'Kjær', 'Carlsen', 'Vestergaard',
'Jessen', 'Nørgaard', 'Dahl', 'Christoffersen', 'Skov', 'Søndergaard', 'Bertelsen',
'Bruun', 'Lassen', 'Bach', 'Gregersen', 'Friis', 'Johnsen', 'Steffensen',
'Kjeldsen', 'Bech', 'Krogh', 'Lauritsen', 'Danielsen', 'Mathiesen', 'Andresen',
'Brandt', 'Winther', 'Toft', 'Ravn', 'Mathiasen', 'Dam', 'Holst',
'Nilsson', 'Lind', 'Berg', 'Schou', 'Overgaard', 'Kristoffersen', 'Schultz',
'Klausen', 'Karlsen', 'Paulsen', 'Hermansen', 'Thorsen', 'Koch', 'Thygesen',
)
prefixes_male = (
'Hr', 'Dr.', 'Prof.', 'Univ.Prof.',
)
prefixes_female = (
'Fru', 'Dr.', 'Prof.', 'Univ.Prof.',
)
| deanishe/alfred-fakeum | src/libs/faker/providers/person/dk_DK/__init__.py | Python | mit | 7,620 | ["Brian"] | a401ce9b3640495f6c4a3b61aac789b56ece4a0c1b7a8993eef49c44f78657e9 |
################################################################################
# Copyright (C) 2013-2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for gaussian_markov_chain module.
"""
import numpy as np
from ..gaussian_markov_chain import GaussianMarkovChain
from ..gaussian_markov_chain import VaryingGaussianMarkovChain
from ..gaussian import Gaussian, GaussianMoments
from ..gaussian import GaussianARD
from ..wishart import Wishart, WishartMoments
from ..gamma import Gamma, GammaMoments
from bayespy.utils import random
from bayespy.utils import linalg
from bayespy.utils import misc
from bayespy.utils.misc import TestCase
def kalman_filter(y, U, A, V, mu0, Cov0, out=None):
"""
Perform Kalman filtering to obtain filtered mean and covariance.
The parameters of the process may vary in time, thus they are
given as iterators instead of fixed values.
Parameters
----------
y : (N,D) array
"Normalized" noisy observations of the states, that is, the
observations multiplied by the precision matrix U (and possibly
other transformation matrices).
U : (N,D,D) array or N-list of (D,D) arrays
Precision matrix (i.e., inverse covariance matrix) of the observation
noise for each time instance.
A : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Dynamic matrix for each time instance.
V : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Covariance matrix of the innovation noise for each time instance.
Returns
-------
mu : array
Filtered mean of the states.
Cov : array
Filtered covariance of the states.
See also
--------
rts_smoother
"""
mu = mu0
Cov = Cov0
# Allocate memory for the results
(N,D) = np.shape(y)
X = np.empty((N,D))
CovX = np.empty((N,D,D))
# Update step for t=0
M = np.dot(np.dot(Cov, U[0]), Cov) + Cov
L = linalg.chol(M)
mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[0]) + mu))
Cov = np.dot(Cov, linalg.chol_solve(L, Cov))
X[0,:] = mu
CovX[0,:,:] = Cov
#for (yn, Un, An, Vn) in zip(y, U, A, V):
for n in range(len(y)-1): #(yn, Un, An, Vn) in zip(y, U, A, V):
# Prediction step
mu = np.dot(A[n], mu)
Cov = np.dot(np.dot(A[n], Cov), A[n].T) + V[n]
# Update step
M = np.dot(np.dot(Cov, U[n+1]), Cov) + Cov
L = linalg.chol(M)
mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[n+1]) + mu))
Cov = np.dot(Cov, linalg.chol_solve(L, Cov))
# Force symmetric covariance (for numeric inaccuracy)
Cov = 0.5*Cov + 0.5*Cov.T
# Store results
X[n+1,:] = mu
CovX[n+1,:,:] = Cov
return (X, CovX)
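# Shape sketch with made-up sizes: for N=4 time steps of a D=2 state,
#     y : (4, 2), U : 4 x (2, 2), A : 3 x (2, 2), V : 3 x (2, 2),
#     mu0 : (2,), Cov0 : (2, 2)
# and the returned (X, CovX) have shapes (4, 2) and (4, 2, 2).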
def rts_smoother(mu, Cov, A, V, removethis=None):
"""
Perform Rauch-Tung-Striebel smoothing to obtain the posterior.
The function returns the posterior mean and covariance of each
state. The parameters of the process may vary in time, thus they
are given as iterators instead of fixed values.
Parameters
----------
mu : (N,D) array
Mean of the states from Kalman filter.
Cov : (N,D,D) array
Covariance of the states from Kalman filter.
A : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Dynamic matrix for each time instance.
V : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Covariance matrix of the innovation noise for each time instance.
Returns
-------
mu : array
Posterior mean of the states.
Cov : array
Posterior covariance of the states.
See also
--------
kalman_filter
"""
N = len(mu)
#n = N-1
# Start from the last time instance and smoothen backwards
x = mu[-1,:]
Covx = Cov[-1,:,:]
for n in reversed(range(N-1)):#(An, Vn) in zip(reversed(A), reversed(V)):
#n = n - 1
#if n <= 0:
# break
# The predicted value of n
x_p = np.dot(A[n], mu[n,:])
Cov_p = np.dot(np.dot(A[n], Cov[n,:,:]), A[n].T) + V[n]
# Temporary variable
S = np.linalg.solve(Cov_p, np.dot(A[n], Cov[n,:,:]))
# Smoothed value of n
x = mu[n,:] + np.dot(S.T, x-x_p)
Covx = Cov[n,:,:] + np.dot(np.dot(S.T, Covx-Cov_p), S)
# Force symmetric covariance (for numeric inaccuracy)
Covx = 0.5*Covx + 0.5*Covx.T
# Store results
mu[n,:] = x
Cov[n,:] = Covx
return (mu, Cov)
class TestGaussianMarkovChain(TestCase):
def create_model(self, N, D):
# Construct the model
Mu = Gaussian(np.random.randn(D),
np.identity(D))
Lambda = Wishart(D,
random.covariance(D))
A = Gaussian(np.random.randn(D,D),
np.identity(D))
V = Gamma(D,
np.random.rand(D))
X = GaussianMarkovChain(Mu, Lambda, A, V, n=N)
Y = Gaussian(X, np.identity(D))
return (Y, X, Mu, Lambda, A, V)
def test_plates(self):
"""
Test that plates are handled correctly.
"""
def test_message_to_mu0(self):
pass
def test_message_to_Lambda0(self):
pass
def test_message_to_A(self):
pass
def test_message_to_v(self):
pass
def test_message_to_child(self):
"""
Test the updating of GaussianMarkovChain.
Check that the moments and the lower bound contribution are computed
correctly.
"""
# TODO: Add plates and missing values!
# Dimensionalities
D = 3
N = 5
(Y, X, Mu, Lambda, A, V) = self.create_model(N, D)
# Inference with arbitrary observations
y = np.random.randn(N,D)
Y.observe(y)
X.update()
(x_vb, xnxn_vb, xpxn_vb) = X.get_moments()
# Get parameter moments
(mu0, mumu0) = Mu.get_moments()
(icov0, logdet0) = Lambda.get_moments()
(a, aa) = A.get_moments()
(icov_x, logdetx) = V.get_moments()
icov_x = np.diag(icov_x)
# Prior precision
Z = np.einsum('...kij,...kk->...ij', aa, icov_x)
U_diag = [icov0+Z] + (N-2)*[icov_x+Z] + [icov_x]
U_super = (N-1) * [-np.dot(a.T, icov_x)]
U = misc.block_banded(U_diag, U_super)
# Prior mean
mu_prior = np.zeros(D*N)
mu_prior[:D] = np.dot(icov0,mu0)
# Data
Cov = np.linalg.inv(U + np.identity(D*N))
mu = np.dot(Cov, mu_prior + y.flatten())
# Moments
xx = mu[:,np.newaxis]*mu[np.newaxis,:] + Cov
mu = np.reshape(mu, (N,D))
xx = np.reshape(xx, (N,D,N,D))
# Check results
self.assertAllClose(x_vb, mu,
msg="Incorrect mean")
for n in range(N):
self.assertAllClose(xnxn_vb[n,:,:], xx[n,:,n,:],
msg="Incorrect second moment")
for n in range(N-1):
self.assertAllClose(xpxn_vb[n,:,:], xx[n,:,n+1,:],
msg="Incorrect lagged second moment")
# Compute the entropy H(X)
ldet = linalg.logdet_cov(Cov)
H = random.gaussian_entropy(-ldet, N*D)
# Compute <log p(X|...)>
xx = np.reshape(xx, (N*D, N*D))
mu = np.reshape(mu, (N*D,))
ldet = -logdet0 - np.sum(np.ones((N-1,D))*logdetx)
P = random.gaussian_logpdf(np.einsum('...ij,...ij',
xx,
U),
np.einsum('...i,...i',
mu,
mu_prior),
np.einsum('...ij,...ij',
mumu0,
icov0),
-ldet,
N*D)
# The VB bound from the net
l = X.lower_bound_contribution()
self.assertAllClose(l, H+P)
# Compute the true bound <log p(X|...)> + H(X)
#
# Simple tests
#
def check(N, D, plates=None, mu=None, Lambda=None, A=None, V=None):
if mu is None:
mu = np.random.randn(D)
if Lambda is None:
Lambda = random.covariance(D)
if A is None:
A = np.random.randn(D,D)
if V is None:
V = np.random.rand(D)
X = GaussianMarkovChain(mu,
Lambda,
A,
V,
plates=plates,
n=N)
(u0, u1, u2) = X._message_to_child()
(mu, mumu) = Gaussian._ensure_moments(mu, GaussianMoments, ndim=1).get_moments()
(Lambda, _) = Wishart._ensure_moments(Lambda, WishartMoments, ndim=1).get_moments()
(a, aa) = Gaussian._ensure_moments(A, GaussianMoments, ndim=1).get_moments()
a = a * np.ones((N-1,D,D)) # explicit broadcasting for simplicity
aa = aa * np.ones((N-1,D,D,D)) # explicit broadcasting for simplicity
(v, _) = Gamma._ensure_moments(V, GammaMoments).get_moments()
v = v * np.ones((N-1,D))
plates_C = X.plates
plates_mu = X.plates
C = np.zeros(plates_C + (N,D,N,D))
plates_mu = np.shape(mu)[:-1]
m = np.zeros(plates_mu + (N,D))
m[...,0,:] = np.einsum('...ij,...j->...i', Lambda, mu)
C[...,0,:,0,:] = Lambda + np.einsum('...dij,...d->...ij',
aa[...,0,:,:,:],
v[...,0,:])
for n in range(1,N-1):
C[...,n,:,n,:] = (np.einsum('...dij,...d->...ij',
aa[...,n,:,:,:],
v[...,n,:])
+ v[...,n,:,None] * np.identity(D))
for n in range(N-1):
C[...,n,:,n+1,:] = -np.einsum('...di,...d->...id',
a[...,n,:,:],
v[...,n,:])
C[...,n+1,:,n,:] = -np.einsum('...di,...d->...di',
a[...,n,:,:],
v[...,n,:])
C[...,-1,:,-1,:] = v[...,-1,:,None]*np.identity(D)
C = np.reshape(C, plates_C+(N*D,N*D))
Cov = np.linalg.inv(C)
Cov = np.reshape(Cov, plates_C+(N,D,N,D))
m0 = np.einsum('...minj,...nj->...mi', Cov, m)
m1 = np.zeros(plates_C+(N,D,D))
m2 = np.zeros(plates_C+(N-1,D,D))
for n in range(N):
m1[...,n,:,:] = Cov[...,n,:,n,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n,:])
for n in range(N-1):
m2[...,n,:,:] = Cov[...,n,:,n+1,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n+1,:])
self.assertAllClose(m0, u0*np.ones(np.shape(m0)))
self.assertAllClose(m1, u1*np.ones(np.shape(m1)))
self.assertAllClose(m2, u2*np.ones(np.shape(m2)))
pass
check(4,1)
check(4,3)
#
# Test mu
#
# Simple
check(4,3,
mu=Gaussian(np.random.randn(3),
random.covariance(3)))
# Plates
check(4,3,
mu=Gaussian(np.random.randn(5,6,3),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=(5,)))
check(4,3,
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,
plates=(5,),
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=()))
check(4,3,
plates=(5,),
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(1,)))
#
# Test Lambda
#
# Simple
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3)))
# Plates
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,)))
check(4,3,
Lambda=Wishart(10+np.random.rand(1),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=()))
check(4,3,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(1,)))
#
# Test A
#
# Simple
check(4,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(3,)))
# Plates on time axis
check(5,3,
A=GaussianARD(np.random.randn(4,3,3),
np.random.rand(4,3,3),
shape=(3,),
plates=(4,3)))
# Plates on time axis with broadcasted moments
check(5,3,
A=GaussianARD(np.random.randn(1,3,3),
np.random.rand(1,3,3),
shape=(3,),
plates=(4,3)))
check(5,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(4,3)))
# Plates
check(4,3,
A=GaussianARD(np.random.randn(5,6,1,3,3),
np.random.rand(5,6,1,3,3),
shape=(3,),
plates=(5,6,1,3)))
# Plates with moments broadcasted over plates
check(4,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(5,1,3)))
check(4,3,
A=GaussianARD(np.random.randn(1,1,3,3),
np.random.rand(1,1,3,3),
shape=(3,),
plates=(5,1,3)))
# Plates broadcasting
check(4,3,
plates=(5,),
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(3,)))
check(4,3,
plates=(5,),
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(1,1,3)))
#
# Test v
#
# Simple
check(4,3,
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(1,3)))
check(4,3,
V=Gamma(np.random.rand(3),
np.random.rand(3),
plates=(3,)))
# Plates
check(4,3,
V=Gamma(np.random.rand(5,6,1,3),
np.random.rand(5,6,1,3),
plates=(5,6,1,3)))
# Plates with moments broadcasted over plates
check(4,3,
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(5,1,3)))
check(4,3,
V=Gamma(np.random.rand(1,1,3),
np.random.rand(1,1,3),
plates=(5,1,3)))
# Plates broadcasting
check(4,3,
plates=(5,),
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(1,3)))
check(4,3,
plates=(5,),
V=Gamma(np.random.rand(1,1,3),
np.random.rand(1,1,3),
plates=(1,1,3)))
#
# Check with input signals
#
mu = 2
Lambda = 3
A = 4
B = 5
v = 6
inputs = [[-2], [3]]
X = GaussianMarkovChain([mu], [[Lambda]], [[A,B]], [v], inputs=inputs)
V = (np.array([[v*A**2, -v*A, 0],
[-v*A, v*A**2, -v*A],
[0, -v*A, 0]]) +
np.array([[Lambda, 0, 0],
[0, v, 0],
[0, 0, v]]))
m = (np.array([Lambda*mu, 0, 0]) +
np.array([0, v*B*inputs[0][0], v*B*inputs[1][0]]) -
np.array([v*A*B*inputs[0][0], v*A*B*inputs[1][0], 0]))
Cov = np.linalg.inv(V)
mean = np.dot(Cov, m)
X.update()
u = X.get_moments()
self.assertAllClose(u[0], mean[:,None])
self.assertAllClose(u[1] - u[0][...,None,:]*u[0][...,:,None],
Cov[(0,1,2),(0,1,2),None,None])
self.assertAllClose(u[2] - u[0][...,:-1,:,None]*u[0][...,1:,None,:],
Cov[(0,1),(1,2),None,None])
pass
def test_smoothing(self):
"""
Test the posterior estimation of GaussianMarkovChain.
Create time-variant dynamics and compare the results of BayesPy VB
inference and standard Kalman filtering & smoothing.
This is not that useful anymore, because the moments are checked much
better in another test method.
"""
#
# Set up an artificial system
#
# Dimensions
N = 500
D = 2
# Dynamics (time varying)
A0 = np.array([[.9, -.4], [.4, .9]])
A1 = np.array([[.98, -.1], [.1, .98]])
l = np.linspace(0, 1, N-1).reshape((-1,1,1))
A = (1-l)*A0 + l*A1
# Innovation covariance matrix (time varying)
v = np.random.rand(D)
V = np.diag(v)
# Observation noise covariance matrix
C = np.identity(D)
#
# Simulate data
#
X = np.empty((N,D))
Y = np.empty((N,D))
x = np.array([0.5, -0.5])
X[0,:] = x
Y[0,:] = x + np.random.multivariate_normal(np.zeros(D), C)
for n in range(N-1):
x = np.dot(A[n,:,:],x) + np.random.multivariate_normal(np.zeros(D), V)
X[n+1,:] = x
Y[n+1,:] = x + np.random.multivariate_normal(np.zeros(D), C)
#
# BayesPy inference
#
# Construct VB model
Xh = GaussianMarkovChain(np.zeros(D), np.identity(D), A, 1/v, n=N)
Yh = Gaussian(Xh, np.identity(D), plates=(N,))
# Put data
Yh.observe(Y)
# Run inference
Xh.update()
# Store results
Xh_vb = Xh.u[0]
CovXh_vb = Xh.u[1] - Xh_vb[...,np.newaxis,:] * Xh_vb[...,:,np.newaxis]
#
# "The ground truth" using standard Kalman filter and RTS smoother
#
V = N*(V,)
UY = Y
U = N*(C,)
(Xh, CovXh) = kalman_filter(UY, U, A, V, np.zeros(D), np.identity(D))
(Xh, CovXh) = rts_smoother(Xh, CovXh, A, V)
#
# Check results
#
self.assertTrue(np.allclose(Xh_vb, Xh))
self.assertTrue(np.allclose(CovXh_vb, CovXh))
class TestVaryingGaussianMarkovChain(TestCase):
def test_plates_from_parents(self):
"""
Test that VaryingGaussianMarkovChain deduces plates correctly
"""
def check(plates_X,
plates_mu=(),
plates_Lambda=(),
plates_B=(),
plates_S=(),
plates_v=()):
D = 3
K = 2
N = 4
np.random.seed(42)
mu = Gaussian(np.random.randn(*(plates_mu+(D,))),
random.covariance(D))
Lambda = Wishart(D+np.ones(plates_Lambda),
random.covariance(D))
B = GaussianARD(np.random.randn(*(plates_B+(D,D,K))),
1+np.random.rand(*(plates_B+(D,D,K))),
shape=(D,K),
plates=plates_B+(D,))
S = GaussianARD(np.random.randn(*(plates_S+(N,K))),
1+np.random.rand(*(plates_S+(N,K))),
shape=(K,),
plates=plates_S+(N,))
v = Gamma(1+np.random.rand(*(plates_v+(1,D))),
1+np.random.rand(*(plates_v+(1,D))))
X = VaryingGaussianMarkovChain(mu, Lambda, B, S, v, name="X")
self.assertEqual(plates_X, X.plates,
msg="Incorrect plates deduced")
pass
check(())
check((2,3),
plates_mu=(2,3))
check((6,7),
plates_Lambda=(6,7))
check((2,3),
plates_B=(2,3))
check((2,3),
plates_S=(2,3))
check((2,3),
plates_v=(2,3))
pass
def test_message_to_child(self):
# A very simple check before the more complex ones:
# 1-D process, k=1, fixed constant parameters
m = 1.0
l = 4.0
b = 2.0
s = [3.0, 8.0]
v = 5.0
X = VaryingGaussianMarkovChain([m],
[[l]],
[[[b]]],
[[s[0]],[s[1]]],
[v])
(u0, u1, u2) = X._message_to_child()
C = np.array([[l+b**2*s[0]**2*v, -b*s[0]*v, 0],
[ -b*s[0]*v, v+b**2*s[1]**2*v, -b*s[1]*v],
[ 0, -b*s[1]*v, v]])
Cov = np.linalg.inv(C)
m0 = np.dot(Cov, [[l*m], [0], [0]])
m1 = np.diag(Cov)[:,None,None] + m0[:,:,None]**2
m2 = np.diag(Cov, k=1)[:,None,None] + m0[1:,:,None]*m0[:-1,:,None]
self.assertAllClose(m0, u0)
self.assertAllClose(m1, u1)
self.assertAllClose(m2, u2)
def check(N, D, K, plates=None, mu=None, Lambda=None, B=None, S=None, V=None):
if mu is None:
mu = np.random.randn(D)
if Lambda is None:
Lambda = random.covariance(D)
if B is None:
B = np.random.randn(D,D,K)
if S is None:
S = np.random.randn(N-1,K)
if V is None:
V = np.random.rand(D)
X = VaryingGaussianMarkovChain(mu,
Lambda,
B,
S,
V,
plates=plates,
n=N)
(u0, u1, u2) = X._message_to_child()
(mu, mumu) = X.parents[0].get_moments()
(Lambda, _) = X.parents[1].get_moments()
(b, bb) = X.parents[2].get_moments()
(s, ss) = X.parents[3].get_moments()
(v, _) = X.parents[4].get_moments()
v = v * np.ones((N-1,D))
#V = np.atleast_3d(v)[...,-1,:,None]*np.identity(D)
plates_C = X.plates
plates_mu = X.plates
C = np.zeros(plates_C + (N,D,N,D))
plates_mu = np.shape(mu)[:-1]
m = np.zeros(plates_mu + (N,D))
m[...,0,:] = np.einsum('...ij,...j->...i', Lambda, mu)
#m = np.reshape(m, plates_mu + (N*D,))
A = np.einsum('...dik,...nk->...ndi', b, s)
AA = np.einsum('...dikjl,...nkl->...ndij', bb, ss)
C[...,0,:,0,:] = Lambda + np.einsum('...dij,...d->...ij',
AA[...,0,:,:,:],
v[...,0,:])
for n in range(1,N-1):
C[...,n,:,n,:] = (np.einsum('...dij,...d->...ij',
AA[...,n,:,:,:],
v[...,n,:])
+ v[...,n,:,None] * np.identity(D))
for n in range(N-1):
C[...,n,:,n+1,:] = -np.einsum('...di,...d->...id',
A[...,n,:,:],
v[...,n,:])
C[...,n+1,:,n,:] = -np.einsum('...di,...d->...di',
A[...,n,:,:],
v[...,n,:])
C[...,-1,:,-1,:] = v[...,-1,:,None]*np.identity(D)
C = np.reshape(C, plates_C+(N*D,N*D))
Cov = np.linalg.inv(C)
Cov = np.reshape(Cov, plates_C+(N,D,N,D))
m0 = np.einsum('...minj,...nj->...mi', Cov, m)
m1 = np.zeros(plates_C+(N,D,D))
m2 = np.zeros(plates_C+(N-1,D,D))
for n in range(N):
m1[...,n,:,:] = Cov[...,n,:,n,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n,:])
for n in range(N-1):
m2[...,n,:,:] = Cov[...,n,:,n+1,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n+1,:])
self.assertAllClose(m0, u0*np.ones(np.shape(m0)))
self.assertAllClose(m1, u1*np.ones(np.shape(m1)))
self.assertAllClose(m2, u2*np.ones(np.shape(m2)))
pass
check(2,1,1)
check(2,3,1)
check(2,1,3)
check(4,3,2)
#
# Test mu
#
# Simple
check(4,3,2,
mu=Gaussian(np.random.randn(3),
random.covariance(3)))
# Plates
check(4,3,2,
mu=Gaussian(np.random.randn(5,6,3),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,2,
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=(5,)))
check(4,3,2,
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,2,
plates=(5,),
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=()))
check(4,3,2,
plates=(5,),
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(1,)))
#
# Test Lambda
#
# Simple
check(4,3,2,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3)))
# Plates
check(4,3,2,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,2,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,)))
check(4,3,2,
Lambda=Wishart(10+np.random.rand(1),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,2,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=()))
check(4,3,2,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(1,)))
#
# Test B
#
# Simple
check(4,3,2,
B=GaussianARD(np.random.randn(3,3,2),
np.random.rand(3,3,2),
shape=(3,2),
plates=(3,)))
# Plates
check(4,3,2,
B=GaussianARD(np.random.randn(5,6,3,3,2),
np.random.rand(5,6,3,3,2),
shape=(3,2),
plates=(5,6,3)))
# Plates with moments broadcasted over plates
check(4,3,2,
B=GaussianARD(np.random.randn(3,3,2),
np.random.rand(3,3,2),
shape=(3,2),
plates=(5,3)))
check(4,3,2,
B=GaussianARD(np.random.randn(1,3,3,2),
np.random.rand(1,3,3,2),
shape=(3,2),
plates=(5,3)))
# Plates broadcasting
check(4,3,2,
plates=(5,),
B=GaussianARD(np.random.randn(3,3,2),
np.random.rand(3,3,2),
shape=(3,2),
plates=(3,)))
check(4,3,2,
plates=(5,),
B=GaussianARD(np.random.randn(3,3,2),
np.random.rand(3,3,2),
shape=(3,2),
plates=(1,3)))
#
# Test S
#
# Simple
check(4,3,2,
S=GaussianARD(np.random.randn(4-1,2),
np.random.rand(4-1,2),
shape=(2,),
plates=(4-1,)))
# Plates
check(4,3,2,
S=GaussianARD(np.random.randn(5,6,4-1,2),
np.random.rand(5,6,4-1,2),
shape=(2,),
plates=(5,6,4-1,)))
# Plates with moments broadcasted over plates
check(4,3,2,
S=GaussianARD(np.random.randn(4-1,2),
np.random.rand(4-1,2),
shape=(2,),
plates=(5,4-1,)))
check(4,3,2,
S=GaussianARD(np.random.randn(1,4-1,2),
np.random.rand(1,4-1,2),
shape=(2,),
plates=(5,4-1,)))
# Plates broadcasting
check(4,3,2,
plates=(5,),
S=GaussianARD(np.random.randn(4-1,2),
np.random.rand(4-1,2),
shape=(2,),
plates=(4-1,)))
check(4,3,2,
plates=(5,),
S=GaussianARD(np.random.randn(4-1,2),
np.random.rand(4-1,2),
shape=(2,),
plates=(1,4-1,)))
#
# Test v
#
# Simple
check(4,3,2,
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(1,3)))
check(4,3,2,
V=Gamma(np.random.rand(3),
np.random.rand(3),
plates=(3,)))
# Plates
check(4,3,2,
V=Gamma(np.random.rand(5,6,1,3),
np.random.rand(5,6,1,3),
plates=(5,6,1,3)))
# Plates with moments broadcasted over plates
check(4,3,2,
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(5,1,3)))
check(4,3,2,
V=Gamma(np.random.rand(1,1,3),
np.random.rand(1,1,3),
plates=(5,1,3)))
# Plates broadcasting
check(4,3,2,
plates=(5,),
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(1,3)))
check(4,3,2,
plates=(5,),
V=Gamma(np.random.rand(1,1,3),
np.random.rand(1,1,3),
plates=(1,1,3)))
#
# Uncertainty in both B and S
#
check(4,3,2,
B=GaussianARD(np.random.randn(3,3,2),
np.random.rand(3,3,2),
shape=(3,2),
plates=(3,)),
S=GaussianARD(np.random.randn(4-1,2),
np.random.rand(4-1,2),
shape=(2,),
plates=(4-1,)))
pass
def test_message_to_mu(self):
# TODO
pass
def test_message_to_Lambda(self):
# TODO
pass
def test_message_to_B(self):
# TODO
pass
def test_message_to_S(self):
# TODO
pass
def test_message_to_v(self):
# TODO
pass
| dungvtdev/upsbayescpm | bayespy/inference/vmp/nodes/tests/test_gaussian_markov_chain.py | Python | mit | 34,665 | ["Gaussian"] | b44dc7a0ffb7c06f44560960b5fa4b98d8c27ce95ceddaf1526a240d3ae8a0d1 |
#!/usr/bin/env python
# Copyright (c) 2012, 2014 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Dam Sunwoo
#
# This script converts gem5 output to ARM DS-5 Streamline .apc project file
# (Requires the gem5 runs to be run with ContextSwitchStatsDump enabled and
# some patches applied to target Linux kernel.)
# Visit http://www.gem5.org/Streamline for more details.
#
# Usage:
# m5stats2streamline.py <stat_config.ini> <gem5 run folder> <dest .apc folder>
#
# <stat_config.ini>: .ini file that describes which stats to be included
# in conversion. Sample .ini files can be found in
# util/streamline.
# NOTE: this is NOT the gem5 config.ini file.
#
# <gem5 run folder>: Path to gem5 run folder (must contain config.ini,
# stats.txt[.gz], and system.tasks.txt.)
#
# <dest .apc folder>: Destination .apc folder path
#
# APC project generation based on Gator v17 (DS-5 v5.17)
# Subsequent versions should be backward compatible
import re, sys, os
from ConfigParser import ConfigParser
import gzip
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
import shutil
import zlib
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Converts gem5 runs to ARM DS-5 Streamline .apc project file.
(NOTE: Requires gem5 runs to be run with ContextSwitchStatsDump
enabled and some patches applied to the target Linux kernel.)
Visit http://www.gem5.org/Streamline for more details.
APC project generation based on Gator v17 (DS-5 v5.17)
Subsequent versions should be backward compatible
""")
parser.add_argument("stat_config_file", metavar="<stat_config.ini>",
help=".ini file that describes which stats to be included \
in conversion. Sample .ini files can be found in \
util/streamline. NOTE: this is NOT the gem5 config.ini \
file.")
parser.add_argument("input_path", metavar="<gem5 run folder>",
help="Path to gem5 run folder (must contain config.ini, \
stats.txt[.gz], and system.tasks.txt.)")
parser.add_argument("output_path", metavar="<dest .apc folder>",
help="Destination .apc folder path")
parser.add_argument("--num-events", action="store", type=int,
default=1000000,
help="Maximum number of scheduling (context switch) \
events to be processed. Set to truncate early. \
Default=1000000")
parser.add_argument("--gzipped-bmp-not-supported", action="store_true",
help="Do not use gzipped .bmp files for visual annotations. \
This option is only required when using Streamline versions \
older than 5.14")
parser.add_argument("--verbose", action="store_true",
help="Enable verbose output")
args = parser.parse_args()
if not re.match("(.*)\.apc", args.output_path):
print "ERROR: <dest .apc folder> should end with '.apc'!"
sys.exit(1)
# gzipped BMP files for visual annotation is supported in Streamline 5.14.
# Setting this to True will significantly compress the .apc binary file that
# includes frame buffer snapshots.
gzipped_bmp_supported = not args.gzipped_bmp_not_supported
ticks_in_ns = -1
# Default max # of events. Increase this for longer runs.
num_events = args.num_events
start_tick = -1
end_tick = -1
# Parse gem5 config.ini file to determine some system configurations.
# Number of CPUs, L2s, etc.
def parseConfig(config_file):
global num_cpus, num_l2
print "\n==============================="
print "Parsing gem5 config.ini file..."
print config_file
print "===============================\n"
config = ConfigParser()
if not config.read(config_file):
print "ERROR: config file '", config_file, "' not found"
sys.exit(1)
if config.has_section("system.cluster.cpu"):
num_cpus = 1
else:
num_cpus = 0
while config.has_section("system.cluster.cpu" + str(num_cpus)):
num_cpus += 1
if config.has_section("system.cluster.l2_cache"):
num_l2 = 1
else:
num_l2 = 0
while config.has_section("system.cluster.l2_cache" + str(num_l2)):
num_l2 += 1
print "Num CPUs:", num_cpus
print "Num L2s:", num_l2
print ""
return (num_cpus, num_l2)
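# Illustrative outcome only: a 4-core gem5 run whose config.ini contains
# sections system.cluster.cpu0 .. system.cluster.cpu3 (and, say, a single
# system.cluster.l2_cache section) would yield (num_cpus, num_l2) == (4, 1).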
process_dict = {}
thread_dict = {}
process_list = []
idle_uid = -1
kernel_uid = -1
class Task(object):
def __init__(self, uid, pid, tgid, task_name, is_process, tick):
if pid == 0: # Idle
self.uid = 0
elif pid == -1: # Kernel
self.uid = 0
else:
self.uid = uid
self.pid = pid
self.tgid = tgid
self.is_process = is_process
self.task_name = task_name
self.children = []
self.tick = tick # time this task first appeared
class Event(object):
def __init__(self, tick, task):
self.tick = tick
self.task = task
############################################################
# Types used in APC Protocol
# - packed32, packed64
# - int32
# - string
############################################################
def packed32(x):
ret = []
more = True
while more:
b = x & 0x7f
x = x >> 7
if (((x == 0) and ((b & 0x40) == 0)) or \
((x == -1) and ((b & 0x40) != 0))):
more = False
else:
b = b | 0x80
ret.append(b)
return ret
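# A few hand-checked encodings of the signed LEB128-style format above
# (least-significant 7 bits first, bit 7 set on continuation bytes):
#     packed32(1)   -> [0x01]
#     packed32(-1)  -> [0x7f]
#     packed32(128) -> [0x80, 0x01]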
# For historical reasons, 32/64-bit versions of functions are preserved
def packed64(x):
return packed32(x)
# variable length packed 4-byte unsigned value
def unsigned_packed32(x):
ret = []
if ((x & 0xffffff80) == 0):
ret.append(x & 0x7f)
elif ((x & 0xffffc000) == 0):
ret.append((x | 0x80) & 0xff)
ret.append((x >> 7) & 0x7f)
elif ((x & 0xffe00000) == 0):
ret.append((x | 0x80) & 0xff)
ret.append(((x >> 7) | 0x80) & 0xff)
ret.append((x >> 14) & 0x7f)
elif ((x & 0xf0000000) == 0):
ret.append((x | 0x80) & 0xff)
ret.append(((x >> 7) | 0x80) & 0xff)
ret.append(((x >> 14) | 0x80) & 0xff)
ret.append((x >> 21) & 0x7f)
else:
ret.append((x | 0x80) & 0xff)
ret.append(((x >> 7) | 0x80) & 0xff)
ret.append(((x >> 14) | 0x80) & 0xff)
ret.append(((x >> 21) | 0x80) & 0xff)
ret.append((x >> 28) & 0x0f)
return ret
# variable length packed 8-byte unsigned value
def unsigned_packed64(x):
ret = []
if ((x & 0xffffffffffffff80) == 0):
ret.append(x & 0x7f)
elif ((x & 0xffffffffffffc000) == 0):
ret.append((x | 0x80) & 0xff)
ret.append((x >> 7) & 0x7f)
elif ((x & 0xffffffffffe00000) == 0):
ret.append((x | 0x80) & 0xff)
ret.append(((x >> 7) | 0x80) & 0xff)
ret.append((x >> 14) & 0x7f)
elif ((x & 0xfffffffff0000000) == 0):
ret.append((x | 0x80) & 0xff)
ret.append(((x >> 7) | 0x80) & 0xff)
ret.append(((x >> 14) | 0x80) & 0xff)
ret.append((x >> 21) & 0x7f)
elif ((x & 0xfffffff800000000) == 0):
ret.append((x | 0x80) & 0xff)
ret.append(((x >> 7) | 0x80) & 0xff)
ret.append(((x >> 14) | 0x80) & 0xff)
ret.append(((x >> 21) | 0x80) & 0xff)
ret.append((x >> 28) & 0x7f)
elif ((x & 0xfffffc0000000000) == 0):
ret.append((x | 0x80) & 0xff)
ret.append(((x >> 7) | 0x80) & 0xff)
ret.append(((x >> 14) | 0x80) & 0xff)
ret.append(((x >> 21) | 0x80) & 0xff)
ret.append(((x >> 28) | 0x80) & 0xff)
ret.append((x >> 35) & 0x7f)
elif ((x & 0xfffe000000000000) == 0):
ret.append((x | 0x80) & 0xff)
ret.append(((x >> 7) | 0x80) & 0xff)
ret.append(((x >> 14) | 0x80) & 0xff)
ret.append(((x >> 21) | 0x80) & 0xff)
ret.append(((x >> 28) | 0x80) & 0xff)
ret.append(((x >> 35) | 0x80) & 0xff)
ret.append((x >> 42) & 0x7f)
elif ((x & 0xff00000000000000) == 0):
ret.append((x | 0x80) & 0xff)
ret.append(((x >> 7) | 0x80) & 0xff)
ret.append(((x >> 14) | 0x80) & 0xff)
ret.append(((x >> 21) | 0x80) & 0xff)
ret.append(((x >> 28) | 0x80) & 0xff)
ret.append(((x >> 35) | 0x80) & 0xff)
ret.append(((x >> 42) | 0x80) & 0xff)
ret.append((x >> 49) & 0x7f)
elif ((x & 0x8000000000000000) == 0):
ret.append((x | 0x80) & 0xff)
ret.append(((x >> 7) | 0x80) & 0xff)
ret.append(((x >> 14) | 0x80) & 0xff)
ret.append(((x >> 21) | 0x80) & 0xff)
ret.append(((x >> 28) | 0x80) & 0xff)
ret.append(((x >> 35) | 0x80) & 0xff)
ret.append(((x >> 42) | 0x80) & 0xff)
ret.append(((x >> 49) | 0x80) & 0xff)
ret.append((x >> 56) & 0x7f)
else:
ret.append((x | 0x80) & 0xff)
ret.append(((x >> 7) | 0x80) & 0xff)
ret.append(((x >> 14) | 0x80) & 0xff)
ret.append(((x >> 21) | 0x80) & 0xff)
ret.append(((x >> 28) | 0x80) & 0xff)
ret.append(((x >> 35) | 0x80) & 0xff)
ret.append(((x >> 42) | 0x80) & 0xff)
ret.append(((x >> 49) | 0x80) & 0xff)
ret.append(((x >> 56) | 0x80) & 0xff)
ret.append((x >> 63) & 0x7f)
return ret
# 4-byte signed little endian
def int32(x):
ret = []
ret.append(x & 0xff)
ret.append((x >> 8) & 0xff)
ret.append((x >> 16) & 0xff)
ret.append((x >> 24) & 0xff)
return ret
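# Hand-checked examples of the little-endian packing above:
#     int32(1)          -> [0x01, 0x00, 0x00, 0x00]
#     int32(0x12345678) -> [0x78, 0x56, 0x34, 0x12]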
# 2-byte signed little endian
def int16(x):
ret = []
ret.append(x & 0xff)
ret.append((x >> 8) & 0xff)
return ret
# a packed32 length followed by the specified number of characters
def stringList(x):
ret = []
ret += packed32(len(x))
for i in x:
ret.append(i)
return ret
def utf8StringList(x):
ret = []
for i in x:
ret.append(ord(i))
return ret
# packed64 time value in nanoseconds relative to the uptime from the
# Summary message.
def timestampList(x):
ret = packed64(x)
return ret
############################################################
# Write binary
############################################################
def writeBinary(outfile, binary_list):
for i in binary_list:
outfile.write("%c" % i)
############################################################
# APC Protocol Frame Types
############################################################
def addFrameHeader(frame_type, body, core):
ret = []
if frame_type == "Summary":
code = 1
elif frame_type == "Backtrace":
code = 2
elif frame_type == "Name":
code = 3
elif frame_type == "Counter":
code = 4
elif frame_type == "Block Counter":
code = 5
elif frame_type == "Annotate":
code = 6
elif frame_type == "Sched Trace":
code = 7
elif frame_type == "GPU Trace":
code = 8
elif frame_type == "Idle":
code = 9
else:
print "ERROR: Unknown frame type:", frame_type
sys.exit(1)
packed_code = packed32(code)
packed_core = packed32(core)
length = int32(len(packed_code) + len(packed_core) + len(body))
ret = length + packed_code + packed_core + body
return ret
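# Wire-format sketch of a frame as assembled above (values illustrative):
#     int32 length | packed32 type-code | packed32 core | body bytes
# where the length covers the type-code, core and body. For example, a
# "Counter" frame on core 0 starts with that little-endian length, then
# packed32(4), then packed32(0), followed by the counter payload.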
# Summary frame
# - timestamp: packed64
# - uptime: packed64
def summaryFrame(timestamp, uptime):
frame_type = "Summary"
newline_canary = stringList("1\n2\r\n3\r4\n\r5")
monotonic_delta = packed64(0)
end_of_attr = stringList("")
body = newline_canary + packed64(timestamp) + packed64(uptime)
body += monotonic_delta + end_of_attr
ret = addFrameHeader(frame_type, body, 0)
return ret
# Backtrace frame
# - not implemented yet
def backtraceFrame():
pass
# Cookie name message
# - cookie: packed32
# - name: string
def cookieNameFrame(cookie, name):
frame_type = "Name"
packed_code = packed32(1)
body = packed_code + packed32(cookie) + stringList(name)
ret = addFrameHeader(frame_type, body, 0)
return ret
# Thread name message
# - timestamp: timestamp
# - thread id: packed32
# - name: string
def threadNameFrame(timestamp, thread_id, name):
frame_type = "Name"
packed_code = packed32(2)
body = packed_code + timestampList(timestamp) + \
packed32(thread_id) + stringList(name)
ret = addFrameHeader(frame_type, body, 0)
return ret
# Core name message
# - name: string
# - core_id: packed32
# - cpuid: packed32
def coreNameFrame(name, core_id, cpuid):
frame_type = "Name"
packed_code = packed32(3)
body = packed_code + packed32(core_id) + packed32(cpuid) + stringList(name)
ret = addFrameHeader(frame_type, body, 0)
return ret
# IRQ Cookie name message
# - cookie: packed32
# - name: string
# - irq: packed32
def irqCookieNameFrame(cookie, name, irq):
frame_type = "Name"
packed_code = packed32(5)
body = packed_code + packed32(cookie) + stringList(name) + packed32(irq)
ret = addFrameHeader(frame_type, body, 0)
return ret
# Counter frame message
# - timestamp: timestamp
# - core: packed32
# - key: packed32
# - value: packed64
def counterFrame(timestamp, core, key, value):
frame_type = "Counter"
body = timestampList(timestamp) + packed32(core) + packed32(key) + \
packed64(value)
ret = addFrameHeader(frame_type, body, core)
return ret
# Block Counter frame message
# - key: packed32
# - value: packed64
def blockCounterFrame(core, key, value):
frame_type = "Block Counter"
body = packed32(key) + packed64(value)
ret = addFrameHeader(frame_type, body, core)
return ret
# Annotate frame messages
# - core: packed32
# - tid: packed32
# - timestamp: timestamp
# - size: packed32
# - body
def annotateFrame(core, tid, timestamp, size, userspace_body):
frame_type = "Annotate"
body = packed32(core) + packed32(tid) + timestampList(timestamp) + \
packed32(size) + userspace_body
ret = addFrameHeader(frame_type, body, core)
return ret
# Scheduler Trace frame messages
# Sched Switch
# - Code: 1
# - timestamp: timestamp
# - pid: packed32
# - tid: packed32
# - cookie: packed32
# - state: packed32
def schedSwitchFrame(core, timestamp, pid, tid, cookie, state):
frame_type = "Sched Trace"
body = packed32(1) + timestampList(timestamp) + packed32(pid) + \
packed32(tid) + packed32(cookie) + packed32(state)
ret = addFrameHeader(frame_type, body, core)
return ret
# Sched Thread Exit
# - Code: 2
# - timestamp: timestamp
# - tid: packed32
def schedThreadExitFrame(core, timestamp, pid, tid, cookie, state):
frame_type = "Sched Trace"
body = packed32(2) + timestampList(timestamp) + packed32(tid)
ret = addFrameHeader(frame_type, body, core)
return ret
# GPU Trace frame messages
# - Not implemented yet
def gpuTraceFrame():
pass
# Idle frame messages
# Enter Idle
# - code: 1
# - timestamp: timestamp
# - core: packed32
def enterIdleFrame(timestamp, core):
frame_type = "Idle"
body = packed32(1) + timestampList(timestamp) + packed32(core)
ret = addFrameHeader(frame_type, body, core)
return ret
# Exit Idle
# - code: 2
# - timestamp: timestamp
# - core: packed32
def exitIdleFrame(timestamp, core):
frame_type = "Idle"
body = packed32(2) + timestampList(timestamp) + packed32(core)
ret = addFrameHeader(frame_type, body, core)
return ret
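# Editor's sketch (illustrative only): a minimal capture stream is just these
# frames written back to back, starting with a Summary frame.  The timestamps
# and the counter key/value below are made-up example numbers.
def _example_minimal_stream(outfile):
    writeBinary(outfile, summaryFrame(0, 0))
    writeBinary(outfile, enterIdleFrame(1000, 0))
    writeBinary(outfile, counterFrame(2000, 0, 1, 123))
    writeBinary(outfile, exitIdleFrame(3000, 0))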
####################################################################
def parseProcessInfo(task_file):
print "\n==============================="
print "Parsing Task file..."
print task_file
print "===============================\n"
global start_tick, end_tick, num_cpus
global process_dict, thread_dict, process_list
global event_list, unified_event_list
global idle_uid, kernel_uid
event_list = []
unified_event_list = []
for cpu in range(num_cpus):
event_list.append([])
uid = 1 # uid 0 is reserved for idle
# Dummy Tasks for frame buffers and system diagrams
process = Task(uid, 9999, 9999, "framebuffer", True, 0)
process_list.append(process)
uid += 1
thread = Task(uid, 9999, 9999, "framebuffer", False, 0)
process.children.append(thread)
uid += 1
process = Task(uid, 9998, 9998, "System", True, 0)
process_list.append(process)
# if we don't find the real kernel, use this to keep things going
kernel_uid = uid
uid += 1
thread = Task(uid, 9998, 9998, "System", False, 0)
process.children.append(thread)
uid += 1
ext = os.path.splitext(task_file)[1]
try:
if ext == ".gz":
process_file = gzip.open(task_file, 'rb')
else:
process_file = open(task_file, 'rb')
except:
print "ERROR opening task file:", task_file
print "Make sure context switch task dumping is enabled in gem5."
sys.exit(1)
process_re = re.compile("tick=(\d+)\s+(\d+)\s+cpu_id=(\d+)\s+" +
"next_pid=([-\d]+)\s+next_tgid=([-\d]+)\s+next_task=(.*)")
task_name_failure_warned = False
for line in process_file:
match = re.match(process_re, line)
if match:
tick = int(match.group(1))
if (start_tick < 0):
start_tick = tick
cpu_id = int(match.group(3))
pid = int(match.group(4))
tgid = int(match.group(5))
task_name = match.group(6)
if not task_name_failure_warned:
if task_name == "FailureIn_curTaskName":
print "-------------------------------------------------"
print "WARNING: Task name not set correctly!"
print "Process/Thread info will not be displayed correctly"
print "Perhaps forgot to apply m5struct.patch to kernel?"
print "-------------------------------------------------"
task_name_failure_warned = True
if not tgid in process_dict:
if tgid == pid:
# new task is parent as well
if args.verbose:
print "new process", uid, pid, tgid, task_name
if tgid == 0:
# new process is the "idle" task
process = Task(uid, pid, tgid, "idle", True, tick)
idle_uid = 0
else:
process = Task(uid, pid, tgid, task_name, True, tick)
else:
if tgid == 0:
process = Task(uid, tgid, tgid, "idle", True, tick)
idle_uid = 0
else:
# parent process name not known yet
process = Task(uid, tgid, tgid, "_Unknown_", True, tick)
if tgid == -1: # kernel
kernel_uid = 0
uid += 1
process_dict[tgid] = process
process_list.append(process)
else:
if tgid == pid:
if process_dict[tgid].task_name == "_Unknown_":
if args.verbose:
print "new process", \
process_dict[tgid].uid, pid, tgid, task_name
process_dict[tgid].task_name = task_name
if process_dict[tgid].task_name != task_name and tgid != 0:
process_dict[tgid].task_name = task_name
if not pid in thread_dict:
if args.verbose:
print "new thread", \
uid, process_dict[tgid].uid, pid, tgid, task_name
thread = Task(uid, pid, tgid, task_name, False, tick)
uid += 1
thread_dict[pid] = thread
process_dict[tgid].children.append(thread)
else:
if thread_dict[pid].task_name != task_name:
thread_dict[pid].task_name = task_name
if args.verbose:
print tick, uid, cpu_id, pid, tgid, task_name
task = thread_dict[pid]
event = Event(tick, task)
event_list[cpu_id].append(event)
unified_event_list.append(event)
if len(unified_event_list) == num_events:
print "Truncating at", num_events, "events!"
break
print "Found %d events." % len(unified_event_list)
for process in process_list:
if process.pid > 9990: # fix up framebuffer ticks
process.tick = start_tick
print process.uid, process.pid, process.tgid, \
process.task_name, str(process.tick)
for thread in process.children:
if thread.pid > 9990:
thread.tick = start_tick
print "\t", thread.uid, thread.pid, thread.tgid, \
thread.task_name, str(thread.tick)
end_tick = tick
print "Start tick:", start_tick
print "End tick: ", end_tick
print ""
return
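# Editor's note (illustrative, not part of the original tool): parseProcessInfo()
# expects gem5 context-switch dump lines of the shape shown below.  The sample
# line is made up purely so that it matches process_re.
def _example_task_line():
    sample = ("tick=1000 0 cpu_id=0 "
              "next_pid=1234 next_tgid=1234 next_task=mythread")
    m = re.match("tick=(\d+)\s+(\d+)\s+cpu_id=(\d+)\s+" +
                 "next_pid=([-\d]+)\s+next_tgid=([-\d]+)\s+next_task=(.*)", sample)
    # groups: tick, sequence number, cpu_id, pid, tgid, task name
    assert m and m.group(6) == "mythread"
    return m.groups()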
def initOutput(output_path):
if not os.path.exists(output_path):
os.mkdir(output_path)
def ticksToNs(tick):
if ticks_in_ns < 0:
print "ticks_in_ns not set properly!"
sys.exit(1)
return tick / ticks_in_ns
def writeXmlFile(xml, filename):
f = open(filename, "w")
txt = ET.tostring(xml)
f.write(minidom.parseString(txt).toprettyxml())
f.close()
# StatsEntry that contains individual statistics
class StatsEntry(object):
def __init__(self, name, group, group_index, per_cpu, key):
# Full name of statistics
self.name = name
# Streamline group name that statistic will belong to
self.group = group
# Index of statistics within group (used to change colors within groups)
self.group_index = group_index
# Shorter name with "system" stripped off
# and symbols converted to alphanumerics
        self.short_name = re.sub("system\.", "", name)
        self.short_name = re.sub(":", "_", self.short_name)
# Regex for this stat (string version used to construct union regex)
self.regex_string = "^" + name + "\s+([\d\.]+)"
self.regex = re.compile("^" + name + "\s+([\d\.e\-]+)\s+# (.*)$", re.M)
self.description = ""
# Whether this stat is use per CPU or not
self.per_cpu = per_cpu
# Key used in .apc protocol (as described in captured.xml)
self.key = key
# List of values of stat per timestamp
self.values = []
# Whether this stat has been found for the current timestamp
self.found = False
# Whether this stat has been found at least once
# (to suppress too many warnings)
self.not_found_at_least_once = False
# Field used to hold ElementTree subelement for this stat
self.ET_element = None
# Create per-CPU stat name and regex, etc.
if self.per_cpu:
self.per_cpu_regex_string = []
self.per_cpu_regex = []
self.per_cpu_name = []
self.per_cpu_found = []
for i in range(num_cpus):
if num_cpus > 1:
per_cpu_name = re.sub("#", str(i), self.name)
else:
per_cpu_name = re.sub("#", "", self.name)
self.per_cpu_name.append(per_cpu_name)
print "\t", per_cpu_name
self.per_cpu_regex_string.\
append("^" + per_cpu_name + "\s+[\d\.]+")
self.per_cpu_regex.append(re.compile("^" + per_cpu_name + \
"\s+([\d\.e\-]+)\s+# (.*)$", re.M))
self.values.append([])
self.per_cpu_found.append(False)
def append_value(self, val, per_cpu_index = None):
if self.per_cpu:
self.values[per_cpu_index].append(str(val))
else:
self.values.append(str(val))
# Global stats object that contains the list of stats entries
# and other utility functions
class Stats(object):
def __init__(self):
self.stats_list = []
self.tick_list = []
self.next_key = 1
def register(self, name, group, group_index, per_cpu):
print "registering stat:", name, "group:", group, group_index
self.stats_list.append(StatsEntry(name, group, group_index, per_cpu, \
self.next_key))
self.next_key += 1
# Union of all stats to accelerate parsing speed
def createStatsRegex(self):
        regex_strings = []
print "\nnum entries in stats_list", len(self.stats_list)
for entry in self.stats_list:
if entry.per_cpu:
for i in range(num_cpus):
regex_strings.append(entry.per_cpu_regex_string[i])
else:
regex_strings.append(entry.regex_string)
self.regex = re.compile('|'.join(regex_strings))
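# Editor's sketch (illustrative only): the union regex built above lets
# readGem5Stats() reject most stats.txt lines with a single match attempt
# before trying the per-stat regexes.  The stat names and lines below are
# arbitrary examples.
def _example_union_regex():
    s = Stats()
    s.register("sim_seconds", "Time", 0, False)
    s.register("sim_insts", "Insts", 0, False)
    s.createStatsRegex()
    assert s.regex.match("sim_seconds 0.001 # seconds simulated")
    assert not s.regex.match("host_mem_usage 123456 # host memory")
    return s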
def registerStats(config_file):
print "==============================="
print "Parsing stats config.ini file..."
print config_file
print "==============================="
config = ConfigParser()
if not config.read(config_file):
print "ERROR: config file '", config_file, "' not found!"
sys.exit(1)
print "\nRegistering Stats..."
stats = Stats()
per_cpu_stat_groups = config.options('PER_CPU_STATS')
for group in per_cpu_stat_groups:
i = 0
per_cpu_stats_list = config.get('PER_CPU_STATS', group).split('\n')
for item in per_cpu_stats_list:
if item:
stats.register(item, group, i, True)
i += 1
per_l2_stat_groups = config.options('PER_L2_STATS')
for group in per_l2_stat_groups:
i = 0
per_l2_stats_list = config.get('PER_L2_STATS', group).split('\n')
for item in per_l2_stats_list:
if item:
for l2 in range(num_l2):
if num_l2 > 1:
name = re.sub("#", str(l2), item)
else:
name = re.sub("#", "", item)
stats.register(name, group, i, False)
i += 1
other_stat_groups = config.options('OTHER_STATS')
for group in other_stat_groups:
i = 0
other_stats_list = config.get('OTHER_STATS', group).split('\n')
for item in other_stats_list:
if item:
stats.register(item, group, i, False)
i += 1
stats.createStatsRegex()
return stats
# Parse and read in gem5 stats file
# Streamline counters are organized per CPU
def readGem5Stats(stats, gem5_stats_file):
print "\n==============================="
print "Parsing gem5 stats file..."
print gem5_stats_file
print "===============================\n"
ext = os.path.splitext(gem5_stats_file)[1]
window_start_regex = \
re.compile("^---------- Begin Simulation Statistics ----------")
window_end_regex = \
re.compile("^---------- End Simulation Statistics ----------")
final_tick_regex = re.compile("^final_tick\s+(\d+)")
global ticks_in_ns
sim_freq_regex = re.compile("^sim_freq\s+(\d+)")
sim_freq = -1
try:
if ext == ".gz":
f = gzip.open(gem5_stats_file, "r")
else:
f = open(gem5_stats_file, "r")
except:
print "ERROR opening stats file", gem5_stats_file, "!"
sys.exit(1)
stats_not_found_list = stats.stats_list[:]
window_num = 0
while (True):
error = False
try:
line = f.readline()
except IOError:
print ""
print "WARNING: IO error in stats file"
print "(gzip stream not closed properly?)...continuing for now"
error = True
if not line:
break
# Find out how many gem5 ticks in 1ns
if sim_freq < 0:
m = sim_freq_regex.match(line)
if m:
sim_freq = int(m.group(1)) # ticks in 1 sec
ticks_in_ns = int(sim_freq / 1e9)
print "Simulation frequency found! 1 tick == %e sec\n" \
% (1.0 / sim_freq)
# Final tick in gem5 stats: current absolute timestamp
m = final_tick_regex.match(line)
if m:
tick = int(m.group(1))
if tick > end_tick:
break
stats.tick_list.append(tick)
if (window_end_regex.match(line) or error):
if args.verbose:
print "new window"
for stat in stats.stats_list:
if stat.per_cpu:
for i in range(num_cpus):
if not stat.per_cpu_found[i]:
if not stat.not_found_at_least_once:
print "WARNING: stat not found in window #", \
window_num, ":", stat.per_cpu_name[i]
print "suppressing further warnings for " + \
"this stat"
stat.not_found_at_least_once = True
stat.values[i].append(str(0))
stat.per_cpu_found[i] = False
else:
if not stat.found:
if not stat.not_found_at_least_once:
print "WARNING: stat not found in window #", \
window_num, ":", stat.name
print "suppressing further warnings for this stat"
stat.not_found_at_least_once = True
stat.values.append(str(0))
stat.found = False
stats_not_found_list = stats.stats_list[:]
window_num += 1
if error:
break
# Do a single regex of the union of all stats first for speed
if stats.regex.match(line):
# Then loop through only the stats we haven't seen in this window
for stat in stats_not_found_list[:]:
if stat.per_cpu:
for i in range(num_cpus):
m = stat.per_cpu_regex[i].match(line)
if m:
if stat.name == "ipc":
value = str(int(float(m.group(1)) * 1000))
else:
value = str(int(float(m.group(1))))
if args.verbose:
print stat.per_cpu_name[i], value
stat.values[i].append(value)
stat.per_cpu_found[i] = True
all_found = True
for j in range(num_cpus):
if not stat.per_cpu_found[j]:
all_found = False
if all_found:
stats_not_found_list.remove(stat)
if stat.description == "":
stat.description = m.group(2)
else:
m = stat.regex.match(line)
if m:
value = str(int(float(m.group(1))))
if args.verbose:
print stat.name, value
stat.values.append(value)
stat.found = True
stats_not_found_list.remove(stat)
if stat.description == "":
stat.description = m.group(2)
f.close()
# Create session.xml file in .apc folder
def doSessionXML(output_path):
session_file = output_path + "/session.xml"
xml = ET.Element("session")
xml.set("version", "1")
xml.set("call_stack_unwinding", "no")
xml.set("parse_debug_info", "no")
xml.set("high_resolution", "yes")
xml.set("buffer_mode", "streaming")
xml.set("sample_rate", "low")
# Setting duration to zero for now. Doesn't affect visualization.
xml.set("duration", "0")
xml.set("target_host", "")
xml.set("target_port", "8080")
writeXmlFile(xml, session_file)
# Create captured.xml file in .apc folder
def doCapturedXML(output_path, stats):
captured_file = output_path + "/captured.xml"
xml = ET.Element("captured")
xml.set("version", "1")
xml.set("protocol", "17")
xml.set("backtrace_processing", "none")
target = ET.SubElement(xml, "target")
target.set("name", "gem5")
target.set("sample_rate", "1000")
target.set("cores", str(num_cpus))
counters = ET.SubElement(xml, "counters")
for stat in stats.stats_list:
s = ET.SubElement(counters, "counter")
stat_name = re.sub("\.", "_", stat.short_name)
stat_name = re.sub("#", "", stat_name)
s.set("title", stat.group)
s.set("name", stat_name)
s.set("color", "0x00000000")
s.set("key", "0x%08x" % stat.key)
s.set("type", stat_name)
s.set("event", "0x00000000")
if stat.per_cpu:
s.set("per_cpu", "yes")
else:
s.set("per_cpu", "no")
s.set("display", "")
s.set("units", "")
s.set("average_selection", "no")
s.set("description", stat.description)
writeXmlFile(xml, captured_file)
# Writes out Streamline cookies (unique IDs per process/thread)
def writeCookiesThreads(blob):
thread_list = []
for process in process_list:
if process.uid > 0:
print "cookie", process.task_name, process.uid
writeBinary(blob, cookieNameFrame(process.uid, process.task_name))
# pid and tgid need to be positive values -- no longer true?
for thread in process.children:
thread_list.append(thread)
# Threads need to be sorted in timestamp order
thread_list.sort(key = lambda x: x.tick)
for thread in thread_list:
print "thread", thread.task_name, (ticksToNs(thread.tick)),\
thread.tgid, thread.pid
writeBinary(blob, threadNameFrame(ticksToNs(thread.tick),\
thread.pid, thread.task_name))
# Writes context switch info as Streamline scheduling events
def writeSchedEvents(blob):
for cpu in range(num_cpus):
for event in event_list[cpu]:
timestamp = ticksToNs(event.tick)
pid = event.task.tgid
tid = event.task.pid
if process_dict.has_key(event.task.tgid):
cookie = process_dict[event.task.tgid].uid
else:
cookie = 0
# State:
# 0: waiting on other event besides I/O
# 1: Contention/pre-emption
# 2: Waiting on I/O
# 3: Waiting on mutex
# Hardcoding to 0 for now. Other states not implemented yet.
state = 0
if args.verbose:
print cpu, timestamp, pid, tid, cookie
writeBinary(blob,\
schedSwitchFrame(cpu, timestamp, pid, tid, cookie, state))
# Writes selected gem5 statistics as Streamline counters
def writeCounters(blob, stats):
timestamp_list = []
for tick in stats.tick_list:
if tick > end_tick:
break
timestamp_list.append(ticksToNs(tick))
for stat in stats.stats_list:
if stat.per_cpu:
stat_length = len(stat.values[0])
else:
stat_length = len(stat.values)
for n in range(len(timestamp_list)):
for stat in stats.stats_list:
if stat.per_cpu:
for i in range(num_cpus):
writeBinary(blob, counterFrame(timestamp_list[n], i, \
stat.key, int(float(stat.values[i][n]))))
else:
writeBinary(blob, counterFrame(timestamp_list[n], 0, \
stat.key, int(float(stat.values[n]))))
# Streamline can display LCD frame buffer dumps (gzipped bmp)
# This function converts the frame buffer dumps to the Streamline format
def writeVisualAnnotations(blob, input_path, output_path):
frame_path = input_path + "/frames_system.vncserver"
if not os.path.exists(frame_path):
return
frame_count = 0
file_list = os.listdir(frame_path)
file_list.sort()
re_fb = re.compile("fb\.(\d+)\.(\d+)\.bmp.gz")
# Use first non-negative pid to tag visual annotations
annotate_pid = -1
for e in unified_event_list:
pid = e.task.pid
if pid >= 0:
annotate_pid = pid
break
for fn in file_list:
m = re_fb.match(fn)
if m:
seq = m.group(1)
tick = int(m.group(2))
if tick > end_tick:
break
frame_count += 1
userspace_body = []
userspace_body += packed32(0x1C) # escape code
userspace_body += packed32(0x04) # visual code
text_annotation = "image_" + str(ticksToNs(tick)) + ".bmp.gz"
userspace_body += int16(len(text_annotation))
userspace_body += utf8StringList(text_annotation)
if gzipped_bmp_supported:
# copy gzipped bmp directly
bytes_read = open(frame_path + "/" + fn, "rb").read()
else:
# copy uncompressed bmp
bytes_read = gzip.open(frame_path + "/" + fn, "rb").read()
userspace_body += int32(len(bytes_read))
userspace_body += bytes_read
writeBinary(blob, annotateFrame(0, annotate_pid, ticksToNs(tick), \
len(userspace_body), userspace_body))
print "\nfound", frame_count, "frames for visual annotation.\n"
def createApcProject(input_path, output_path, stats):
initOutput(output_path)
blob = open(output_path + "/0000000000", "wb")
# Summary frame takes current system time and system uptime.
# Filling in with random values for now.
writeBinary(blob, summaryFrame(1234, 5678))
writeCookiesThreads(blob)
print "writing Events"
writeSchedEvents(blob)
print "writing Counters"
writeCounters(blob, stats)
print "writing Visual Annotations"
writeVisualAnnotations(blob, input_path, output_path)
doSessionXML(output_path)
doCapturedXML(output_path, stats)
blob.close()
#######################
# Main Routine
input_path = args.input_path
output_path = args.output_path
####
# Make sure input path exists
####
if not os.path.exists(input_path):
print "ERROR: Input path %s does not exist!" % input_path
sys.exit(1)
####
# Parse gem5 configuration file to find # of CPUs and L2s
####
(num_cpus, num_l2) = parseConfig(input_path + "/config.ini")
####
# Parse task file to find process/thread info
####
parseProcessInfo(input_path + "/system.tasks.txt")
####
# Parse stat config file and register stats
####
stat_config_file = args.stat_config_file
stats = registerStats(stat_config_file)
####
# Parse gem5 stats
####
# Check if both stats.txt and stats.txt.gz exist and warn if both exist
if os.path.exists(input_path + "/stats.txt") and \
os.path.exists(input_path + "/stats.txt.gz"):
print "WARNING: Both stats.txt.gz and stats.txt exist. \
Using stats.txt.gz by default."
gem5_stats_file = input_path + "/stats.txt.gz"
if not os.path.exists(gem5_stats_file):
gem5_stats_file = input_path + "/stats.txt"
if not os.path.exists(gem5_stats_file):
print "ERROR: stats.txt[.gz] file does not exist in %s!" % input_path
sys.exit(1)
readGem5Stats(stats, gem5_stats_file)
####
# Create Streamline .apc project folder
####
createApcProject(input_path, output_path, stats)
print "All done!"
| kaiyuanl/gem5 | util/streamline/m5stats2streamline.py | Python | bsd-3-clause | 42,084 | [
"VisIt"
] | 2b9bc9aac676a08ead6cd3c135469cfffe4bb25e74d59d3810d9536ef6fa15ac |
#import sys, os
#sys.path.insert(0, '/media/nmsutton/Ext3Drive/General/NEST/NEST/lib64/python3.4/site-packages')
#print (os.path.dirname(sys.executable))
#print (os.environ)
import pylab
import nest
print ("test")
neuron = nest.Create("iaf_neuron")
nest.GetStatus(neuron)
nest.GetStatus(neuron, "I_e")
print (nest.GetStatus(neuron, ["V_reset", "V_th"]))
nest.SetStatus(neuron, {"I_e": 376.0})
multimeter = nest.Create("multimeter")
nest.SetStatus(multimeter, {"withtime":True, "record_from":["V_m"]})
nest.SetStatus(neuron, {"V_m": 376.0})
print (nest.GetStatus(neuron, "V_m"))
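# Editor's sketch (not in the original script): to actually record V_m the
# multimeter would have to be connected to the neuron and the simulation run,
# roughly as in the standard NEST tutorial.  The 1000.0 ms duration is an
# arbitrary example value.
def _example_record_vm():
    nest.Connect(multimeter, neuron)
    nest.Simulate(1000.0)
    events = nest.GetStatus(multimeter)[0]["events"]
    return events["times"], events["V_m"]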
spikedetector = nest.Create("spike_detector",
params={"withgid": True, "withtime": True}) | nmsutton/MemoryModule | python_version/examples/nestTest.py | Python | mit | 693 | [
"NEURON"
] | 116d5d448f085debfd8d290233cf104c14341f60e9d3483cd06449145556c813 |
#!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
from pyscf import gto
from pyscf import scf
'''
Specify irrep_nelec to control the wave function symmetry
'''
mol = gto.Mole()
mol.build(
verbose = 0,
atom = '''
C 0. 0. 0.625
C 0. 0. -0.625 ''',
basis = 'cc-pVDZ',
spin = 0,
symmetry = True,
)
mf = scf.RHF(mol)
# Frozen occupancy
# 'A1g': 4 electrons
# 'E1gx': 2 electrons
# 'E1gy': 2 electrons
# Rest 4 electrons are put in irreps A1u, E1ux, E1uy ... based on Aufbau principle
# The irrep names can be found in pyscf/symm/param.py
mf.irrep_nelec = {'A1g': 4, 'E1gx': 2, 'E1gy': 2}
e = mf.kernel()
print('E = %.15g ref = -74.1112374269129' % e)
mol.symmetry = 'D2h'
mol.charge = 1
mol.spin = 1
mol.build(dump_input=False, parse_arg=False)
mf = scf.RHF(mol)
# Frozen occupancy
# 'Ag': 2 alpha, 1 beta electrons
# 'B1u': 4 electrons
# 'B2u': 2 electrons
# 'B3u': 2 electrons
mf.irrep_nelec = {'Ag': (2,1), 'B1u': 4, 'B2u': 2, 'B3u': 2,}
e = mf.kernel()
print('E = %.15g ref = -74.4026583773135' % e)
# Frozen occupancy
# 'Ag': 4 electrons
# 'B1u': 2 alpha, 1 beta electrons
# 'B2u': 2 electrons
# 'B3u': 2 electrons
mf.irrep_nelec = {'Ag': 4, 'B1u': (2,1), 'B2u': 2, 'B3u': 2,}
e = mf.kernel()
print('E = %.15g ref = -74.8971476600812' % e)
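# Editor's note (illustrative addition, not part of the original example): the
# resulting occupation pattern can be inspected after convergence, e.g. via the
# MO occupation vector or the built-in population analysis.
print('MO occupations: %s' % mf.mo_occ)
mf.analyze()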
| gkc1000/pyscf | examples/scf/13-symmetry.py | Python | apache-2.0 | 1,324 | [
"PySCF"
] | 1e05657a8616f7299ec08fbcfae6f552af6f9075690569e62988ad36d5bfe0d1 |
"""
Acceptance tests for the teams feature.
"""
import json
import random
import ddt
from flaky import flaky
from nose.plugins.attrib import attr
from uuid import uuid4
from ..helpers import UniqueCourseTest
from ...fixtures import LMS_BASE_URL
from ...fixtures.course import CourseFixture
from ...fixtures.discussion import (
Thread,
MultipleThreadFixture
)
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_info import CourseInfoPage
from ...pages.lms.learner_profile import LearnerProfilePage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.teams import TeamsPage, MyTeamsPage, BrowseTopicsPage, BrowseTeamsPage, CreateTeamPage, TeamPage
TOPICS_PER_PAGE = 12
class TeamsTabBase(UniqueCourseTest):
"""Base class for Teams Tab tests"""
def setUp(self):
super(TeamsTabBase, self).setUp()
self.tab_nav = TabNavPage(self.browser)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.teams_page = TeamsPage(self.browser, self.course_id)
def create_topics(self, num_topics):
"""Create `num_topics` test topics."""
return [{u"description": i, u"name": i, u"id": i} for i in map(str, xrange(num_topics))]
def create_teams(self, topic, num_teams):
"""Create `num_teams` teams belonging to `topic`."""
teams = []
for i in xrange(num_teams):
team = {
'course_id': self.course_id,
'topic_id': topic['id'],
'name': 'Team {}'.format(i),
'description': 'Description {}'.format(i),
'language': 'aa',
'country': 'AF'
}
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/teams/',
data=json.dumps(team),
headers=self.course_fixture.headers
)
teams.append(json.loads(response.text))
return teams
def create_membership(self, username, team_id):
"""Assign `username` to `team_id`."""
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/team_membership/',
data=json.dumps({'username': username, 'team_id': team_id}),
headers=self.course_fixture.headers
)
return json.loads(response.text)
def set_team_configuration(self, configuration, enroll_in_course=True, global_staff=False):
"""
Sets team configuration on the course and calls auto-auth on the user.
"""
#pylint: disable=attribute-defined-outside-init
self.course_fixture = CourseFixture(**self.course_info)
if configuration:
self.course_fixture.add_advanced_settings(
{u"teams_configuration": {u"value": configuration}}
)
self.course_fixture.install()
enroll_course_id = self.course_id if enroll_in_course else None
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, course_id=enroll_course_id, staff=global_staff).visit().user_info
self.course_info_page.visit()
def verify_teams_present(self, present):
"""
Verifies whether or not the teams tab is present. If it should be present, also
checks the text on the page (to ensure view is working).
"""
if present:
self.assertIn("Teams", self.tab_nav.tab_names)
self.teams_page.visit()
self.assertEqual(self.teams_page.active_tab(), 'browse')
else:
self.assertNotIn("Teams", self.tab_nav.tab_names)
def verify_teams(self, page, expected_teams):
"""Verify that the list of team cards on the current page match the expected teams in order."""
def assert_team_equal(expected_team, team_card_name, team_card_description):
"""
Helper to assert that a single team card has the expected name and
description.
"""
self.assertEqual(expected_team['name'], team_card_name)
self.assertEqual(expected_team['description'], team_card_description)
team_cards = page.team_cards
team_card_names = [
team_card.find_element_by_css_selector('.card-title').text
for team_card in team_cards.results
]
team_card_descriptions = [
team_card.find_element_by_css_selector('.card-description').text
for team_card in team_cards.results
]
map(assert_team_equal, expected_teams, team_card_names, team_card_descriptions)
def verify_my_team_count(self, expected_number_of_teams):
""" Verify the number of teams shown on "My Team". """
# We are doing these operations on this top-level page object to avoid reloading the page.
self.teams_page.verify_my_team_count(expected_number_of_teams)
@ddt.ddt
@attr('shard_5')
class TeamsTabTest(TeamsTabBase):
"""
Tests verifying when the Teams tab is present.
"""
def test_teams_not_enabled(self):
"""
Scenario: teams tab should not be present if no team configuration is set
Given I am enrolled in a course without team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration(None)
self.verify_teams_present(False)
def test_teams_not_enabled_no_topics(self):
"""
Scenario: teams tab should not be present if team configuration does not specify topics
Given I am enrolled in a course with no topics in the team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": []})
self.verify_teams_present(False)
def test_teams_not_enabled_not_enrolled(self):
"""
Scenario: teams tab should not be present if student is not enrolled in the course
Given there is a course with team configuration and topics
And I am not enrolled in that course, and am not global staff
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration(
{u"max_team_size": 10, u"topics": self.create_topics(1)},
enroll_in_course=False
)
self.verify_teams_present(False)
def test_teams_enabled(self):
"""
Scenario: teams tab should be present if user is enrolled in the course and it has team configuration
Given I am enrolled in a course with team configuration and topics
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(1)})
self.verify_teams_present(True)
def test_teams_enabled_global_staff(self):
"""
Scenario: teams tab should be present if user is not enrolled in the course, but is global staff
Given there is a course with team configuration
And I am not enrolled in that course, but am global staff
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration(
{u"max_team_size": 10, u"topics": self.create_topics(1)},
enroll_in_course=False,
global_staff=True
)
self.verify_teams_present(True)
@ddt.data(
('browse', '.topics-list'),
# TODO: find a reliable way to match the "My Teams" tab
# ('my-teams', 'div.teams-list'),
('teams/{topic_id}/{team_id}', 'div.discussion-module'),
('topics/{topic_id}/create-team', 'div.create-team-instructions'),
('topics/{topic_id}', '.teams-list'),
('not-a-real-route', 'div.warning')
)
@ddt.unpack
def test_url_routing(self, route, selector):
"""Ensure that navigating to a URL route correctly updates the page
content.
"""
topics = self.create_topics(1)
topic = topics[0]
self.set_team_configuration({
u'max_team_size': 10,
u'topics': topics
})
team = self.create_teams(topic, 1)[0]
self.teams_page.visit()
# Get the base URL (the URL without any trailing fragment)
url = self.browser.current_url
fragment_index = url.find('#')
if fragment_index >= 0:
url = url[0:fragment_index]
self.browser.get(
'{url}#{route}'.format(
url=url,
route=route.format(
topic_id=topic['id'],
team_id=team['id']
))
)
self.teams_page.wait_for_ajax()
self.assertTrue(self.teams_page.q(css=selector).present)
self.assertTrue(self.teams_page.q(css=selector).visible)
@attr('shard_5')
class MyTeamsTest(TeamsTabBase):
"""
Tests for the "My Teams" tab of the Teams page.
"""
def setUp(self):
super(MyTeamsTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.my_teams_page = MyTeamsPage(self.browser, self.course_id)
def test_not_member_of_any_teams(self):
"""
Scenario: Visiting the My Teams page when user is not a member of any team should not display any teams.
Given I am enrolled in a course with a team configuration and a topic but am not a member of a team
When I visit the My Teams page
And I should see no teams
And I should see a message that I belong to no teams.
"""
self.my_teams_page.visit()
self.assertEqual(len(self.my_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertEqual(
self.my_teams_page.q(css='.page-content-main').text,
[u'You are not currently a member of any team.']
)
def test_member_of_a_team(self):
"""
Scenario: Visiting the My Teams page when user is a member of a team should display the teams.
Given I am enrolled in a course with a team configuration and a topic and am a member of a team
When I visit the My Teams page
Then I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should not see a pagination footer
"""
teams = self.create_teams(self.topic, 1)
self.create_membership(self.user_info['username'], teams[0]['id'])
self.my_teams_page.visit()
self.verify_teams(self.my_teams_page, teams)
@attr('shard_5')
@ddt.ddt
class BrowseTopicsTest(TeamsTabBase):
"""
Tests for the Browse tab of the Teams page.
"""
def setUp(self):
super(BrowseTopicsTest, self).setUp()
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
@ddt.data(('name', False), ('team_count', True))
@ddt.unpack
def test_sort_topics(self, sort_order, reverse):
"""
Scenario: the user should be able to sort the list of topics by name or team count
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
When I choose a sort order
Then I should see the paginated list of topics in that order
"""
topics = self.create_topics(TOPICS_PER_PAGE + 1)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
for i, topic in enumerate(random.sample(topics, len(topics))):
self.create_teams(topic, i)
topic['team_count'] = i
self.topics_page.visit()
self.topics_page.sort_topics_by(sort_order)
topic_names = self.topics_page.topic_names
self.assertEqual(len(topic_names), TOPICS_PER_PAGE)
self.assertEqual(
topic_names,
[t['name'] for t in sorted(topics, key=lambda t: t[sort_order], reverse=reverse)][:TOPICS_PER_PAGE]
)
def test_sort_topics_update(self):
"""
Scenario: the list of topics should remain sorted after updates
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics and choose a sort order
Then I should see the paginated list of topics in that order
When I create a team in one of those topics
And I return to the topics list
Then I should see the topics in the correct sorted order
"""
topics = self.create_topics(3)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
self.topics_page.visit()
self.topics_page.sort_topics_by('team_count')
topic_name = self.topics_page.topic_names[-1]
topic = [t for t in topics if t['name'] == topic_name][0]
self.topics_page.browse_teams_for_topic(topic_name)
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
self.assertTrue(browse_teams_page.is_browser_on_page())
browse_teams_page.click_create_team_link()
create_team_page = CreateTeamPage(self.browser, self.course_id, topic)
create_team_page.value_for_text_field(field_id='name', value='Team Name', press_enter=False)
create_team_page.value_for_textarea_field(
field_id='description',
value='Team description.'
)
create_team_page.submit_form()
team_page = TeamPage(self.browser, self.course_id)
self.assertTrue(team_page.is_browser_on_page)
team_page.click_all_topics_breadcrumb()
self.assertTrue(self.topics_page.is_browser_on_page())
self.assertEqual(topic_name, self.topics_page.topic_names[0])
def test_list_topics(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(2)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), 2)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-2 out of 2 total'))
self.assertFalse(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_topic_pagination(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab, paginated 12 per page
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see only the first 12 topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(20)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 20 total'))
self.assertTrue(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertTrue(self.topics_page.is_next_page_button_enabled())
def test_go_to_numbered_page(self):
"""
Scenario: topics should be able to be navigated by page number
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter a valid page number in the page number input
Then I should see that page of topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(25)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_go_to_invalid_page(self):
"""
Scenario: browsing topics should not respond to invalid page numbers
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter an invalid page number in the page number input
Then I should stay on the current page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(self.topics_page.get_current_page_number(), 1)
def test_page_navigation_buttons(self):
"""
        Scenario: the user should be able to navigate pages of topics using the next and previous buttons
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
When I press the next page button
Then I should move to the next page
When I press the previous page button
Then I should move to the previous page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.press_next_page_button()
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 13-13 out of 13 total'))
self.topics_page.press_previous_page_button()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 13 total'))
def test_topic_description_truncation(self):
"""
Scenario: excessively long topic descriptions should be truncated so
as to fit within a topic card.
Given I am enrolled in a course with a team configuration and a topic
with a long description
When I visit the Teams page
And I browse topics
Then I should see a truncated topic description
"""
initial_description = "A" + " really" * 50 + " long description"
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [{"name": "", "id": "", "description": initial_description}]}
)
self.topics_page.visit()
truncated_description = self.topics_page.topic_cards[0].text
self.assertLess(len(truncated_description), len(initial_description))
self.assertTrue(truncated_description.endswith('...'))
self.assertIn(truncated_description.split('...')[0], initial_description)
def test_go_to_teams_list(self):
"""
Scenario: Clicking on a Topic Card should take you to the
teams list for that Topic.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page
And I browse topics
And I click on the arrow link to view teams for the first topic
Then I should be on the browse teams page
"""
topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [topic]}
)
self.topics_page.visit()
self.topics_page.browse_teams_for_topic('Example Topic')
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
self.assertTrue(browse_teams_page.is_browser_on_page())
self.assertEqual(browse_teams_page.header_topic_name, 'Example Topic')
self.assertEqual(browse_teams_page.header_topic_description, 'Description')
@attr('shard_5')
class BrowseTeamsWithinTopicTest(TeamsTabBase):
"""
Tests for browsing Teams within a Topic on the Teams page.
"""
TEAMS_PAGE_SIZE = 10
def setUp(self):
super(BrowseTeamsWithinTopicTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
def verify_page_header(self):
"""Verify that the page header correctly reflects the current topic's name and description."""
self.assertEqual(self.browse_teams_page.header_topic_name, self.topic['name'])
self.assertEqual(self.browse_teams_page.header_topic_description, self.topic['description'])
def verify_on_page(self, page_num, total_teams, pagination_header_text, footer_visible):
"""
Verify that we are on the correct team list page.
Arguments:
page_num (int): The one-indexed page we expect to be on
total_teams (list): An unsorted list of all the teams for the
current topic
pagination_header_text (str): Text we expect to see in the
pagination header.
footer_visible (bool): Whether we expect to see the pagination
footer controls.
"""
alphabetized_teams = sorted(total_teams, key=lambda team: team['name'])
self.assertEqual(self.browse_teams_page.get_pagination_header_text(), pagination_header_text)
self.verify_teams(
self.browse_teams_page,
alphabetized_teams[(page_num - 1) * self.TEAMS_PAGE_SIZE:page_num * self.TEAMS_PAGE_SIZE]
)
self.assertEqual(
self.browse_teams_page.pagination_controls_visible(),
footer_visible,
            msg='Expected paging footer to be ' + ('visible' if footer_visible else 'invisible')
)
def test_no_teams(self):
"""
Scenario: Visiting a topic with no teams should not display any teams.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing no teams
And I should see no teams
And I should see a button to add a team
And I should not see a pagination footer
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.assertEqual(self.browse_teams_page.get_pagination_header_text(), 'Showing 0 out of 0 total')
self.assertEqual(len(self.browse_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_one_page(self):
"""
        Scenario: Visiting a topic with fewer teams than the page size should
        display all those teams on one page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should see a button to add a team
And I should not see a pagination footer
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE)
self.browse_teams_page.visit()
self.verify_page_header()
self.assertEqual(self.browse_teams_page.get_pagination_header_text(), 'Showing 1-10 out of 10 total')
self.verify_teams(self.browse_teams_page, teams)
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_navigation_buttons(self):
"""
Scenario: The user should be able to page through a topic's team list
using navigation buttons when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I click on the next page button
Then I should see that I am on the second page of results
And when I click on the previous page button
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(1, teams, 'Showing 1-10 out of 11 total', True)
self.browse_teams_page.press_next_page_button()
self.verify_on_page(2, teams, 'Showing 11-11 out of 11 total', True)
self.browse_teams_page.press_previous_page_button()
self.verify_on_page(1, teams, 'Showing 1-10 out of 11 total', True)
def test_teams_page_input(self):
"""
Scenario: The user should be able to page through a topic's team list
using the page input when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I input the second page
Then I should see that I am on the second page of results
When I input the first page
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 10)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(1, teams, 'Showing 1-10 out of 20 total', True)
self.browse_teams_page.go_to_page(2)
self.verify_on_page(2, teams, 'Showing 11-20 out of 20 total', True)
self.browse_teams_page.go_to_page(1)
self.verify_on_page(1, teams, 'Showing 1-10 out of 20 total', True)
def test_navigation_links(self):
"""
Scenario: User should be able to navigate to "browse all teams" and "search team description" links.
Given I am enrolled in a course with a team configuration and a topic
containing one team
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see the link to "browse all team"
And I should navigate to that link
And I see the relevant page loaded
And I should see the link to "search teams"
And I should navigate to that link
And I see the relevant page loaded
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.browse_teams_page.click_browse_all_teams_link()
self.assertTrue(self.topics_page.is_browser_on_page())
self.browse_teams_page.visit()
self.verify_page_header()
self.browse_teams_page.click_search_team_link()
        # TODO Add search page expectation once that is implemented.
@attr('shard_5')
class CreateTeamTest(TeamsTabBase):
"""
Tests for creating a new Team within a Topic on the Teams page.
"""
def setUp(self):
super(CreateTeamTest, self).setUp()
self.topic = {'name': 'Example Topic', 'id': 'example_topic', 'description': 'Description'}
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.browse_teams_page.visit()
self.create_team_page = CreateTeamPage(self.browser, self.course_id, self.topic)
self.team_name = 'Avengers'
def verify_page_header(self):
"""
Verify that the page header correctly reflects the
create team header, description and breadcrumb.
"""
self.assertEqual(self.create_team_page.header_page_name, 'Create a New Team')
self.assertEqual(
self.create_team_page.header_page_description,
'Create a new team if you can\'t find existing teams to join, '
'or if you would like to learn with friends you know.'
)
self.assertEqual(self.create_team_page.header_page_breadcrumbs, self.topic['name'])
def verify_and_navigate_to_create_team_page(self):
"""Navigates to the create team page and verifies."""
self.browse_teams_page.click_create_team_link()
self.verify_page_header()
def fill_create_form(self):
"""Fill the create team form fields with appropriate values."""
self.create_team_page.value_for_text_field(field_id='name', value=self.team_name, press_enter=False)
self.create_team_page.value_for_textarea_field(
field_id='description',
value='The Avengers are a fictional team of superheroes.'
)
self.create_team_page.value_for_dropdown_field(field_id='language', value='English')
self.create_team_page.value_for_dropdown_field(field_id='country', value='Pakistan')
def test_user_can_see_create_team_page(self):
"""
Scenario: The user should be able to see the create team page via teams list page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the Create Team page link on bottom
And When I click create team link
Then I should see the create team page.
And I should see the create team header
And I should also see the help messages for fields.
"""
self.verify_and_navigate_to_create_team_page()
self.assertEqual(
self.create_team_page.message_for_field('name'),
'A name that identifies your team (maximum 255 characters).'
)
self.assertEqual(
self.create_team_page.message_for_textarea_field('description'),
'A short description of the team to help other learners understand '
'the goals or direction of the team (maximum 300 characters).'
)
self.assertEqual(
self.create_team_page.message_for_field('country'),
'The country that team members primarily identify with.'
)
self.assertEqual(
self.create_team_page.message_for_field('language'),
'The language that team members primarily use to communicate with each other.'
)
def test_user_can_see_error_message_for_missing_data(self):
"""
Scenario: The user should be able to see error message in case of missing required field.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
And When I click create team button without filling required fields
Then I should see the error message and highlighted fields.
"""
self.verify_and_navigate_to_create_team_page()
self.create_team_page.submit_form()
self.assertEqual(
self.create_team_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.create_team_page.error_for_field(field_id='name'))
self.assertTrue(self.create_team_page.error_for_field(field_id='description'))
def test_user_can_see_error_message_for_incorrect_data(self):
"""
Scenario: The user should be able to see error message in case of increasing length for required fields.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I add text > than 255 characters for name field
And I click Create button
Then I should see the error message for exceeding length.
"""
self.verify_and_navigate_to_create_team_page()
# Fill the name field with >255 characters to see validation message.
self.create_team_page.value_for_text_field(
field_id='name',
value='EdX is a massive open online course (MOOC) provider and online learning platform. '
'It hosts online university-level courses in a wide range of disciplines to a worldwide '
'audience, some at no charge. It also conducts research into learning based on how '
'people use its platform. EdX was created for students and institutions that seek to'
'transform themselves through cutting-edge technologies, innovative pedagogy, and '
'rigorous courses. More than 70 schools, nonprofits, corporations, and international'
'organizations offer or plan to offer courses on the edX website. As of 22 October 2014,'
'edX has more than 4 million users taking more than 500 courses online.',
press_enter=False
)
self.create_team_page.submit_form()
self.assertEqual(
self.create_team_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.create_team_page.error_for_field(field_id='name'))
def test_user_can_create_new_team_successfully(self):
"""
Scenario: The user should be able to create new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I fill all the fields present with appropriate data
And I click Create button
Then I should see the page for my team
And I should see the message that says "You are member of this team"
And the new team should be added to the list of teams within the topic
And the number of teams should be updated on the topic card
And if I switch to "My Team", the newly created team is displayed
"""
self.verify_and_navigate_to_create_team_page()
self.fill_create_form()
self.create_team_page.submit_form()
# Verify that the page is shown for the new team
team_page = TeamPage(self.browser, self.course_id)
team_page.wait_for_page()
self.assertEqual(team_page.team_name, self.team_name)
self.assertEqual(team_page.team_description, 'The Avengers are a fictional team of superheroes.')
self.assertEqual(team_page.team_user_membership_text, 'You are a member of this team.')
# Verify the new team was added to the topic list
self.teams_page.click_specific_topic("Example Topic")
self.teams_page.verify_topic_team_count(1)
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(1)
# Verify that if one switches to "My Team" without reloading the page, the newly created team is shown.
self.verify_my_team_count(1)
def test_user_can_cancel_the_team_creation(self):
"""
Scenario: The user should be able to cancel the creation of new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I click Cancel button
Then I should see teams list page without any new team.
And if I switch to "My Team", it shows no teams
"""
self.assertEqual(self.browse_teams_page.get_pagination_header_text(), 'Showing 0 out of 0 total')
self.verify_and_navigate_to_create_team_page()
self.create_team_page.cancel_team()
self.assertTrue(self.browse_teams_page.is_browser_on_page())
self.assertEqual(self.browse_teams_page.get_pagination_header_text(), 'Showing 0 out of 0 total')
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(0)
self.verify_my_team_count(0)
@attr('shard_5')
@ddt.ddt
class TeamPageTest(TeamsTabBase):
"""Tests for viewing a specific team"""
SEND_INVITE_TEXT = 'Send this link to friends so that they can join too.'
def setUp(self):
super(TeamPageTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
def _set_team_configuration_and_membership(
self,
max_team_size=10,
membership_team_index=0,
visit_team_index=0,
create_membership=True,
another_user=False):
"""
Set team configuration.
Arguments:
max_team_size (int): number of users a team can have
membership_team_index (int): index of team user will join
visit_team_index (int): index of team user will visit
create_membership (bool): whether to create membership or not
another_user (bool): another user to visit a team
"""
#pylint: disable=attribute-defined-outside-init
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': max_team_size, 'topics': [self.topic]}
)
self.teams = self.create_teams(self.topic, 2)
if create_membership:
self.create_membership(self.user_info['username'], self.teams[membership_team_index]['id'])
if another_user:
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.team_page = TeamPage(self.browser, self.course_id, self.teams[visit_team_index])
def setup_thread(self):
"""
Create and return a thread for this test's discussion topic.
"""
thread = Thread(
id="test_thread_{}".format(uuid4().hex),
commentable_id=self.teams[0]['discussion_topic_id'],
body="Dummy text body."
)
thread_fixture = MultipleThreadFixture([thread])
thread_fixture.push()
return thread
def setup_discussion_user(self, role=None, staff=False):
"""Set this test's user to have the given role in its
discussions. Role is one of 'Community TA', 'Moderator',
'Administrator', or 'Student'.
"""
kwargs = {
'course_id': self.course_id,
'staff': staff
}
if role is not None:
kwargs['roles'] = role
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, **kwargs).visit().user_info
def verify_teams_discussion_permissions(self, should_have_permission):
"""Verify that the teams discussion component is in the correct state
for the test user. If `should_have_permission` is True, assert that
the user can see controls for posting replies, voting, editing, and
deleting. Otherwise, assert that those controls are hidden.
"""
thread = self.setup_thread()
self.team_page.visit()
self.assertEqual(self.team_page.discussion_id, self.teams[0]['discussion_topic_id'])
discussion = self.team_page.discussion_page
self.assertTrue(discussion.is_browser_on_page())
self.assertTrue(discussion.is_discussion_expanded())
self.assertEqual(discussion.get_num_displayed_threads(), 1)
self.assertTrue(discussion.has_thread(thread['id']))
assertion = self.assertTrue if should_have_permission else self.assertFalse
assertion(discussion.q(css='.post-header-actions').present)
assertion(discussion.q(css='.add-response').present)
assertion(discussion.q(css='.new-post-btn').present)
def test_discussion_on_my_team_page(self):
"""
Scenario: Team Page renders a discussion for a team to which I belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the existing thread
And I should see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership()
self.verify_teams_discussion_permissions(True)
@ddt.data(True, False)
def test_discussion_on_other_team_page(self, is_staff):
"""
Scenario: Team Page renders a team discussion for a team to which I do
not belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the team's thread
And I should not see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(staff=is_staff)
self.verify_teams_discussion_permissions(False)
@ddt.data('Moderator', 'Community TA', 'Administrator')
def test_discussion_privileged(self, role):
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(role=role)
self.verify_teams_discussion_permissions(True)
def assert_team_details(self, num_members, is_member=True, max_size=10):
"""
Verifies that the user can see all the information present on the detail page, according to their membership status.
Arguments:
num_members (int): number of users in a team
is_member (bool) default True: True if the requesting user is a member, else False
max_size (int): number of users a team can have
"""
self.assertEqual(
self.team_page.team_capacity_text,
self.team_page.format_capacity_text(num_members, max_size)
)
self.assertEqual(self.team_page.team_location, 'Afghanistan')
self.assertEqual(self.team_page.team_language, 'Afar')
self.assertEqual(self.team_page.team_members, num_members)
if num_members > 0:
self.assertTrue(self.team_page.team_members_present)
else:
self.assertFalse(self.team_page.team_members_present)
if is_member:
self.assertEqual(self.team_page.team_user_membership_text, 'You are a member of this team.')
self.assertTrue(self.team_page.team_leave_link_present)
self.assertTrue(self.team_page.new_post_button_present)
else:
self.assertEqual(self.team_page.team_user_membership_text, '')
self.assertFalse(self.team_page.team_leave_link_present)
self.assertFalse(self.team_page.new_post_button_present)
def test_team_member_can_see_full_team_details(self):
"""
Scenario: Team member can see full info for team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see the full team detail
And I should see the team members
And I should see my team membership text
And I should see the language & country
And I should see the Leave Team and Invite Team
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assert_team_details(
num_members=1,
)
def test_other_users_can_see_limited_team_details(self):
"""
Scenario: Users who are not members of this team can only see limited info for this team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When I visit the Team page for that team
Then I should not see full team detail
And I should see the team members
And I should not see my team membership text
And I should not see the Leave Team and Invite Team links
"""
self._set_team_configuration_and_membership(create_membership=False)
self.team_page.visit()
self.assert_team_details(is_member=False, num_members=0)
def test_user_can_navigate_to_members_profile_page(self):
"""
Scenario: User can navigate to profile page via team member profile image.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see profile images for the team members
When I click on the first profile image
Then I should be taken to the user's profile page
And I should see the username on profile page
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
learner_name = self.team_page.first_member_username
self.team_page.click_first_profile_image()
learner_profile_page = LearnerProfilePage(self.browser, learner_name)
learner_profile_page.wait_for_page()
learner_profile_page.wait_for_field('username')
self.assertTrue(learner_profile_page.field_is_visible('username'))
def test_join_team(self):
"""
Scenario: User can join a Team if not already a member.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I visit the Team page for that team
Then I should see Join Team button
And I should not see New Post button
When I click on Join Team button
Then there should be no Join Team button and no message
And I should see the updated information under Team Details
And I should see New Post button
And if I switch to "My Team", the team I have joined is displayed
"""
self._set_team_configuration_and_membership(create_membership=False)
self.team_page.visit()
self.assertTrue(self.team_page.join_team_button_present)
self.team_page.click_join_team_button()
self.assertFalse(self.team_page.join_team_button_present)
self.assertFalse(self.team_page.join_team_message_present)
self.assert_team_details(num_members=1, is_member=True)
# Verify that if one switches to "My Team" without reloading the page, the newly created team is shown.
self.teams_page.click_all_topics()
self.verify_my_team_count(1)
def test_already_member_message(self):
"""
Scenario: User should see `You are already in a team` if the user is
a member of another team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am already a member of a team
And I visit a team other than mine
Then I should see `You are already in a team` message
"""
self._set_team_configuration_and_membership(membership_team_index=0, visit_team_index=1)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'You already belong to another team.')
self.assert_team_details(num_members=0, is_member=False)
def test_team_full_message(self):
"""
Scenario: User should see `Team is full` message when team is full.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And team has no space left
And I am not a member of any team
And I visit the team
Then I should see `Team is full` message
"""
self._set_team_configuration_and_membership(
create_membership=True,
max_team_size=1,
membership_team_index=0,
visit_team_index=0,
another_user=True
)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'This team is full.')
self.assert_team_details(num_members=1, is_member=False, max_size=1)
def test_leave_team(self):
"""
Scenario: User can leave a team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am a member of team
And I visit the team
And I should not see Join Team button
And I should see New Post button
Then I should see Leave Team link
When I click on Leave Team link
Then user should be removed from team
And I should see Join Team button
And I should not see New Post button
And if I switch to "My Team", the team I have left is not displayed
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assertFalse(self.team_page.join_team_button_present)
self.assert_team_details(num_members=1)
self.team_page.click_leave_team_link()
self.assert_team_details(num_members=0, is_member=False)
self.assertTrue(self.team_page.join_team_button_present)
# Verify that if one switches to "My Team" without reloading the page, the old team no longer shows.
self.teams_page.click_all_topics()
self.verify_my_team_count(0)
| zerobatu/edx-platform | common/test/acceptance/tests/lms/test_teams.py | Python | agpl-3.0 | 50,559 | [
"VisIt"
] | 8617aedf98c8d8b02d476a672f3dd751a0e4affd5c7a96f378223f10c7e26bad |
""" This handler basically provides a REST interface to interact with the OAuth 2 authentication server
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN Auth:
:end-before: ##END
:dedent: 2
:caption: Auth options
"""
import json
import pprint
from dominate import tags as dom
from DIRAC import gConfig
from DIRAC.Core.Tornado.Server.TornadoREST import location, TornadoREST
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getIdPForGroup, getGroupsForUser
from DIRAC.FrameworkSystem.private.authorization.AuthServer import AuthServer
from DIRAC.FrameworkSystem.private.authorization.utils.Requests import createOAuth2Request
from DIRAC.FrameworkSystem.private.authorization.grants.DeviceFlow import DeviceAuthorizationEndpoint
from DIRAC.FrameworkSystem.private.authorization.grants.RevokeToken import RevocationEndpoint
from DIRAC.FrameworkSystem.private.authorization.utils.Utilities import getHTML
class AuthHandler(TornadoREST):
# Authorization access to all methods handled by AuthServer instance
DEFAULT_AUTHENTICATION = ["JWT", "VISITOR"]
DEFAULT_AUTHORIZATION = "all"
DEFAULT_LOCATION = "/auth"
@classmethod
def initializeHandler(cls, serviceInfo):
"""This method is called only one time, at the first request
:param dict ServiceInfoDict: infos about services
"""
cls.server = AuthServer()
cls.server.LOCATION = cls.DEFAULT_LOCATION
def initializeRequest(self):
"""Called at every request"""
self.currentPath = self.request.protocol + "://" + self.request.host + self.request.path
@location(".well-known/(?:oauth-authorization-server|openid-configuration)")
def get_index(self):
"""Well known endpoint, specified by
`RFC8414 <https://tools.ietf.org/html/rfc8414#section-3>`_
Request examples::
GET: LOCATION/.well-known/openid-configuration
GET: LOCATION/.well-known/oauth-authorization-server
Response::
HTTP/1.1 200 OK
Content-Type: application/json
{
"registration_endpoint": "https://domain.com/auth/register",
"userinfo_endpoint": "https://domain.com/auth/userinfo",
"jwks_uri": "https://domain.com/auth/jwk",
"code_challenge_methods_supported": [
"S256"
],
"grant_types_supported": [
"authorization_code",
"code",
"refresh_token"
],
"token_endpoint": "https://domain.com/auth/token",
"response_types_supported": [
"code",
"device",
"id_token token",
"id_token",
"token"
],
"authorization_endpoint": "https://domain.com/auth/authorization",
"issuer": "https://domain.com/auth"
}
"""
if self.request.method == "GET":
resDict = dict(
setups=gConfig.getSections("DIRAC/Setups").get("Value", []),
configuration_server=gConfig.getValue("/DIRAC/Configuration/MasterServer", ""),
)
resDict.update(self.server.metadata)
resDict.pop("Clients", None)
return resDict
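# Editor's illustration (not part of the original handler): a minimal,
# standalone client-side sketch of how the metadata served by get_index above
# could be consumed. The base URL is a placeholder and the use of the
# `requests` package is an assumption made only for this example.
def _example_discover_auth_metadata():
    """Fetch and print the OAuth2/OIDC metadata of a (hypothetical) DIRAC auth server."""
    import requests

    auth_url = "https://dirac.example.org/auth"  # hypothetical server location
    meta = requests.get(auth_url + "/.well-known/openid-configuration").json()
    # The response is the dictionary built in get_index(): the AuthServer
    # metadata plus the DIRAC-specific 'setups' and 'configuration_server' keys.
    print(meta["issuer"])
    print(meta["token_endpoint"])
    print(meta.get("setups", []))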
def get_jwk(self):
"""JWKs endpoint
Request example::
GET LOCATION/jwk
Response::
HTTP/1.1 200 OK
Content-Type: application/json
{
"keys": [
{
"e": "AQAB",
"kty": "RSA",
"n": "3Vv5h5...X3Y7k"
}
]
}
"""
result = self.server.db.getKeySet()
return result["Value"].as_dict() if result["OK"] else {}
def post_revoke(self):
"""Revocation endpoint
Request example::
POST LOCATION/revoke
Response::
HTTP/1.1 200 OK
Content-Type: application/json
"""
self.log.verbose("Initialize a Device authentication flow.")
return self.server.create_endpoint_response(RevocationEndpoint.ENDPOINT_NAME, self.request)
def get_userinfo(self):
"""The UserInfo endpoint can be used to retrieve identity information about a user,
see `spec <https://openid.net/specs/openid-connect-core-1_0.html#UserInfo>`_
GET LOCATION/userinfo
Parameters:
+---------------+--------+---------------------------------+--------------------------------------------------+
| **name** | **in** | **description** | **example** |
+---------------+--------+---------------------------------+--------------------------------------------------+
| Authorization | header | Provide access token | Bearer jkagfbfd3r4ubf887gqduyqwogasd87 |
+---------------+--------+---------------------------------+--------------------------------------------------+
Request example::
GET LOCATION/userinfo
Authorization: Bearer <access_token>
Response::
HTTP/1.1 200 OK
Content-Type: application/json
{
"sub": "248289761001",
"name": "Bob Smith",
"given_name": "Bob",
"family_name": "Smith",
"group": [
"dirac_user",
"dirac_admin"
]
}
"""
return self.getRemoteCredentials()
def post_device(self, provider=None, user_code=None, client_id=None):
"""The device authorization endpoint can be used to request device and user codes.
This endpoint is used to start the device flow authorization process and user code verification.
POST LOCATION/device/<provider>?<query>
Parameters:
+----------------+--------+-------------------------------------------+---------------------------------------+
| **name** | **in** | **description** | **example** |
+----------------+--------+-------------------------------------------+---------------------------------------+
| user code      | query  | in the last step to confirm received user | WE8R-WEN9                             |
| | | code put it as query parameter (optional) | |
| | | It's possible to add it interactively. | |
+----------------+--------+-------------------------------------------+---------------------------------------+
| client_id | query | The public client ID | 3f6eNw0E6JGq1VuzRkpWUL9XTxhL86efZw |
+----------------+--------+-------------------------------------------+---------------------------------------+
| scope          | query  | list of scopes separated by a space, to   | g:dirac_user                          |
| | | add a group you must add "g:" before the | |
| | | group name | |
+----------------+--------+-------------------------------------------+---------------------------------------+
| provider       | path   | identity provider to authorize (optional) | CheckIn                               |
| | | It's possible to add it interactively. | |
+----------------+--------+-------------------------------------------+---------------------------------------+
Request example, to initialize a Device authentication flow::
POST LOCATION/device/CheckIn_dev?client_id=3f1DAj8z6eNw0E6JGq1Vu6efZwyV&scope=g:dirac_admin
Response::
HTTP/1.1 200 OK
Content-Type: application/json
{
"device_code": "TglwLiow0HUwowjB9aHH5HqH3bZKP9d420LkNhCEuR",
"verification_uri": "https://marosvn32.in2p3.fr/auth/device",
"interval": 5,
"expires_in": 1800,
"verification_uri_complete": "https://marosvn32.in2p3.fr/auth/device/WSRL-HJMR",
"user_code": "WSRL-HJMR"
}
Request example, to confirm the user code::
POST LOCATION/device/CheckIn_dev/WSRL-HJMR
Response::
HTTP/1.1 200 OK
"""
self.log.verbose("Initialize a Device authentication flow.")
return self.server.create_endpoint_response(DeviceAuthorizationEndpoint.ENDPOINT_NAME, self.request)
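# Editor's illustration (not part of the original handler): a standalone
# sketch of a client starting the device flow against post_device above.
# The server URL and client_id are placeholders, and the `requests` package
# is an assumption made only for this example.
def _example_start_device_flow():
    import requests

    auth_url = "https://dirac.example.org/auth"   # hypothetical
    client_id = "MY_PUBLIC_CLIENT_ID"             # hypothetical public client ID
    resp = requests.post(
        auth_url + "/device",
        params={"client_id": client_id, "scope": "g:dirac_user"},
    ).json()
    # The user visits resp["verification_uri_complete"] (or enters
    # resp["user_code"] at resp["verification_uri"]), while the client keeps
    # resp["device_code"] for polling the token endpoint.
    return resp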
def get_device(self, provider=None, user_code=None, client_id=None):
"""The device authorization endpoint can be used to request device and user codes.
This endpoint is used to start the device flow authorization process and user code verification.
User code confirmation::
GET LOCATION/device/<provider>?user_code=<user code>
Response::
HTTP/1.1 200 OK
"""
if user_code:
# If received a request with a user code, then prepare a request to authorization endpoint.
self.log.verbose("User code verification.")
result = self.server.db.getSessionByUserCode(user_code)
if not result["OK"] or not result["Value"]:
return getHTML(
"session is expired.",
theme="warning",
body=result.get("Message"),
info="Seems device code flow authorization session %s expired." % user_code,
)
session = result["Value"]
# Get original request from session
req = createOAuth2Request(dict(method="GET", uri=session["uri"]))
req.setQueryArguments(id=session["id"], user_code=user_code)
# Save session to cookie and redirect to authorization endpoint
authURL = "%s?%s" % (req.path.replace("device", "authorization"), req.query)
return self.server.handle_response(302, {}, [("Location", authURL)], session)
# If received a request without a user code, then send a form to enter the user code
with dom.div(cls="row mt-5 justify-content-md-center") as tag:
with dom.div(cls="col-auto"):
dom.div(
dom.form(
dom._input(type="text", name="user_code"),
dom.button("Submit", type="submit", cls="btn btn-submit"),
action=self.currentPath,
method="GET",
),
cls="card",
)
return getHTML(
"user code verification..",
body=tag,
icon="ticket-alt",
info="Device flow required user code. You will need to type user code to continue.",
)
def get_authorization(self, provider=None, **kwargs):
"""Authorization endpoint
GET: LOCATION/authorization/<provider>
Parameters:
+----------------+--------+-------------------------------------------+---------------------------------------+
| **name** | **in** | **description** | **example** |
+----------------+--------+-------------------------------------------+---------------------------------------+
| response_type | query | informs of the desired grant type | code |
+----------------+--------+-------------------------------------------+---------------------------------------+
| client_id | query | The client ID | 3f6eNw0E6JGq1VuzRkpWUL9XTxhL86efZw |
+----------------+--------+-------------------------------------------+---------------------------------------+
| scope          | query  | list of scopes separated by a space, to   | g:dirac_user                          |
| | | add a group you must add "g:" before the | |
| | | group name | |
+----------------+--------+-------------------------------------------+---------------------------------------+
| provider       | path   | identity provider to authorize (optional) | CheckIn                               |
| | | It's possible to add it interactively. | |
+----------------+--------+-------------------------------------------+---------------------------------------+
General options:
provider -- identity provider to authorize
Device flow:
&user_code=.. (required)
Authentication code flow:
&scope=.. (optional)
&redirect_uri=.. (optional)
&state=.. (main session id, optional)
&code_challenge=.. (PKCE, optional)
&code_challenge_method=(pain|S256) ('pain' by default, optional)
"""
return self.server.validate_consent_request(self.request, provider)
def get_redirect(self, state, error=None, error_description="", chooseScope=[]):
"""Redirect endpoint.
After a user successfully authorizes an application, the authorization server will redirect
the user back to the application with either an authorization code or access token in the URL.
The full URL of this endpoint must be registered in the identity provider.
Read more in `oauth.com <https://www.oauth.com/oauth2-servers/redirect-uris/>`_.
Specified by `RFC6749 <https://tools.ietf.org/html/rfc6749#section-3.1.2>`_.
GET LOCATION/redirect
:param str state: Current IdP session state
:param str error: IdP error response
:param str error_description: error description
:param list chooseScope: to specify new scope(group in our case) (optional)
:return: S_OK()/S_ERROR()
"""
# Check current auth session that was initiated for the selected external identity provider
session = self.get_secure_cookie("auth_session")
if not session:
return self.server.handle_response(
payload=getHTML(
"session is expired.",
theme="warning",
state=400,
info="Seems %s session is expired, please, try again." % state,
),
delSession=True,
)
sessionWithExtIdP = json.loads(session)
if state and not sessionWithExtIdP.get("state") == state:
return self.server.handle_response(
payload=getHTML(
"session is expired.",
theme="warning",
state=400,
info="Seems %s session is expired, please, try again." % state,
),
delSession=True,
)
# Try to catch errors if the authorization on the selected identity provider was unsuccessful
if error:
provider = sessionWithExtIdP.get("Provider")
return self.server.handle_response(
payload=getHTML(
error,
theme="error",
body=error_description,
info="Seems %s session is failed on the %s's' side." % (state, provider),
),
delSession=True,
)
if not sessionWithExtIdP.get("authed"):
# Parse result of the second authentication flow
self.log.info("%s session, parsing authorization response:\n" % state, self.request.uri)
result = self.server.parseIdPAuthorizationResponse(self.request, sessionWithExtIdP)
if not result["OK"]:
if result["Message"].startswith("<!DOCTYPE html>"):
return self.server.handle_response(payload=result["Message"], delSession=True)
return self.server.handle_response(
payload=getHTML("server error", state=500, info=result["Message"]), delSession=True
)
# Return main session flow
sessionWithExtIdP["authed"] = result["Value"]
# Research group
grant_user, response = self.__researchDIRACGroup(sessionWithExtIdP, chooseScope, state)
if not grant_user:
return response
# RESPONSE to basic DIRAC client request
resp = self.server.create_authorization_response(response, grant_user)
if isinstance(resp.payload, str) and not resp.payload.startswith("<!DOCTYPE html>"):
resp.payload = getHTML("authorization response", state=resp.status_code, body=resp.payload)
return resp
def post_token(self):
"""The token endpoint, the description of the parameters will differ depending on the selected grant_type
POST LOCATION/token
Parameters:
+----------------+--------+-------------------------------+---------------------------------------------------+
| **name** | **in** | **description** | **example** |
+----------------+--------+-------------------------------+---------------------------------------------------+
| grant_type | query | grant type to use | urn:ietf:params:oauth:grant-type:device_code |
+----------------+--------+-------------------------------+---------------------------------------------------+
| client_id | query | The public client ID | 3f1DAj8z6eNw0E6JGq1VuzRkpWUL9XTxhL86efZw |
+----------------+--------+-------------------------------+---------------------------------------------------+
| device_code | query | device code | uW5xL4hr2tqwBPKL5d0JO9Fcc67gLqhJsNqYTSp |
+----------------+--------+-------------------------------+---------------------------------------------------+
:mod:`Supported grant types <DIRAC.FrameworkSystem.private.authorization.grants>`
Request example::
POST LOCATION/token?client_id=L86..yV&grant_type=urn:ietf:params:oauth:grant-type:device_code&device_code=uW5
Response::
HTTP/1.1 400 OK
Content-Type: application/json
{
"error": "authorization_pending"
}
"""
return self.server.create_token_response(self.request)
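# Editor's illustration (not part of the original handler): a sketch of the
# polling loop a device-flow client would run against post_token above,
# reacting to the "authorization_pending" error shown in the docstring.
# URL, client_id and device_code are placeholders; `requests` is assumed.
def _example_poll_for_token(device_code, client_id, interval=5):
    import time
    import requests

    auth_url = "https://dirac.example.org/auth"  # hypothetical
    while True:
        r = requests.post(
            auth_url + "/token",
            params={
                "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
                "device_code": device_code,
                "client_id": client_id,
            },
        )
        payload = r.json()
        if r.status_code == 200:
            return payload  # the issued token response
        if payload.get("error") != "authorization_pending":
            raise RuntimeError(payload.get("error_description", payload.get("error")))
        time.sleep(interval)  # the user has not finished authorizing yet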
def __researchDIRACGroup(self, extSession, chooseScope, state):
"""Research DIRAC groups for authorized user
:param dict extSession: ended authorized external IdP session
:return: -- will return (None, response) to provide error or group selector
will return (grant_user, request) to continue authorization with the chosen group
"""
# Base DIRAC client auth session
firstRequest = createOAuth2Request(extSession["firstRequest"])
# Read requested groups by DIRAC client or user
firstRequest.addScopes(chooseScope)
# Read already authed user
username = extSession["authed"]["username"]
# Requested arguments in first request
provider = firstRequest.provider
self.log.debug("Next groups has been found for %s:" % username, ", ".join(firstRequest.groups))
# Research group
result = getGroupsForUser(username)
if not result["OK"]:
return None, self.server.handle_response(
payload=getHTML("server error", theme="error", info=result["Message"]), delSession=True
)
groups = result["Value"]
validGroups = [
group for group in groups if (getIdPForGroup(group) == provider) or ("proxy" in firstRequest.scope)
]
if not validGroups:
return None, self.server.handle_response(
payload=getHTML(
"groups not found.",
theme="error",
info=f"No groups found for {username} and for {provider} Identity Provider.",
),
delSession=True,
)
self.log.debug("The state of %s user groups has been checked:" % username, pprint.pformat(validGroups))
# If group already defined in first request, just return it
if firstRequest.groups:
return extSession["authed"], firstRequest
# If not and we found only one valid group, apply this group
if len(validGroups) == 1:
firstRequest.addScopes(["g:%s" % validGroups[0]])
return extSession["authed"], firstRequest
# Else give the user a chance to choose a group in the browser
with dom.div(cls="row mt-5 justify-content-md-center align-items-center") as tag:
for group in sorted(validGroups):
vo, gr = group.split("_")
with dom.div(cls="col-auto p-2").add(dom.div(cls="card shadow-lg border-0 text-center p-2")):
dom.h4(vo.upper() + " " + gr, cls="p-2")
dom.a(href="%s?state=%s&chooseScope=g:%s" % (self.currentPath, state, group), cls="stretched-link")
html = getHTML(
"group selection..",
body=tag,
icon="users",
info="Dirac use groups to describe permissions. " "You will need to select one of the groups to continue.",
)
return None, self.server.handle_response(payload=html, newSession=extSession)
| DIRACGrid/DIRAC | src/DIRAC/FrameworkSystem/API/AuthHandler.py | Python | gpl-3.0 | 22,010 | [
"DIRAC"
] | ecf64c9f29117fa8d8b937412d19da8ae573d06a50ac2b582c2c2777a69654c1 |
# Copyright (C) 2012,2013,2015
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
************************
espressopp.ParticleGroup
************************
.. function:: espressopp.ParticleGroup(storage)
:param storage:
:type storage:
.. function:: espressopp.ParticleGroup.add(pid)
:param pid:
:type pid:
:rtype:
.. function:: espressopp.ParticleGroup.has(pid)
:param pid:
:type pid:
:rtype:
.. function:: espressopp.ParticleGroup.show()
:rtype:
.. function:: espressopp.ParticleGroup.size()
:rtype:
"""
import _espressopp
import esutil
import pmi
from espressopp.esutil import cxxinit
class ParticleGroupLocal(_espressopp.ParticleGroup):
def __init__(self, storage):
if pmi.workerIsActive():
cxxinit(self, _espressopp.ParticleGroup, storage)
def add(self, pid):
if pmi.workerIsActive():
self.cxxclass.add(self, pid)
def show(self):
if pmi.workerIsActive():
self.cxxclass.show(self)
def has(self, pid):
if pmi.workerIsActive():
return self.cxxclass.has(self, pid)
def size(self):
if pmi.workerIsActive():
return self.cxxclass.size(self)
if pmi.isController:
class ParticleGroup(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.ParticleGroupLocal',
pmicall = [ "add", "show", "has", "size" ]
)
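# Editor's note: a short, hedged usage sketch (not part of the original
# module). It assumes a system with an attached storage has already been set
# up in the usual espressopp way; the particle ids are arbitrary examples.
def _example_particle_group_usage(system):
    group = ParticleGroup(system.storage)   # create the group on the controller
    group.add(1)                            # add particles by id
    group.add(2)
    print(group.has(1))                     # membership check, per the API documented above
    print(group.size())                     # number of particles in the group
    group.show()                            # print the group contents
    return group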
| fedepad/espressopp | src/ParticleGroup.py | Python | gpl-3.0 | 2,244 | [
"ESPResSo"
] | aeff8bcf0c31118ec17b5df32e97cdb06e59c28f589eb11f1dcab4b7b6a53595 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import exp
import sys
import ConfigParser as cfg
import os
import numpy as n
import numpy.random as nr
from math import ceil, floor
from collections import OrderedDict
from os import linesep as NL
from python_util.options import OptionsParser
import re
class LayerParsingError(Exception):
pass
# A neuron that doesn't take parameters
class NeuronParser:
def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
self.type = type
self.func_str = func_str
self.uses_acts = uses_acts
self.uses_inputs = uses_inputs
def parse(self, type):
if type == self.type:
return {'type': self.type,
'params': {},
'usesActs': self.uses_acts,
'usesInputs': self.uses_inputs}
return None
# A neuron that takes parameters
class ParamNeuronParser(NeuronParser):
neuron_regex = re.compile(r'^\s*(\w+)\s*\[\s*(\w+(\s*,\w+)*)\s*\]\s*$')
def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
NeuronParser.__init__(self, type, func_str, uses_acts, uses_inputs)
m = self.neuron_regex.match(type)
self.base_type = m.group(1)
self.param_names = m.group(2).split(',')
assert len(set(self.param_names)) == len(self.param_names)
def parse(self, type):
m = re.match(r'^%s\s*\[([\d,\.\s\-]*)\]\s*$' % self.base_type, type)
if m:
try:
param_vals = [float(v.strip()) for v in m.group(1).split(',')]
if len(param_vals) == len(self.param_names):
return {'type': self.base_type,
'params': dict(zip(self.param_names, param_vals)),
'usesActs': self.uses_acts,
'usesInputs': self.uses_inputs}
except TypeError:
pass
return None
class AbsTanhNeuronParser(ParamNeuronParser):
def __init__(self):
ParamNeuronParser.__init__(self, 'abstanh[a,b]', 'f(x) = a * |tanh(b * x)|')
def parse(self, type):
dic = ParamNeuronParser.parse(self, type)
# Make b positive, since abs(tanh(bx)) = abs(tanh(-bx)) and the C++ code
# assumes b is positive.
if dic:
dic['params']['b'] = abs(dic['params']['b'])
return dic
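# Editor's illustration (not part of the original file): how the parsers
# above turn a neuron specification string into a parameter dictionary.
# The concrete spec strings are arbitrary examples.
def _example_parse_neuron_specs():
    # A parameterised neuron: the type pattern declares the parameter names.
    p = ParamNeuronParser('tanh[a,b]', 'f(x) = a * tanh(b * x)')
    print(p.parse('tanh[2.5,0.5]'))        # -> type 'tanh' with params a=2.5, b=0.5
    # AbsTanhNeuronParser additionally forces b to be positive.
    q = AbsTanhNeuronParser()
    print(q.parse('abstanh[1.0,-3.0]')['params'])   # -> a=1.0, b=3.0 (sign of b flipped)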
class ParamParser:
lrs_regex = re.compile(r'^\s*(\w+)\s*(?:\[\s*(\w+(\s*;\w+)*)\s*\])?\s*$')
param_converters = {'i': int,
'f': float}
def __init__(self, type):
m = self.lrs_regex.match(type)
self.base_type = m.group(1)
param_names_with_type = m.group(2).split(';') if m.group(2) is not None else []
self.param_names = [p[1:] for p in param_names_with_type]
self.param_types = [self.param_converters[p[0]] for p in param_names_with_type]
self.param_regex_inner = ";".join([('\s*%s\s*=\s*[^;,\s=]+\s*' % p) for p in self.param_names])
self.regex_str = ('^%s\s*(?:\[(%s)\])?\s*$') % (self.base_type, self.param_regex_inner)
assert len(set(self.param_names)) == len(self.param_names)
def parse(self, type):
m = re.match(self.regex_str, type, flags=re.IGNORECASE)
if m:
try:
param_vals = [ptype(v.split('=')[1].strip()) for ptype,v in zip(self.param_types, m.group(1).split(';'))] if m.group(1) is not None else []
if len(param_vals) == len(self.param_names):
return {'type': self.base_type,
'params': dict(zip(self.param_names, param_vals))}
except TypeError:
pass
return None
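# Editor's illustration (not part of the original file): ParamParser handles
# "name[key=val;...]"-style strings, where each declared parameter carries a
# type prefix ('i' for int, 'f' for float). The specs below are arbitrary examples.
def _example_param_parser():
    p = ParamParser('dec[ffactor;inum]')        # declares float 'factor' and int 'num'
    print(p.parse('dec[factor=0.5;num=3]'))     # -> type 'dec' with factor=0.5, num=3
    q = ParamParser('reset')                    # no parameters declared
    print(q.parse('reset'))                     # -> type 'reset' with empty params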
# Subclass that throws more convnet-specific exceptions than the default
class MyConfigParser(cfg.SafeConfigParser):
def safe_get(self, section, option, f=cfg.SafeConfigParser.get, typestr=None, default=None):
try:
return f(self, section, option)
except cfg.NoOptionError, e:
if default is not None:
return default
raise LayerParsingError("Layer '%s': required parameter '%s' missing" % (section, option))
except ValueError, e:
if typestr is None:
raise e
raise LayerParsingError("Layer '%s': parameter '%s' must be %s" % (section, option, typestr))
def safe_get_list(self, section, option, f=str, typestr='strings', default=None):
v = self.safe_get(section, option, default=default)
if type(v) == list:
return v
try:
return [f(x.strip()) for x in v.split(',')]
except:
raise LayerParsingError("Layer '%s': parameter '%s' must be ','-delimited list of %s" % (section, option, typestr))
def safe_get_int(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getint, typestr='int', default=default)
def safe_get_float(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getfloat, typestr='float', default=default)
def safe_get_bool(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getboolean, typestr='bool', default=default)
def safe_get_float_list(self, section, option, default=None):
return self.safe_get_list(section, option, float, typestr='floats', default=default)
def safe_get_int_list(self, section, option, default=None):
return self.safe_get_list(section, option, int, typestr='ints', default=default)
def safe_get_bool_list(self, section, option, default=None):
return self.safe_get_list(section, option, lambda x: x.lower() in ('true', '1'), typestr='bools', default=default)
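# Editor's illustration (not part of the original file): the safe_get_*
# helpers read typed values from a config section and fall back to a default
# or raise LayerParsingError with a layer-oriented message. The section and
# option names below are arbitrary examples, not a real layer definition.
def _example_safe_get():
    import StringIO
    mcp = MyConfigParser(dict_type=OrderedDict)
    mcp.readfp(StringIO.StringIO("[conv1]\ntype=conv\nchannels=3\ncoeffs=0.5,1.5\n"))
    print(mcp.safe_get('conv1', 'type'))                         # 'conv'
    print(mcp.safe_get_int('conv1', 'channels'))                 # 3
    print(mcp.safe_get_float_list('conv1', 'coeffs'))            # [0.5, 1.5]
    print(mcp.safe_get_bool('conv1', 'missing', default=True))   # falls back to the default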
# A class that implements part of the interface of MyConfigParser
class FakeConfigParser(object):
def __init__(self, dic):
self.dic = dic
def safe_get(self, section, option, default=None):
if option in self.dic:
return self.dic[option]
return default
def safe_get_int(self, section, option, default=None):
return int(self.safe_get(section, option, default))
def safe_get_int_list(self, section, option, default=None):
return list(self.safe_get(section, option, default))
class LayerParser:
def __init__(self):
self.dic = {}
self.set_defaults()
# Post-processing step -- this is called after all layers have been initialized
def optimize(self, layers):
self.dic['actsTarget'] = -1
self.dic['actsGradTarget'] = -1
if len(set(len(l['gpu']) for l in layers.values() if 'inputs' in l and self.dic['name'] in l['inputs'])) > 1:
# print set(len(l['gpu']) for l in layers.values())
raise LayerParsingError("Layer '%s': all next layers must have equal number of replicas." % (self.dic['name']))
def parse_params(self, vals, parsers, param_name, human_name, num_params=1):
dic, name = self.dic, self.dic['name']
# print vals
if len(vals) != num_params and len(vals) != 1:
raise LayerParsingError("Layer '%s': expected list of length %d for %s but got list of length %d."% (name, num_params, param_name, len(vals)))
parsed = []
# print vals
for v in vals:
for p in parsers:
parsedv = p.parse(v)
if parsedv:
parsed += [parsedv]
break
if len(parsed) == 1 and num_params > 1:
parsed = parsed * num_params
if len(parsed) == num_params:
return parsed
# print parsed, vals
raise LayerParsingError("Layer '%s': unable to parse %s %s=%s." % (name, human_name, param_name, ",".join(vals)))
# Add parameters from layer parameter file
def add_params(self, mcp):
pass
# self.dic['conserveMem'] = mcp.convnet.op.get_value('conserve_mem') if mcp.convnet is not None else 0
def init(self, dic):
self.dic = dic
return self
def set_defaults(self):
self.dic['outputs'] = 0
self.dic['parser'] = self
self.dic['requiresParams'] = False
# Does this layer use its own activity matrix
# for some purpose other than computing its output?
# Usually, this will only be true for layers that require their
# own activity matrix for gradient computations. For example, layers
# with logistic units must compute the gradient y * (1 - y), where y is
# the activity matrix.
#
# Layers that do not use their own activity matrix should advertise
# this, since this will enable memory-saving matrix re-use optimizations.
#
# The default value of this property is True, for safety purposes.
# If a layer advertises that it does not use its own activity matrix when
# in fact it does, bad things will happen.
self.dic['usesActs'] = True
# Does this layer use the activity matrices of its input layers
# for some purpose other than computing its output?
#
# Again true by default for safety
self.dic['usesInputs'] = True
# Force this layer to use its own activity gradient matrix,
# instead of borrowing one from one of its inputs.
#
# This should be true for layers where the mapping from output
# gradient to input gradient is non-elementwise.
self.dic['forceOwnActs'] = True
# Does this layer need the gradient at all?
# Should only be true for layers with parameters (weights).
self.dic['gradConsumer'] = False
# The gpu indices on which this layer runs
self.dic['gpu'] = [-1]
def parse(self, name, mcp, prev_layers, model=None):
self.prev_layers = prev_layers
self.dic['name'] = name
self.dic['type'] = mcp.safe_get(name, 'type')
self.dic['id'] = len(prev_layers)
return self.dic
def verify_float_range(self, v, param_name, _min, _max):
self.verify_num_range(v, param_name, _min, _max, strconv=lambda x: '%.3f' % x)
def verify_num_range(self, v, param_name, _min, _max, strconv=lambda x:'%d' % x):
if type(v) == list:
for i,vv in enumerate(v):
self._verify_num_range(vv, param_name, _min, _max, i, strconv=strconv)
else:
self._verify_num_range(v, param_name, _min, _max, strconv=strconv)
def _verify_num_range(self, v, param_name, _min, _max, input=-1, strconv=lambda x:'%d' % x):
layer_name = self.dic['name'] if input < 0 else '%s[%d]' % (self.dic['name'], input)
if _min is not None and _max is not None and (v < _min or v > _max):
raise LayerParsingError("Layer '%s': parameter '%s' must be in the range %s-%s" % (layer_name, param_name, strconv(_min), strconv(_max)))
elif _min is not None and v < _min:
raise LayerParsingError("Layer '%s': parameter '%s' must be greater than or equal to %s" % (layer_name, param_name, strconv(_min)))
elif _max is not None and v > _max:
raise LayerParsingError("Layer '%s': parameter '%s' must be smaller than or equal to %s" % (layer_name, param_name, strconv(_max)))
def verify_divisible(self, value, div, value_name, div_name=None, input_idx=0):
layer_name = self.dic['name'] if len(self.dic['inputs']) == 0 else '%s[%d]' % (self.dic['name'], input_idx)
if value % div != 0:
raise LayerParsingError("Layer '%s': parameter '%s' must be divisible by %s" % (layer_name, value_name, str(div) if div_name is None else "'%s'" % div_name))
def verify_str_in(self, value, param_name, lst, input_idx=-1):
lname = self.dic['name'] if input_idx == -1 else ('%s[%d]' % (self.dic['name'], input_idx))
if value not in lst:
raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (lname, param_name, ", ".join("'%s'" % s for s in lst)))
def verify_int_in(self, value, param_name, lst):
if value not in lst:
raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
def verify_all_ints_in(self, values, param_name, lst):
if len([v for v in values if v not in lst]) > 0:
raise LayerParsingError("Layer '%s': all parameters to '%s' must be among %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
def verify_input_dims(self, dims):
for i,d in enumerate(dims):
if d is not None and self.dic['numInputs'][i] != d: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of input %d must be %d" % (self.dic['name'], i, d))
# This looks for neuron=x arguments in various layers, and creates
# separate layer definitions for them.
@staticmethod
def detach_neuron_layers(layers):
for name,l in layers.items():
if l['type'] != 'neuron' and 'neuron' in l and l['neuron']:
NeuronLayerParser().detach_neuron_layer(name, layers)
@staticmethod
def parse_layers(layer_cfg_path, param_cfg_path, model, layers={}):
try:
if not os.path.exists(layer_cfg_path):
raise LayerParsingError("Layer definition file '%s' does not exist" % layer_cfg_path)
if not os.path.exists(param_cfg_path):
raise LayerParsingError("Layer parameter file '%s' does not exist" % param_cfg_path)
if len(layers) == 0:
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.readfp(open(layer_cfg_path))
for name in mcp.sections():
if not mcp.has_option(name, 'type'):
raise LayerParsingError("Layer '%s': no type given" % name)
ltype = mcp.safe_get(name, 'type')
if ltype not in layer_parsers:
raise LayerParsingError("Layer '%s': Unknown layer type: '%s'" % (name, ltype))
layers[name] = layer_parsers[ltype]().parse(name, mcp, layers, model)
LayerParser.detach_neuron_layers(layers)
for l in layers.values():
l['parser'].optimize(layers)
del l['parser']
for name,l in layers.items():
if not l['type'].startswith('cost.'):
found = max(name in l2['inputs'] for l2 in layers.values() if 'inputs' in l2)
if not found:
raise LayerParsingError("Layer '%s' of type '%s' is unused" % (name, l['type']))
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.readfp(open(param_cfg_path))
# mcp.convnet = model
for name,l in layers.items():
if not mcp.has_section(name) and l['requiresParams']:
raise LayerParsingError("Layer '%s' of type '%s' requires extra parameters, but none given in file '%s'." % (name, l['type'], param_cfg_path))
lp = layer_parsers[l['type']]().init(l)
lp.add_params(mcp)
except LayerParsingError, e:
print e
sys.exit(1)
return layers
@staticmethod
def register_layer_parser(ltype, cls):
if ltype in layer_parsers:
raise LayerParsingError("Layer type '%s' already registered" % ltype)
layer_parsers[ltype] = cls
# Any layer that takes an input (i.e. non-data layer)
class LayerWithInputParser(LayerParser):
def __init__(self, num_inputs=-1):
LayerParser.__init__(self)
self.num_inputs = num_inputs
def verify_num_params(self, params, auto_expand=True):
for param in params:
if len(self.dic[param]) != len(self.dic['inputs']):
if auto_expand and len(self.dic[param]) == 1:
self.dic[param] *= len(self.dic['inputs'])
else:
raise LayerParsingError("Layer '%s': %s list length does not match number of inputs" % (self.dic['name'], param))
# layers: dictionary: name -> layer
def optimize(self, layers):
LayerParser.optimize(self, layers)
dic = self.dic
# Check if I have an input that no one else uses.
#print "Layer %s optimizing" % dic['name']
if not dic['forceOwnActs']:
for i, inp in enumerate(dic['inputLayers']):
if inp['outputs'] == dic['outputs'] and sum(('inputs' in ll) and (inp['name'] in ll['inputs']) for ll in layers.itervalues()) == 1:
# I can share my activity matrix with this layer
# if it does not use its activity matrix, and I
# do not need to remember my inputs.
# TODO: a dropout layer should always be able to overwrite
# its input. Make it so.
# print "Layer %s(uses inputs=%d), input %s(uses acts = %d)" % (dic['name'], dic['usesInputs'], inp['name'], inp['usesActs'])
if not inp['usesActs'] and not dic['usesInputs']:
dic['actsTarget'] = i
print "Layer %s using acts from layer %s" % (dic['name'], inp['name'])
# print "Layer '%s' sharing activity matrix with layer '%s'" % (dic['name'], l['name'])
# I can share my gradient matrix with this layer if we're on the same GPU.
# This is different from the logic for actsTarget because this guy doesn't
# have an actsGrad matrix on my GPU if our GPUs are different, so there's
# nothing to share.
if dic['gpu'] == inp['gpu']:
dic['actsGradTarget'] = i
# print "Layer '%s' sharing activity gradient matrix with layer '%s'" % (dic['name'], l['name'])
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['inputs'] = [inp.strip() for inp in mcp.safe_get(name, 'inputs').split(',')]
for inp in dic['inputs']:
if inp not in prev_layers:
raise LayerParsingError("Layer '%s': input layer '%s' not defined" % (name, inp))
dic['inputLayers'] = [prev_layers[inp] for inp in dic['inputs']]
dic['gpu'] = mcp.safe_get_int_list(name, 'gpu', default=dic['inputLayers'][0]['gpu'])
dic['gpus'] = ", ".join('%s' % d for d in dic['gpu'])
dic['numReplicas'] = len(dic['gpu'])
if len(set(dic['gpu'])) != len(dic['gpu']):
raise LayerParsingError("Layer '%s': all replicas must run on different GPUs." % (name))
for inp in dic['inputs']:
# Data layers do not explicitly define how many replicas they have.
# The number of replicas for a data layer is given by the number of replicas
# in the next layer(s). So we set that here.
inpl = prev_layers[inp]
if inpl['type'] == 'data':
inpl['numReplicas'] = dic['numReplicas']
if inpl['numReplicas'] % dic['numReplicas'] != 0:
raise LayerParsingError("Layer '%s': number of replicas (%d) must divide number of replicas in all input layers (input %s has %d replicas)." % (name, dic['numReplicas'], inpl['name'], inpl['numReplicas']))
if len(set(inp['numReplicas'] for inp in dic['inputLayers'])) != 1:
raise LayerParsingError("Layer '%s': all input layers must have equal numbers of replicas." % (name))
# Need to also assert that all *next* layers have equal number of replicas but this is hard so it's done in Layer.optimize
for inp in dic['inputLayers']:
if inp['outputs'] == 0:
raise LayerParsingError("Layer '%s': input layer '%s' does not produce any output" % (name, inp['name']))
dic['numInputs'] = [inp['outputs'] for inp in dic['inputLayers']]
# Layers can declare a neuron activation function to apply to their output, as a shortcut
# to avoid declaring a separate neuron layer above themselves.
dic['neuron'] = mcp.safe_get(name, 'neuron', default="")
if self.num_inputs > 0 and len(dic['numInputs']) != self.num_inputs:
raise LayerParsingError("Layer '%s': number of inputs must be %d" % (name, self.num_inputs))
if model:
self.verify_all_ints_in(dic['gpu'], 'gpu', range(len(model.op.get_value('gpu'))))
return dic
def verify_img_size(self):
dic = self.dic
if dic['numInputs'][0] % dic['imgPixels'] != 0 or dic['imgSize'] * dic['imgSize'] != dic['imgPixels']:
raise LayerParsingError("Layer '%s': has %-d dimensional input, not interpretable as %d-channel images" % (dic['name'], dic['numInputs'][0], dic['channels']))
@staticmethod
def grad_consumers_below(dic):
if dic['gradConsumer']:
return True
if 'inputLayers' in dic:
return any(LayerWithInputParser.grad_consumers_below(l) for l in dic['inputLayers'])
def verify_no_grads(self):
if LayerWithInputParser.grad_consumers_below(self.dic):
raise LayerParsingError("Layer '%s': layers of type '%s' cannot propagate gradient and must not be placed over layers with parameters." % (self.dic['name'], self.dic['type']))
class NailbedLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['stride'] = mcp.safe_get_int(name, 'stride')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputsX'] = (dic['imgSize'] + dic['stride'] - 1) / dic['stride']
dic['start'] = (dic['imgSize'] - dic['stride'] * (dic['outputsX'] - 1)) / 2
dic['outputs'] = dic['channels'] * dic['outputsX']**2
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_img_size()
print "Initialized bed-of-nails layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class GaussianBlurLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['filterSize'] = mcp.safe_get_int(name, 'filterSize')
dic['stdev'] = mcp.safe_get_float(name, 'stdev')
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_int_in(dic['filterSize'], 'filterSize', [3, 5, 7, 9])
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['filter'] = n.array([exp(-(dic['filterSize']/2 - i)**2 / float(2 * dic['stdev']**2))
for i in xrange(dic['filterSize'])], dtype=n.float32).reshape(1, dic['filterSize'])
dic['filter'] /= dic['filter'].sum()
self.verify_img_size()
if dic['filterSize'] > dic['imgSize']:
raise LayerParsingError("Later '%s': filter size (%d) must be smaller than image size (%d)." % (dic['name'], dic['filterSize'], dic['imgSize']))
print "Initialized Gaussian blur layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
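# Editor's illustration (not part of the original file): the 1-D kernel built
# in GaussianBlurLayerParser.parse above, evaluated here for an arbitrary
# example (filterSize=5, stdev=1) to show the normalised separable blur filter.
def _example_gaussian_blur_filter(filter_size=5, stdev=1.0):
    filt = n.array([exp(-(filter_size/2 - i)**2 / float(2 * stdev**2))
                    for i in xrange(filter_size)], dtype=n.float32)
    filt /= filt.sum()
    return filt.reshape(1, filter_size)   # approx [0.054, 0.244, 0.403, 0.244, 0.054]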
class HorizontalReflectionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, 3)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
self.verify_img_size()
print "Initialized horizontal reflection layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class ResizeLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['scale'] = mcp.safe_get_float(name, 'scale')
dic['tgtSize'] = int(floor(dic['imgSize'] / dic['scale']))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Really not recommended to use this for such severe scalings
self.verify_float_range(dic['scale'], 'scale', 0.5, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized resize layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class RandomScaleLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['maxScale'] = mcp.safe_get_float(name, 'maxScale')
dic['tgtSize'] = mcp.safe_get_int(name, 'tgtSize')
min_size = int(floor(dic['imgSize'] / dic['maxScale']))
max_size = dic['imgSize'] #int(floor(dic['imgSize'] * dic['maxScale']))
if dic['tgtSize'] < min_size:
raise LayerParsingError("Layer '%s': target size must be greater than minimum image size after rescaling (%d)" % (name, min_size))
if dic['tgtSize'] > max_size:
raise LayerParsingError("Layer '%s': target size must be smaller than maximum image size after rescaling (%d)" % (name, max_size))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_float_range(dic['maxScale'], 'maxScale', 1, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized random scale layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class CropLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
dic['startX'] = mcp.safe_get_int(name, 'startX')
dic['startY'] = mcp.safe_get_int(name, 'startY', default=dic['startX'])
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['channels'] * (dic['sizeX']**2)
self.verify_num_range(dic['startX'], 'startX', 0, dic['imgSize']-1)
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['startY'], 'startY', 0, dic['imgSize']-1)
self.verify_img_size()
self.verify_no_grads()
if dic['startX'] + dic['sizeX'] > dic['imgSize']:
raise LayerParsingError("Layer '%s': startX (%d) + sizeX (%d) > imgSize (%d)" % (name, dic['startX'], dic['sizeX'], dic['imgSize']))
print "Initialized cropping layer '%s', producing %dx%d %d-channel output" % (name, dic['sizeX'], dic['sizeX'], dic['channels'])
return dic
class ColorTransformLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / 3
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['channels'] = 3
dic['outputs'] = dic['numInputs'][0]
self.verify_img_size()
self.verify_no_grads()
return dic
class RGBToYUVLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
print "Initialized RGB --> YUV layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class RGBToLABLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
dic['center'] = mcp.safe_get_bool(name, 'center', default=False)
print "Initialized RGB --> LAB layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class NeuronLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
@staticmethod
def get_unused_layer_name(layers, wish):
if wish not in layers:
return wish
for i in xrange(1, 100):
name = '%s.%d' % (wish, i)
if name not in layers:
return name
raise LayerParsingError("This is insane.")
def parse_neuron(self, neuron_str):
for n in neuron_parsers:
p = n.parse(neuron_str)
if p: # Successfully parsed neuron, return it
self.dic['neuron'] = p
self.dic['usesActs'] = self.dic['neuron']['usesActs']
self.dic['usesInputs'] = self.dic['neuron']['usesInputs']
return
# Could not parse neuron
# Print available neuron types
colnames = ['Neuron type', 'Function']
m = max(len(colnames[0]), OptionsParser._longest_value(neuron_parsers, key=lambda x:x.type)) + 2
ntypes = [OptionsParser._bold(colnames[0].ljust(m))] + [n.type.ljust(m) for n in neuron_parsers]
fnames = [OptionsParser._bold(colnames[1])] + [n.func_str for n in neuron_parsers]
usage_lines = NL.join(ntype + fname for ntype,fname in zip(ntypes, fnames))
raise LayerParsingError("Layer '%s': unable to parse neuron type '%s'. Valid neuron types: %sWhere neurons have parameters, they must be floats." % (self.dic['name'], neuron_str, NL + usage_lines + NL))
def detach_neuron_layer(self, src_name, layers):
dic = self.dic
# self.set_defaults()
dic['name'] = NeuronLayerParser.get_unused_layer_name(layers, '%s_neuron' % src_name)
dic['type'] = 'neuron'
dic['inputs'] = src_name
dic['neuron'] = layers[src_name]['neuron']
dic['gpu'] = layers[src_name]['gpu']
# Yes it's not entirely correct to pass all of layers as prev_layers, but it's harmless
dic = self.parse(dic['name'], FakeConfigParser(dic), layers)
dic['src_layer'] = src_name
# Link upper layers to this new one
for l in layers.values():
if 'inputs' in l:
l['inputs'] = [inp if inp != src_name else dic['name'] for inp in l['inputs']]
l['inputLayers'] = [inp if inp['name'] != src_name else dic for inp in l['inputLayers']]
layers[dic['name']] = dic
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
self.parse_neuron(dic['neuron'])
dic['forceOwnActs'] = False
print "Initialized neuron layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class EltwiseSumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['coeffs'] = mcp.safe_get_float_list(name, 'coeffs', default=[1.0] * len(dic['inputs']))
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['requiresParams'] = True
print "Initialized elementwise sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class EltwiseMaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(dic['inputs']) < 2:
raise LayerParsingError("Layer '%s': elementwise max layer must have at least 2 inputs, got %d." % (name, len(dic['inputs'])))
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
print "Initialized elementwise max layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['stride'] = mcp.safe_get_int(name, 'stride', default=1)
self.verify_divisible(dic['numInputs'][0], dic['stride'], 'input dimensionality', 'stride')
dic['outputs'] = dic['numInputs'][0] / dic['stride']
print "Initialized sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class DropoutLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['enable'] = mcp.safe_get_bool(name, 'enable', default=True)
dic['keep'] = mcp.safe_get_float(name, 'keep', default=0.5)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['outputs'] = dic['numInputs'][0]
print "Initialized %s layer '%s' on GPUs %s, producing %d outputs" % (dic['type'], name, dic['gpus'], dic['outputs'])
return dic
class Dropout2LayerParser(DropoutLayerParser):
def __init__(self):
DropoutLayerParser.__init__(self)
class WeightLayerParser(LayerWithInputParser):
LAYER_PAT = re.compile(r'^\s*([^\s\[]+)(?:\[(\d+)\])?\s*$') # matches things like layername[5], etc
def __init__(self, num_inputs=-1):
LayerWithInputParser.__init__(self, num_inputs=num_inputs)
@staticmethod
def get_layer_name(name_str):
m = WeightLayerParser.LAYER_PAT.match(name_str)
if not m:
return None
return m.group(1), m.group(2)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['momW'] = mcp.safe_get_float_list(name, 'momW')
dic['momB'] = mcp.safe_get_float(name, 'momB')
dic['superEps'] = mcp.safe_get_float(name, 'superEps', default=0.0)
dic['superMom'] = mcp.safe_get_float(name, 'superMom', default=0.0)
dic['wc'] = mcp.safe_get_float_list(name, 'wc', default=[0.0] * len(dic['inputs']))
dic['wball'] = mcp.safe_get_float_list(name, 'wball', default=[0.0] * len(dic['inputs']))
self.verify_num_params(['momW', 'wc', 'wball'])
# dic['wballNormed'] = [wball * nweights for wball,nweights in zip(dic['wball'], dic['weightsPerFilter'])]
dic['wballNormed'] = dic['wball']
        # Convert from old-style 0.001,0.02 hyperparam specification to new-style
# const[base=0.001],const[base=0.02] and so forth
def convert_scalars_to_schedules(scalars):
parts = scalars.split(',')
for i,p in enumerate(parts):
p = p.strip()
if re.match('(?:\d*\.)?\d+$', p):
parts[i] = 'const[base=%s]' % p
return parts
dic['epsW'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsW')), lrs_parsers, 'epsW', 'learning rate schedule', num_params=len(dic['inputs']))
dic['epsB'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsB')), lrs_parsers, 'epsB', 'learning rate schedule', num_params=1)[0]
dic['updatePeriod'] = mcp.safe_get_int(name, 'updatePeriod', default=0) # 0 means update as often as possible
# TODO: assert that updatePeriod is a multiple of active pass period, which is unknown here.
# the assert has to go in some post-processing step..
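        # A layer consumes gradients only if at least one of its learning-rate schedules has a positive base value.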
dic['gradConsumer'] = dic['epsB']['params']['base'] > 0 or any(w['params']['base'] > 0 for w in dic['epsW'])
@staticmethod
def unshare_weights(layer, layers, matrix_idx=None):
def unshare(layer, layers, indices):
for i in indices:
                if layer['weightSourceLayers'][i] != '': # source is stored as a layer name; '' means not shared
                    src_layer_name = layer['weightSourceLayers'][i]
                    src_matrix_idx = layer['weightSourceMatrixIndices'][i]
                    layer['weightSourceLayers'][i] = ""
                    layer['weightSourceMatrixIndices'][i] = -1
                    layer['weights'][i] = layer['weights'][i].copy()
                    layer['weightsInc'][i] = n.zeros_like(layer['weights'][i])
                    print "Unshared weight matrix %s[%d] from %s[%d]." % (layer['name'], i, src_layer_name, src_matrix_idx)
else:
print "Weight matrix %s[%d] already unshared." % (layer['name'], i)
if 'weightSourceLayers' in layer:
unshare(layer, layers, range(len(layer['inputs'])) if matrix_idx is None else [matrix_idx])
# Load weight/biases initialization module
def call_init_func(self, param_name, shapes, input_idx=-1):
dic = self.dic
func_pat = re.compile('^([^\.]+)\.([^\(\)]+)\s*(?:\(([^,]+(?:,[^,]+)*)\))?$')
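        # Expected format (illustrative names): "mymodule.my_init" or "mymodule.my_init(0.1,uniform)"
        # -> module name, function name, optional comma-separated parameter list.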
m = func_pat.match(dic[param_name])
if not m:
raise LayerParsingError("Layer '%s': '%s' parameter must have format 'moduleName.functionName(param1,param2,...)'; got: %s." % (dic['name'], param_name, dic['initWFunc']))
module, func = m.group(1), m.group(2)
params = m.group(3).split(',') if m.group(3) is not None else []
try:
mod = __import__(module)
return getattr(mod, func)(dic['name'], input_idx, shapes, params=params) if input_idx >= 0 else getattr(mod, func)(dic['name'], shapes, params=params)
except (ImportError, AttributeError, TypeError), e:
raise LayerParsingError("Layer '%s': %s." % (dic['name'], e))
def make_weights(self, initW, rows, cols, order='C'):
dic = self.dic
dic['weights'], dic['weightsInc'] = [], []
if dic['initWFunc']: # Initialize weights from user-supplied python function
# Initialization function is supplied in the format
# module.func
for i in xrange(len(dic['inputs'])):
dic['weights'] += [self.call_init_func('initWFunc', (rows[i], cols[i]), input_idx=i)]
if type(dic['weights'][i]) != n.ndarray:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], i, dic['initWFunc'], type(dic['weights'][i])))
if dic['weights'][i].dtype != n.float32:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must weight matrices consisting of single-precision floats. Got: %s." % (dic['name'], i, dic['initWFunc'], dic['weights'][i].dtype))
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s[%d]': weight matrix returned by weight initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], i, dic['initWFunc'], (rows[i], cols[i]), dic['weights'][i].shape))
# Convert to desired order
dic['weights'][i] = n.require(dic['weights'][i], requirements=order)
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
print "Layer '%s[%d]' initialized weight matrices from function %s" % (dic['name'], i, dic['initWFunc'])
else:
for i in xrange(len(dic['inputs'])):
if dic['weightSourceLayers'][i] != '': # Shared weight matrix
src_layer = self.prev_layers[dic['weightSourceLayers'][i]] if dic['weightSourceLayers'][i] != dic['name'] else dic
dic['weights'] += [src_layer['weights'][dic['weightSourceMatrixIndices'][i]]]
dic['weightsInc'] += [src_layer['weightsInc'][dic['weightSourceMatrixIndices'][i]]]
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s': weight sharing source matrix '%s' has shape %dx%d; should be %dx%d."
% (dic['name'], dic['weightSource'][i], dic['weights'][i].shape[0], dic['weights'][i].shape[1], rows[i], cols[i]))
print "Layer '%s' initialized weight matrix %d from %s" % (dic['name'], i, dic['weightSource'][i])
else:
dic['weights'] += [n.array(initW[i] * nr.randn(rows[i], cols[i]), dtype=n.single, order=order)]
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
def make_biases(self, rows, cols, order='C'):
dic = self.dic
if dic['initBFunc']:
dic['biases'] = self.call_init_func('initBFunc', (rows, cols))
if type(dic['biases']) != n.ndarray:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], dic['initBFunc'], type(dic['biases'])))
if dic['biases'].dtype != n.float32:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object consisting of single-precision floats. Got: %s." % (dic['name'], dic['initBFunc'], dic['biases'].dtype))
if dic['biases'].shape != (rows, cols):
raise LayerParsingError("Layer '%s': bias vector returned by bias initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], dic['initBFunc'], (rows, cols), dic['biases'].shape))
dic['biases'] = n.require(dic['biases'], requirements=order)
print "Layer '%s' initialized bias vector from function %s" % (dic['name'], dic['initBFunc'])
else:
dic['biases'] = dic['initB'] * n.ones((rows, cols), order=order, dtype=n.single)
dic['biasesInc'] = n.zeros_like(dic['biases'])
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['gradConsumer'] = True
dic['usesActs'] = False
dic['initW'] = mcp.safe_get_float_list(name, 'initW', default=0.01)
dic['initB'] = mcp.safe_get_float(name, 'initB', default=0)
dic['initWFunc'] = mcp.safe_get(name, 'initWFunc', default="")
dic['initBFunc'] = mcp.safe_get(name, 'initBFunc', default="")
# Find shared weight matrices
dic['weightSource'] = mcp.safe_get_list(name, 'weightSource', default=[''] * len(dic['inputs']))
self.verify_num_params(['initW'])
self.verify_num_params(['weightSource'], auto_expand=False)
dic['weightSourceLayers'] = []
dic['weightSourceMatrixIndices'] = []
for i, src_name in enumerate(dic['weightSource']):
src_layer_matrix_idx = -1
src_layer_name = ''
if src_name != '':
src_layer_match = WeightLayerParser.get_layer_name(src_name)
if src_layer_match is None:
raise LayerParsingError("Layer '%s': unable to parse weight sharing source '%s'. Format is layer[idx] or just layer, in which case idx=0 is used." % (name, src_name))
src_layer_name = src_layer_match[0]
src_layer_matrix_idx = int(src_layer_match[1]) if src_layer_match[1] is not None else 0
if src_layer_name not in prev_layers and src_layer_name != name:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' does not exist." % (name, src_layer_name))
# src_layer_idx = prev_names.index(src_layer_name) if src_layer_name != name else len(prev_names)
src_layer = prev_layers[src_layer_name] if src_layer_name != name else dic
if src_layer['gpu'] != dic['gpu']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' runs on GPUs %s, while '%s' runs on GPUs %s." % (name, src_layer_name, src_layer['gpu'], name, dic['gpu']))
if src_layer['type'] != dic['type']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' is of type '%s'; should be '%s'." % (name, src_layer_name, src_layer['type'], dic['type']))
if src_layer_name != name and len(src_layer['weights']) <= src_layer_matrix_idx:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' has %d weight matrices, but '%s[%d]' requested." % (name, src_layer_name, len(src_layer['weights']), src_name, src_layer_matrix_idx))
if src_layer_name == name and src_layer_matrix_idx >= i:
raise LayerParsingError("Layer '%s': weight sharing source '%s[%d]' not defined yet." % (name, name, src_layer_matrix_idx))
dic['weightSourceLayers'] += [src_layer_name]
dic['weightSourceMatrixIndices'] += [src_layer_matrix_idx]
return dic
class FCLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = mcp.safe_get_int(name, 'outputs')
dic['weightsPerFilter'] = dic['numInputs']
self.verify_num_range(dic['outputs'], 'outputs', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']] * len(dic['numInputs']), order='F')
self.make_biases(1, dic['outputs'], order='F')
print "Initialized fully-connected layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SplitFCLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['parts'] = mcp.safe_get_int(name, 'parts')
dic['outputs'] = mcp.safe_get_int(name, 'outputs') * dic['parts']
dic['weightsPerFilter'] = dic['numInputs']
self.verify_num_range(dic['parts'], 'parts', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']/dic['parts']] * len(dic['numInputs']), order='F')
self.make_biases(1, dic['outputs'], order='F')
for i in xrange(len(dic['numInputs'])):
self.verify_divisible(dic['numInputs'][i], dic['parts'], 'numInputs', 'parts', input_idx=i)
print "Initialized split fully-connected layer '%s' on GPUs %s, producing %d outputs in %d parts" % (name, dic['gpus'], dic['outputs'], dic['parts'])
return dic
class LocalLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
# Convert convolutional layer to unshared, locally-connected layer
@staticmethod
def conv_to_local(layers, lname):
layer = layers[lname]
if layer['type'] == 'conv':
layer['type'] = 'local'
for inp,inpname in enumerate(layer['inputs']):
src_layer_name = layer['weightSourceLayers'][inp]
if src_layer_name != '':
src_layer = layers[src_layer_name]
src_matrix_idx = layer['weightSourceMatrixIndices'][inp]
LocalLayerParser.conv_to_local(layers, src_layer_name)
for w in ('weights', 'weightsInc'):
layer[w][inp] = src_layer[w][src_matrix_idx]
else:
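                    # Not shared with another layer: tile the single convolutional filter bank once per output
                    # module to build the unshared local weight matrix of shape (modules * filterChannels * filterPixels, filters).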
layer['weights'][inp] = n.require(n.reshape(n.tile(n.reshape(layer['weights'][inp], (1, n.prod(layer['weights'][inp].shape))), (layer['modules'], 1)),
(layer['modules'] * layer['filterChannels'][inp] * layer['filterPixels'][inp], layer['filters'])),
requirements='C')
layer['weightsInc'][inp] = n.zeros_like(layer['weights'][inp])
if layer['sharedBiases']:
layer['biases'] = n.require(n.repeat(layer['biases'], layer['modules'], axis=0), requirements='C')
layer['biasesInc'] = n.zeros_like(layer['biases'])
print "Converted layer '%s' from convolutional to unshared, locally-connected" % layer['name']
# Also call this function on any layers sharing my weights
            for l in layers.values():
                if 'weightSourceLayers' in l and lname in l['weightSourceLayers']:
                    LocalLayerParser.conv_to_local(layers, l['name'])
return layer
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesActs'] = False
# Supplied values
dic['channels'] = mcp.safe_get_int_list(name, 'channels')
dic['padding'] = mcp.safe_get_int_list(name, 'padding', default=[0]*len(dic['inputs']))
dic['stride'] = mcp.safe_get_int_list(name, 'stride', default=[1]*len(dic['inputs']))
dic['filterSize'] = mcp.safe_get_int_list(name, 'filterSize')
dic['filters'] = mcp.safe_get_int_list(name, 'filters')
dic['groups'] = mcp.safe_get_int_list(name, 'groups', default=[1]*len(dic['inputs']))
dic['initW'] = mcp.safe_get_float_list(name, 'initW')
dic['initCFunc'] = mcp.safe_get(name, 'initCFunc', default='')
dic['modulesX'] = mcp.safe_get_int(name, 'modulesX', default=0)
self.verify_num_params(['channels', 'padding', 'stride', 'filterSize', \
'filters', 'groups', 'initW'])
self.verify_num_range(dic['stride'], 'stride', 1, None)
self.verify_num_range(dic['filterSize'],'filterSize', 1, None)
self.verify_num_range(dic['padding'], 'padding', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['groups'], 'groups', 1, None)
self.verify_num_range(dic['modulesX'], 'modulesX', 0, None)
for i in xrange(len(dic['filters'])):
self.verify_divisible(dic['filters'][i], 16, 'filters', input_idx=i)
# Computed values
dic['imgPixels'] = [numInputs/channels for numInputs,channels in zip(dic['numInputs'], dic['channels'])]
dic['imgSize'] = [int(n.sqrt(imgPixels)) for imgPixels in dic['imgPixels']]
self.verify_num_range(dic['imgSize'], 'imgSize', 1, None)
dic['filters'] = [filters*groups for filters,groups in zip(dic['filters'], dic['groups'])]
dic['filterPixels'] = [filterSize**2 for filterSize in dic['filterSize']]
if dic['modulesX'] <= 0:
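            # Number of filter applications (modules) along each image dimension: 1 + ceil((2*padding + imgSize - filterSize) / stride).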
dic['modulesX'] = [1 + int(ceil((2*padding + imgSize - filterSize) / float(stride))) for padding,imgSize,filterSize,stride in zip(dic['padding'], dic['imgSize'], dic['filterSize'], dic['stride'])]
else:
dic['modulesX'] = [dic['modulesX']] * len(dic['inputs'])
dic['filterChannels'] = [channels/groups for channels,groups in zip(dic['channels'], dic['groups'])]
if len(set(dic['modulesX'])) != 1 or len(set(dic['filters'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must produce equally-dimensioned output. Dimensions are: %s." % (name, ", ".join("%dx%dx%d" % (filters, modulesX, modulesX) for filters,modulesX in zip(dic['filters'], dic['modulesX']))))
dic['modulesX'] = dic['modulesX'][0]
dic['modules'] = dic['modulesX']**2
dic['filters'] = dic['filters'][0]
dic['outputs'] = dic['modules'] * dic['filters']
# dic['filterConns'] = [[]] * len(dic['inputs'])
for i in xrange(len(dic['inputs'])):
if dic['numInputs'][i] % dic['imgPixels'][i] != 0 or dic['imgSize'][i] * dic['imgSize'][i] != dic['imgPixels'][i]:
raise LayerParsingError("Layer '%s[%d]': has %-d dimensional input, not interpretable as square %d-channel images" % (name, i, dic['numInputs'][i], dic['channels'][i]))
if dic['channels'][i] > 3 and dic['channels'][i] % 4 != 0:
raise LayerParsingError("Layer '%s[%d]': number of channels must be smaller than 4 or divisible by 4" % (name, i))
# if dic['filterSize'][i] > totalPadding[i] + dic['imgSize'][i]:
# raise LayerParsingError("Layer '%s[%d]': filter size (%d) greater than image size + padding (%d)" % (name, i, dic['filterSize'][i], dic['padding'][i] + dic['imgSize'][i]))
if -dic['padding'][i] + dic['stride'][i] * (dic['modulesX'] - 1) + dic['filterSize'][i] < dic['imgSize'][i]:
raise LayerParsingError("Layer '%s[%d]': %dx%d output map with padding=%d, stride=%d does not cover entire input image." % (name, i, dic['modulesX'], dic['outputsX'], dic['padding'][i], dic['stride'][i]))
if dic['groups'][i] > 1:
self.verify_divisible(dic['channels'][i], 4*dic['groups'][i], 'channels', '4 * groups', input_idx=i)
self.verify_divisible(dic['channels'][i], dic['groups'][i], 'channels', 'groups', input_idx=i)
self.verify_divisible(dic['filters'], 16*dic['groups'][i], 'filters * groups', input_idx=i)
dic['padding'][i] = -dic['padding'][i]
# dic['overSample'] = [groups*filterChannels/channels for groups,filterChannels,channels in zip(dic['groups'], dic['filterChannels'], dic['channels'])]
dic['weightsPerFilter'] = [fc * (fz**2) for fc, fz in zip(dic['filterChannels'], dic['filterSize'])]
return dic
class ConvLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def add_params(self, mcp):
LocalLayerParser.add_params(self, mcp)
self.dic['wcNormMax'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMax', default=[0.0] * len(self.dic['inputs']))
self.dic['wcNormMin'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMin', default=[0.0] * len(self.dic['inputs']))
self.verify_num_params(['wcNormMax', 'wcNormMin'])
for min,max in zip(self.dic['wcNormMin'], self.dic['wcNormMax']):
if min > max:
raise LayerParsingError("Layer '%s': wcNormMin must be <= wcNormMax." % (self.dic['name']))
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
dic['sumWidth'] = mcp.safe_get_int(name, 'sumWidth')
dic['sharedBiases'] = mcp.safe_get_bool(name, 'sharedBiases', default=True)
num_biases = dic['filters'] if dic['sharedBiases'] else dic['modules']*dic['filters']
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
self.make_weights(dic['initW'], eltmult(dic['filterPixels'], dic['filterChannels']), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(num_biases, 1, order='C')
print "Initialized convolutional layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class LocalUnsharedLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
scmult = lambda x, lst: [x * l for l in lst]
self.make_weights(dic['initW'], scmult(dic['modules'], eltmult(dic['filterPixels'], dic['filterChannels'])), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(dic['modules'] * dic['filters'], 1, order='C')
print "Initialized locally-connected layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class DataLayerParser(LayerParser):
def __init__(self):
LayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['dataIdx'] = mcp.safe_get_int(name, 'dataIdx')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['end'] = mcp.safe_get_int(name, 'end', default=model.train_data_provider.get_data_dims(idx=dic['dataIdx']))
dic['outputs'] = dic['end'] - dic['start']
# dic['usesActs'] = False
print "Initialized data layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
class SoftmaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['inputLayers'][0]['outputs']
print "Initialized softmax layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class ConcatentionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
print "Initialized concatenation layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class PassThroughLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
# Note: this doesn't verify all the necessary constraints. Layer construction may still fail in C++ code.
# For example, it does not verify that every layer only has one pass-through parent. Obviously having
# two such parents is incoherent.
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
# if len(dic['inputLayers']) == 1:
# raise LayerParsingError("Layer %s: pass-through layer must have more than one input." % dic['name'])
if len(dic['gpu']) != len(dic['inputLayers'][0]['gpu']):
raise LayerParsingError("Layer '%s': number of replicas in pass-through layer must be equivalent to number of replicas in input layers." % dic['name'])
for inp in dic['inputLayers']:
conflicting_layers = [l for l in prev_layers.values() if l['type'] == 'pass' and inp['name'] in l['inputs'] and len(set(dic['gpu']).intersection(set(l['gpu']))) > 0]
if len(conflicting_layers) > 0:
raise LayerParsingError("Layer '%s' conflicts with layer '%s'. Both pass-through layers take layer '%s' as input and operate on an overlapping set of GPUs." % (dic['name'], conflicting_layers[0]['name'], inp['name']))
dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
# dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
print "Initialized pass-through layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class PoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputsX'] = mcp.safe_get_int(name, 'outputsX', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
# Avg pooler does not use its acts or inputs
dic['usesActs'] = dic['pool'] != 'avg'
dic['usesInputs'] = dic['pool'] != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
if dic['pool'] == 'avg':
dic['sum'] = mcp.safe_get_bool(name, 'sum', default=False)
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['sizeX'])
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
if LayerWithInputParser.grad_consumers_below(dic):
self.verify_divisible(dic['channels'], 16, 'channels')
self.verify_str_in(dic['pool'], 'pool', ['max', 'maxabs', 'avg'])
self.verify_img_size()
if dic['outputsX'] <= 0:
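            # When not given explicitly, derive the number of pooling output positions per dimension from start, sizeX and stride.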
dic['outputsX'] = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1;
dic['outputs'] = dic['outputsX']**2 * dic['channels']
print "Initialized %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class CrossMapPoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputChannels'] = mcp.safe_get_int(name, 'outputs', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
dic['requiresParams'] = False
# Avg pooler does not use its acts or inputs
        dic['usesActs'] = dic['pool'] != 'avg'
        dic['usesInputs'] = dic['pool'] != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['outputChannels'] * dic['imgPixels']
self.verify_num_range(dic['size'], 'size', 1, dic['channels'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['size'])
self.verify_num_range(dic['outputChannels'], 'outputChannels', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['start'], 'start', None, 0)
self.verify_str_in(dic['pool'], 'pool', ['max'])
self.verify_img_size()
covered_chans = dic['start'] + (dic['outputChannels'] - 1) * dic['stride'] + dic['size']
if covered_chans < dic['channels']:
raise LayerParsingError("Layer '%s': cross-map pooling with start=%d, stride=%d, size=%d, outputs=%d covers only %d of %d input channels." % \
(name, dic['start'], dic['stride'], dic['size'], dic['outputChannels'], covered_chans, dic['channels']))
print "Initialized cross-map %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['outputChannels'])
return dic
class NormLayerParser(LayerWithInputParser):
RESPONSE_NORM = 'response'
CONTRAST_NORM = 'contrast'
CROSSMAP_RESPONSE_NORM = 'cross-map response'
def __init__(self, norm_type):
LayerWithInputParser.__init__(self, num_inputs=1)
self.norm_type = norm_type
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['scale'] = mcp.safe_get_float(name, 'scale')
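        # Normalize the configured scale by the neighbourhood size: 'size' maps for cross-map norm, size^2 pixels otherwise.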
dic['scale'] /= dic['size'] if self.norm_type == self.CROSSMAP_RESPONSE_NORM else dic['size']**2
dic['pow'] = mcp.safe_get_float(name, 'pow')
dic['minDiv'] = mcp.safe_get_float(name, 'minDiv', default=1.0)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['blocked'] = mcp.safe_get_bool(name, 'blocked', default=False)
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
# Contrast normalization layer does not use its inputs
dic['usesInputs'] = self.norm_type != self.CONTRAST_NORM
self.verify_num_range(dic['channels'], 'channels', 1, None)
if self.norm_type == self.CROSSMAP_RESPONSE_NORM:
self.verify_num_range(dic['size'], 'size', 2, dic['channels'])
if dic['channels'] % 16 != 0:
raise LayerParsingError("Layer '%s': number of channels must be divisible by 16 when using crossMap" % name)
else:
self.verify_num_range(dic['size'], 'size', 1, dic['imgSize'])
if self.norm_type != self.CROSSMAP_RESPONSE_NORM and dic['channels'] > 3 and dic['channels'] % 4 != 0:
raise LayerParsingError("Layer '%s': number of channels must be smaller than 4 or divisible by 4" % name)
self.verify_img_size()
dic['outputs'] = dic['imgPixels'] * dic['channels']
print "Initialized %s-normalization layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (self.norm_type, name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class CostParser(LayerWithInputParser):
def __init__(self, num_inputs=-1):
LayerWithInputParser.__init__(self, num_inputs=num_inputs)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
# Stored as string because python can't pickle lambda functions
dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs]'
dic['children'] = mcp.safe_get_list(name, 'children', default=[])
# Aggregated costs only produce outputs which are additive.
for c in dic['children']:
if c not in prev_layers:
raise LayerParsingError("Layer '%s': child cost layer '%s' not defined" % (name, c))
if prev_layers[c]['type'] != dic['type']:
raise LayerParsingError("Layer '%s': child cost layer '%s' must have same type as parent" % (name, c))
prev_layers[c]['aggregated'] = 1
dic['aggregated'] = dic['children'] != []
del dic['neuron']
return dic
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['coeff'] = mcp.safe_get_float(name, 'coeff')
dic['gradConsumer'] = dic['coeff'] > 0
class CrossEntCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
if dic['inputLayers'][1]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': Second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': Softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
print "Initialized cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class LogregCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['topk'] = mcp.safe_get_int(name, 'topk', default=1)
if dic['topk'] > dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': parameter 'topk'must not have value greater than the number of classess." % (name))
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
if dic['numInputs'][0] != 1: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of first input must be 1" % name)
if dic['inputLayers'][1]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
print "Initialized logistic regression cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class BinomialCrossEntCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
self.dic['posWeight'] = mcp.safe_get_float(self.dic['name'], 'posWeight', default=1.0)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': both inputs must produce the same number of outputs" % (name))
if 'neuron' not in dic['inputLayers'][1] or dic['inputLayers'][1]['neuron'] != 'logistic':
print "WARNING: Layer '%s': input '%s' is not logistic, results may not be what you intend." % (dic['name'], dic['inputs'][1])
if dic['type'] == 'cost.bce':
print "Initialized binomial cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
dic['computeSoftmaxErrorRate'] = True
return dic
class DetectionCrossEntCostParser(BinomialCrossEntCostParser):
def __init__(self):
BinomialCrossEntCostParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = BinomialCrossEntCostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
dic['computeSoftmaxErrorRate'] = False
dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs[:2]] + [(class_cost[2] / class_cost[j] if class_cost[j] > 0 else n.inf) for class_cost in [costs[2:][i*3:(i+1)*3] for i in range(len(costs[2:])/3)] for j in range(2)]'
dic['outputFilterFormatter'] = 'lambda self,costs: "(crossent) %.6f, (err) %.6f, " % (costs[0], costs[1]) + ", ".join("(%s) %.6f, %.6f" % (self.train_data_provider.batch_meta["label_names"][i/2-1],costs[i],costs[i+1]) for i in xrange(2, len(costs), 2))'
print "Initialized detection cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class SumOfSquaresCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
print "Initialized sum-of-squares cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
# All the layer parsers
layer_parsers = {'data' : lambda : DataLayerParser(),
'fc': lambda : FCLayerParser(),
'sfc': lambda : SplitFCLayerParser(),
'conv': lambda : ConvLayerParser(),
'local': lambda : LocalUnsharedLayerParser(),
'softmax': lambda : SoftmaxLayerParser(),
'eltsum': lambda : EltwiseSumLayerParser(),
'eltmax': lambda : EltwiseMaxLayerParser(),
'sum': lambda : SumLayerParser(),
'neuron': lambda : NeuronLayerParser(),
'pool': lambda : PoolLayerParser(),
'cmpool': lambda : CrossMapPoolLayerParser(),
'rnorm': lambda : NormLayerParser(NormLayerParser.RESPONSE_NORM),
'cnorm': lambda : NormLayerParser(NormLayerParser.CONTRAST_NORM),
'cmrnorm': lambda : NormLayerParser(NormLayerParser.CROSSMAP_RESPONSE_NORM),
'nailbed': lambda : NailbedLayerParser(),
'blur': lambda : GaussianBlurLayerParser(),
'href': lambda : HorizontalReflectionLayerParser(),
'resize': lambda : ResizeLayerParser(),
'rgb2yuv': lambda : RGBToYUVLayerParser(),
'rgb2lab': lambda : RGBToLABLayerParser(),
'rscale': lambda : RandomScaleLayerParser(),
'crop': lambda : CropLayerParser(),
'concat': lambda : ConcatentionLayerParser(),
'pass': lambda : PassThroughLayerParser(),
'dropout': lambda : DropoutLayerParser(),
'dropout2': lambda : Dropout2LayerParser(),
'cost.logreg': lambda : LogregCostParser(),
'cost.crossent': lambda : CrossEntCostParser(),
'cost.bce': lambda : BinomialCrossEntCostParser(),
'cost.dce': lambda : DetectionCrossEntCostParser(),
'cost.sum2': lambda : SumOfSquaresCostParser()}
# All the neuron parsers
# This isn't a name --> parser mapping like the layer parsers above, because neurons don't have fixed names.
# A user may write tanh[0.5,0.25], etc.
neuron_parsers = sorted([NeuronParser('ident', 'f(x) = x', uses_acts=False, uses_inputs=False),
NeuronParser('logistic', 'f(x) = 1 / (1 + e^-x)', uses_acts=True, uses_inputs=False),
NeuronParser('abs', 'f(x) = |x|', uses_acts=False, uses_inputs=True),
NeuronParser('relu', 'f(x) = max(0, x)', uses_acts=True, uses_inputs=False),
NeuronParser('nrelu', 'f(x) = max(0, x) + noise', uses_acts=True, uses_inputs=False),
NeuronParser('softrelu', 'f(x) = log(1 + e^x)', uses_acts=True, uses_inputs=False),
NeuronParser('square', 'f(x) = x^2', uses_acts=False, uses_inputs=True),
NeuronParser('sqrt', 'f(x) = sqrt(x)', uses_acts=True, uses_inputs=False),
ParamNeuronParser('log[a]', 'f(x) = log(a + x)', uses_acts=False, uses_inputs=True),
ParamNeuronParser('tanh[a,b]', 'f(x) = a * tanh(b * x)', uses_acts=True, uses_inputs=False),
ParamNeuronParser('brelu[a]', 'f(x) = min(a, max(0, x))', uses_acts=True, uses_inputs=False),
ParamNeuronParser('linear[a,b]', 'f(x) = a * x + b', uses_acts=True, uses_inputs=False),
ParamNeuronParser('drelu[a]', 'f(x) = x - a * tanh(x / a)', uses_acts=False, uses_inputs=True)],
key=lambda x:x.type)
# Learning rate schedules
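# Example config usage (illustrative): epsW=const[base=0.001] or epsW=linear[base=0.01;tgtFactor=100];
# the f/i prefixes in the templates below appear to denote float/int parameter types.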
lrs_parsers = sorted([ParamParser('const[fbase]'),
ParamParser('linear[fbase;ftgtFactor]'),
ParamParser('exp[fbase;ftgtFactor]'),
ParamParser('dexp[fbase;ftgtFactor;inumSteps]')])
| hgaspar/cuda-convnet2 | layer.py | Python | apache-2.0 | 82,481 | ["Gaussian", "NEURON"] | 8deb05a78844813bf5d2758584c3179192e3f25d4f7ed212856973883fa574e9
#!/usr/bin/python
import pymol
from pymol import stored
from pymol import cmd, CmdException
cmd=pymol.cmd
import export_to_gl as glmol
def out_atoms(modelName):
    #print modelName
    pass # stub; body not implemented
| S-John-S/MAT | sb_script_gl.py | Python | mit | 181 | ["PyMOL"] | 0c076cb4abb60cc00faaf67f180414675a101b0e9a44bb5e7e656b1b44f8916e
from collections import defaultdict
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
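# Level-order (BFS) serialization: '#' stands for a missing child; trailing '#'s are trimmed.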
def serialize(root):
if root is None:
return '#'
q = [root]
i, j = 0, 0
while i <= j:
node = q[i]
i += 1
if node is not None:
q.extend([node.left, node.right])
j += 2
while q[-1] is None:
q.pop()
res = ['#' if node is None else str(node.val) for node in q]
return ','.join(res)
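# Inverse of serialize(): rebuild the tree level by level, handing out children to each node in order.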
def deserialize(data):
if data == '#':
return None
nodes = [None if x == '#' else TreeNode(x) for x in data.split(',')]
i, j = 0, 0
n = len(nodes)
while i < n and j < n:
j += 1
if j < n:
nodes[i].left = nodes[j]
j += 1
if j < n:
nodes[i].right = nodes[j]
i += 1
while i < n and nodes[i] is None:
i += 1
return nodes[0]
class Solution(object):
def findDuplicateSubtrees(self, root):
"""
:type root: TreeNode
:rtype: List[TreeNode]
"""
if not root:
return []
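        # Serialize every subtree into a canonical "val,left,right" string; subtrees that map to the
        # same string are structural duplicates, so report one representative node per group.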
tree_map = defaultdict(list)
def visit(node):
if node is None:
return 'null'
left = visit(node.left)
right = visit(node.right)
res = '%s,%s,%s' % (node.val, left, right)
tree_map[res].append(node)
return res
visit(root)
return [v[0] for v in tree_map.values() if len(v) > 1]
if __name__ == "__main__":
sol = Solution()
root = deserialize('0,0,0,0,#,#,0,#,#,#,0')
res = sol.findDuplicateSubtrees(root)
for r in res:
print(serialize(r))
| shenfei/oj_codes | leetcode/python/n652_Find_Duplicate_Subtrees.py | Python | mit | 1,746 | ["VisIt"] | 5b5df5dbc6cc2d991e970140bcec388fc063ff4d5b72a1b760a911eb3a9a620b
'''
DQN approach for different RL problems
as part of the basic series on reinforcement learning @
https://github.com/vmayoral/basic_reinforcement_learning
FIXME:
- ANN not performing at all
Inspired by
- https://gym.openai.com/evaluations/eval_kWknKOkPQ7izrixdhriurA
- http://outlace.com/Reinforcement-Learning-Part-3/
@author: Victor Mayoral Vilches <[email protected]>
'''
import gym
import numpy
import random
import pandas
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop, SGD
monitor = False
class QLearn:
def __init__(self, actions, epsilon, alpha, gamma):
self.q = {}
self.epsilon = epsilon # exploration constant
        self.alpha = alpha      # learning rate
self.gamma = gamma # discount factor
self.actions = actions
def getQ(self, state, action):
return self.q.get((state, action), 0.0)
def learnQ(self, state, action, reward, value):
'''
Q-learning:
            Q(s, a) += alpha * (reward(s, a) + gamma * max_a' Q(s', a') - Q(s, a))
'''
oldv = self.q.get((state, action), None)
if oldv is None:
self.q[(state, action)] = reward
else:
self.q[(state, action)] = oldv + self.alpha * (value - oldv)
def chooseAction(self, state, return_q=False):
q = [self.getQ(state, a) for a in self.actions]
maxQ = max(q)
if random.random() < self.epsilon:
minQ = min(q); mag = max(abs(minQ), abs(maxQ))
# add random values to all the actions, recalculate maxQ
q = [q[i] + random.random() * mag - .5 * mag for i in range(len(self.actions))]
maxQ = max(q)
count = q.count(maxQ)
# In case there're several state-action max values
# we select a random one among them
if count > 1:
best = [i for i in range(len(self.actions)) if q[i] == maxQ]
i = random.choice(best)
else:
i = q.index(maxQ)
action = self.actions[i]
if return_q: # if they want it, give it!
return action, q
return action
def learn(self, state1, action1, reward, state2):
maxqnew = max([self.getQ(state2, a) for a in self.actions])
self.learnQ(state1, action1, reward, reward + self.gamma*maxqnew)
def build_state(features):
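    # Concatenate the per-feature bin indices into one integer id, e.g. bins (3, 7, 2, 5) -> state 3725.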
return int("".join(map(lambda feature: str(int(feature)), features)))
def to_bin(value, bins):
return numpy.digitize(x=[value], bins=bins)[0]
class DQN:
def __init__(self, actions, epsilon, alpha, gamma):
# instead of a dictionary, we'll be using
# a neural network
# self.q = {}
self.epsilon = epsilon # exploration constant
        self.alpha = alpha      # learning rate (not used directly; the Keras optimizer has its own)
self.gamma = gamma # discount factor
self.actions = actions
# Build the neural network
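        # 4 state inputs -> 100 ReLU units -> 80 ReLU units -> 2 linear outputs (one Q-value per action).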
self.network = Sequential()
# self.network.add(Dense(100, init='lecun_uniform', input_shape=(4,)))
self.network.add(Dense(100, init='lecun_uniform', input_shape=(4,)))
self.network.add(Activation('relu'))
# self.network.add(Activation('tanh'))
# self.network.add(Dropout(0.2))
self.network.add(Dense(80, init='lecun_uniform'))
self.network.add(Activation('relu'))
# # self.network.add(Activation('tanh'))
# # self.network.add(Dropout(0.2))
self.network.add(Dense(2, init='lecun_uniform'))
self.network.add(Activation('linear')) #linear output so we can have range of real-valued outputs
# rms = RMSprop()
rms = SGD()
self.network.compile(loss='mse', optimizer=rms)
# Get a summary of the network
self.network.summary()
def learnQ(self, state, action, reward, newState, terminal=False):
'''
DQN learning:
Instead of the Q-learning:
            Q(s, a) += alpha * (reward(s, a) + gamma * max_a' Q(s', a') - Q(s, a))
we'll be updating the network following:
            target = reward(s, a) + gamma * max_a' Q(s', a')
'''
# oldv = self.q.get((state, action), None)
# if oldv is None:
# self.q[(state, action)] = reward
# else:
# self.q[(state, action)] = oldv + self.alpha * (value - oldv)
state = numpy.asarray(state)
state = state.reshape(1,4)
newState = numpy.asarray(newState)
newState = newState.reshape(1,4)
qval = self.network.predict(state, batch_size=1)
newQ = self.network.predict(newState, batch_size=1)
# if (qval==newQ).all():
# pass
# else:
# print("NOT EQUAL!")
maxNewQ = numpy.max(newQ)
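        # Regression target: start from the network's current predictions and overwrite only the entry
        # for the action that was actually taken; the other output keeps its predicted value.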
y = numpy.zeros((1,2))
y[:] = qval[:]
if terminal:
newReward = reward
else:
newReward = (reward + (self.gamma * maxNewQ))
y[0][action] = newReward #target output
self.network.fit(state, y, batch_size=1, nb_epoch=1, verbose=0)
# print("\tstate: "+str(state))
# print("\tnewState: "+str(newState))
# print("\taction: "+str(action))
# print("\tqval: "+str(qval))
# print("\tnewQval: "+str(newQ))
# print("\treward: "+str(reward))
# print("\tnewReward: "+str(newReward))
# print("\ty: "+str(y))
def chooseAction(self, state, return_q=False):
if (random.random() < self.epsilon): #choose random action
action = numpy.random.randint(0,2)
else: #choose best action from Q(s,a) values
# convert to a numpy array
state = numpy.asarray(state)
state = state.reshape(1,4)
# Let's run our Q function on state "state" to get Q values for all possible actions
qvals = self.network.predict(state, batch_size=1)
# Select the neuron that fired the most
action = numpy.argmax(qvals)
q = qvals[0][action]
# if return_q: # if they want it, give it!
# return action, q
return action
def to_bin(value, bins):
return numpy.digitize(x=[value], bins=bins)[0]
if __name__ == '__main__':
env = gym.make('CartPole-v0')
if monitor:
env.monitor.start('/tmp/cartpole-experiment-1', force=True)
# video_callable=lambda count: count % 10 == 0)
epochs = 500
goal_average_steps = 195
max_number_of_steps = 200
last_time_steps = numpy.ndarray(0)
# Discretization of the space
n_bins = 10
n_bins_angle = 10
number_of_features = env.observation_space.shape[0]
last_time_steps = numpy.ndarray(0)
# Number of states is huge so in order to simplify the situation
# we discretize the space to: 10 ** number_of_features
cart_position_bins = pandas.cut([-2.4, 2.4], bins=n_bins, retbins=True)[1][1:-1]
pole_angle_bins = pandas.cut([-2, 2], bins=n_bins_angle, retbins=True)[1][1:-1]
cart_velocity_bins = pandas.cut([-1, 1], bins=n_bins, retbins=True)[1][1:-1]
angle_rate_bins = pandas.cut([-3.5, 3.5], bins=n_bins_angle, retbins=True)[1][1:-1]
# The Deep Q-learn algorithm
dqn = DQN(actions=range(env.action_space.n),
alpha=0.5, gamma=0.90, epsilon=0.99)
# The Q-learn algorithm
qlearn = QLearn(actions=range(env.action_space.n),
alpha=0.5, gamma=0.90, epsilon=0.1)
for i_episode in xrange(epochs):
observation = env.reset()
cart_position, pole_angle, cart_velocity, angle_rate_of_change = observation
state = build_state([to_bin(cart_position, cart_position_bins),
to_bin(pole_angle, pole_angle_bins),
to_bin(cart_velocity, cart_velocity_bins),
to_bin(angle_rate_of_change, angle_rate_bins)])
state_raw = [to_bin(cart_position, cart_position_bins),
to_bin(pole_angle, pole_angle_bins),
to_bin(cart_velocity, cart_velocity_bins),
to_bin(angle_rate_of_change, angle_rate_bins)]
cumulated_reward = 0
for t in xrange(max_number_of_steps):
# env.render()
# Pick an action based on the current state
action_qlearn = qlearn.chooseAction(state)
action_dqn = dqn.chooseAction(state_raw)
# print("\t\tdqn: "+str(action_dqn))
# print("\t\tqlearn: "+str(action_qlearn))
# action = action_qlearn
action = action_dqn
# Execute the action and get feedback
observation, reward, done, info = env.step(action)
# Digitize the observation to get a state
cart_position, pole_angle, cart_velocity, angle_rate_of_change = observation
nextState = build_state([to_bin(cart_position, cart_position_bins),
to_bin(pole_angle, pole_angle_bins),
to_bin(cart_velocity, cart_velocity_bins),
to_bin(angle_rate_of_change, angle_rate_bins)])
nextState_raw = [to_bin(cart_position, cart_position_bins),
to_bin(pole_angle, pole_angle_bins),
to_bin(cart_velocity, cart_velocity_bins),
to_bin(angle_rate_of_change, angle_rate_bins)]
# # If out of bounds
# if (cart_position > 2.4 or cart_position < -2.4):
# reward = -200
# dqn.learn(state, action, reward, nextState)
# print("Out of bounds, reseting")
# break
if not(done):
dqn.learnQ(state_raw, action, reward, nextState_raw)
qlearn.learn(state, action, reward, nextState)
state = nextState
cumulated_reward += reward
else:
# Q-learn stuff
reward = -200
dqn.learnQ(state_raw, action, reward, nextState_raw, done)
qlearn.learn(state, action, reward, nextState)
last_time_steps = numpy.append(last_time_steps, [int(t + 1)])
cumulated_reward += reward
if dqn.epsilon > 0.1:
dqn.epsilon = dqn.epsilon - (1.0/epochs)
# print(dqn.epsilon)
break
print("Episode {:d} reward score: {:0.2f}".format(i_episode, cumulated_reward))
l = last_time_steps.tolist()
l.sort()
print("Overall score: {:0.2f}".format(last_time_steps.mean()))
print("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
if monitor:
env.monitor.close()
    # gym.upload('/tmp/cartpole-experiment-1', algorithm_id='vmayoral simple Q-learning', api_key='your-key')
| vmayoral/basic_reinforcement_learning | tutorial5/tests/dq-learning-gym.py | Python | gpl-3.0 | 11,084 | ["NEURON"] | c0e9cf990ff631b2cd011818812dcf7a2650dd5714c436f61b8292ac675d5a74
from datetime import timedelta, datetime
from tapiriik.database import cachedb
from tapiriik.database.tz import TZLookup
import hashlib
import pytz
class ActivityType: # taken from RK API docs. The text values have no meaning except for debugging
Running = "Running"
Cycling = "Cycling"
MountainBiking = "MtnBiking"
Walking = "Walking"
Hiking = "Hiking"
DownhillSkiing = "DownhillSkiing"
CrossCountrySkiing = "XCSkiing"
Snowboarding = "Snowboarding"
Skating = "Skating"
Swimming = "Swimming"
Wheelchair = "Wheelchair"
Rowing = "Rowing"
Elliptical = "Elliptical"
Gym = "Gym"
Climbing = "Climbing"
Other = "Other"
def List():
# I'd argue that this is marginally better than the 200+ char hardcoded list it's replacing
type_list = []
for key, value in ActivityType.__dict__.items():
if type(value) is str and "__" not in key:
type_list.append(value)
return type_list
# The right-most element is the "most specific."
_hierarchy = [
[Cycling, MountainBiking],
[Running, Walking, Hiking]
]
def PickMostSpecific(types):
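        # Prefer the most specific variant when all candidates fall in the same hierarchy row
        # (e.g. MtnBiking over Cycling); None and Other are ignored unless nothing else remains.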
types = [x for x in types if x and x is not ActivityType.Other]
if len(types) == 0:
return ActivityType.Other
most_specific = types[0]
for definition in ActivityType._hierarchy:
if len([x for x in types if x in definition]) == len(types):
for act_type in types:
if definition.index(most_specific) < definition.index(act_type):
most_specific = act_type
return most_specific
def AreVariants(types):
for definition in ActivityType._hierarchy:
if len([x for x in types if x in definition]) == len(types):
return True
return False
class Activity:
def __init__(self, startTime=None, endTime=None, actType=ActivityType.Other, distance=None, name=None, notes=None, tz=None, lapList=None, private=False, fallbackTz=None, stationary=None, gps=None, device=None):
self.StartTime = startTime
self.EndTime = endTime
self.Type = actType
self.Laps = lapList if lapList is not None else []
self.Stats = ActivityStatistics(distance=distance)
self.TZ = tz
self.FallbackTZ = fallbackTz
self.Name = name
self.Notes = notes
self.Private = private
self.Stationary = stationary
self.GPS = gps
self.PrerenderedFormats = {}
self.Device = device
def CalculateUID(self):
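        # The UID is an MD5 of the start time rounded down to the whole second (expressed in the
        # activity's own TZ when known), presumably so the same workout hashes identically across services.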
if not self.StartTime:
return # don't even try
csp = hashlib.new("md5")
roundedStartTime = self.StartTime
roundedStartTime = roundedStartTime - timedelta(microseconds=roundedStartTime.microsecond)
if self.TZ:
roundedStartTime = roundedStartTime.astimezone(self.TZ)
csp.update(roundedStartTime.strftime("%Y-%m-%d %H:%M:%S").encode('utf-8')) # exclude TZ for compat
self.UID = csp.hexdigest()
def CountTotalWaypoints(self):
return sum([len(x.Waypoints) for x in self.Laps])
def GetFlatWaypoints(self):
return [wp for waypoints in [x.Waypoints for x in self.Laps] for wp in waypoints]
    def GetFirstWaypointWithLocation(self):
        loc_wp = None
        for lap in self.Laps:
            for wp in lap.Waypoints:
                if wp.Location is not None and wp.Location.Latitude is not None and wp.Location.Longitude is not None:
                    loc_wp = wp.Location
                    break
            if loc_wp is not None: # stop at the first located waypoint rather than re-scanning later laps
                break
        return loc_wp
def DefineTZ(self):
""" run localize() on all contained dates to tag them with the activity TZ (doesn't change values) """
if self.TZ is None:
raise ValueError("TZ not set")
if self.StartTime and self.StartTime.tzinfo is None:
self.StartTime = self.TZ.localize(self.StartTime)
if self.EndTime and self.EndTime.tzinfo is None:
self.EndTime = self.TZ.localize(self.EndTime)
for lap in self.Laps:
lap.StartTime = self.TZ.localize(lap.StartTime) if lap.StartTime.tzinfo is None else lap.StartTime
lap.EndTime = self.TZ.localize(lap.EndTime) if lap.EndTime.tzinfo is None else lap.EndTime
for wp in lap.Waypoints:
if wp.Timestamp.tzinfo is None:
wp.Timestamp = self.TZ.localize(wp.Timestamp)
self.CalculateUID()
def AdjustTZ(self):
""" run astimezone() on all contained dates to align them with the activity TZ (requires non-naive DTs) """
if self.TZ is None:
raise ValueError("TZ not set")
self.StartTime = self.StartTime.astimezone(self.TZ)
self.EndTime = self.EndTime.astimezone(self.TZ)
for lap in self.Laps:
lap.StartTime = lap.StartTime.astimezone(self.TZ)
lap.EndTime = lap.EndTime.astimezone(self.TZ)
for wp in lap.Waypoints:
wp.Timestamp = wp.Timestamp.astimezone(self.TZ)
self.CalculateUID()
def CalculateTZ(self, loc=None, recalculate=False):
if self.TZ and not recalculate:
return self.TZ
if loc is None:
loc = self.GetFirstWaypointWithLocation()
if loc is None and self.FallbackTZ is None:
raise Exception("Can't find TZ without a waypoint with a location, or a fallback TZ")
if loc is None:
# At this point, we'll resort to the fallback TZ.
if self.FallbackTZ is None:
raise Exception("Can't find TZ without a waypoint with a location, specified location, or fallback TZ")
self.TZ = self.FallbackTZ
return self.TZ
# I guess at some point it will be faster to perform a full lookup than digging through this table.
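        # Cache the lat/lng -> TZ result; TZLookup returns either an IANA zone name or a raw UTC offset
        # in hours, which is converted to minutes for pytz.FixedOffset below.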
res = TZLookup(loc.Latitude, loc.Longitude)
cachedTzData = {"TZ": res, "Latitude": loc.Latitude, "Longitude": loc.Longitude}
cachedb.tz_cache.insert(cachedTzData)
if type(cachedTzData["TZ"]) != str:
self.TZ = pytz.FixedOffset(cachedTzData["TZ"] * 60)
else:
self.TZ = pytz.timezone(cachedTzData["TZ"])
return self.TZ
def EnsureTZ(self, recalculate=False):
self.CalculateTZ(recalculate=recalculate)
if self.StartTime.tzinfo is None:
self.DefineTZ()
else:
self.AdjustTZ()
def CheckSanity(self):
""" Started out as a function that checked to make sure the activity itself is sane.
Now we perform a lot of checks to make sure all the objects were initialized properly
I'm undecided on this front...
- Forcing the .NET model of "XYZCollection"s that enforce integrity seems wrong
- Enforcing them in constructors makes using the classes a pain
"""
if "ServiceDataCollection" in self.__dict__:
srcs = self.ServiceDataCollection # this is just so I can see the source of the activity in the exception message
if len(self.Laps) == 0:
raise ValueError("No laps")
wptCt = sum([len(x.Waypoints) for x in self.Laps])
if self.Stationary is None:
raise ValueError("Activity is undecidedly stationary")
if self.GPS is None:
raise ValueError("Activity is undecidedly GPS-tracked")
if not self.Stationary:
if wptCt == 0:
raise ValueError("Exactly 0 waypoints")
if wptCt == 1:
raise ValueError("Only 1 waypoint")
if self.Stats.Distance.Value is not None and self.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value > 1000 * 1000:
raise ValueError("Exceedingly long activity (distance)")
if self.StartTime.replace(tzinfo=None) > (datetime.now() + timedelta(days=5)):
raise ValueError("Activity is from the future")
if self.StartTime.replace(tzinfo=None) < datetime(1995, 1, 1):
raise ValueError("Activity falls implausibly far in the past")
if self.EndTime and self.EndTime.replace(tzinfo=None) > (datetime.now() + timedelta(days=5 + 5)): # Based on the 5-day activity length limit imposed later.
raise ValueError("Activity ends in the future")
if self.StartTime and self.EndTime:
# We can only do these checks if the activity has both start and end times (Dropbox)
if (self.EndTime - self.StartTime).total_seconds() < 0:
raise ValueError("Event finishes before it starts")
if (self.EndTime - self.StartTime).total_seconds() == 0:
raise ValueError("0-duration activity")
if (self.EndTime - self.StartTime).total_seconds() > 60 * 60 * 24 * 5:
raise ValueError("Exceedingly long activity (time)")
if len(self.Laps) == 1:
if self.Laps[0].Stats != self.Stats:
raise ValueError("Activity with 1 lap has mismatching statistics between activity and lap")
altLow = None
altHigh = None
pointsWithLocation = 0
unpausedPoints = 0
for lap in self.Laps:
if not lap.StartTime:
raise ValueError("Lap has no start time")
if not lap.EndTime:
raise ValueError("Lap has no end time")
for wp in lap.Waypoints:
if wp.Type != WaypointType.Pause:
unpausedPoints += 1
if wp.Location:
if wp.Location.Latitude == 0 and wp.Location.Longitude == 0:
raise ValueError("Invalid lat/lng")
if (wp.Location.Latitude is not None and (wp.Location.Latitude > 90 or wp.Location.Latitude < -90)) or (wp.Location.Longitude is not None and (wp.Location.Longitude > 180 or wp.Location.Longitude < -180)):
raise ValueError("Out of range lat/lng")
if wp.Location.Altitude is not None and (altLow is None or wp.Location.Altitude < altLow):
altLow = wp.Location.Altitude
if wp.Location.Altitude is not None and (altHigh is None or wp.Location.Altitude > altHigh):
altHigh = wp.Location.Altitude
if wp.Location and wp.Location.Latitude is not None and wp.Location.Longitude is not None:
pointsWithLocation += 1
if unpausedPoints == 1:
raise ValueError("0 < n <= 1 unpaused points in activity")
if pointsWithLocation == 1:
raise ValueError("0 < n <= 1 geographic points in activity") # Make RK happy
if altLow is not None and altLow == altHigh and altLow == 0: # some activities have very sporadic altitude data, we'll let it be...
raise ValueError("Invalid altitudes / no change from " + str(altLow))
# Gets called a bit later than CheckSanity, meh
def CheckTimestampSanity(self):
out_of_bounds_leeway = timedelta(minutes=10)
if self.StartTime.tzinfo != self.TZ:
raise ValueError("Activity StartTime TZ mismatch - %s master vs %s instance" % (self.TZ, self.StartTime.tzinfo))
if self.EndTime.tzinfo != self.TZ:
raise ValueError("Activity EndTime TZ mismatch - %s master vs %s instance" % (self.TZ, self.EndTime.tzinfo))
for lap in self.Laps:
if lap.StartTime.tzinfo != self.TZ:
raise ValueError("Lap StartTime TZ mismatch - %s master vs %s instance" % (self.TZ, lap.StartTime.tzinfo))
if lap.EndTime.tzinfo != self.TZ:
raise ValueError("Lap EndTime TZ mismatch - %s master vs %s instance" % (self.TZ, lap.EndTime.tzinfo))
for wp in lap.Waypoints:
if wp.Timestamp.tzinfo != self.TZ:
raise ValueError("Waypoint TZ mismatch - %s master vs %s instance" % (self.TZ, wp.Timestamp.tzinfo))
if lap.StartTime - wp.Timestamp > out_of_bounds_leeway:
raise ValueError("Waypoint occurs too far before lap")
if wp.Timestamp - lap.EndTime > out_of_bounds_leeway:
raise ValueError("Waypoint occurs too far after lap")
if self.StartTime - wp.Timestamp > out_of_bounds_leeway:
raise ValueError("Waypoint occurs too far before activity")
if wp.Timestamp - self.EndTime > out_of_bounds_leeway:
raise ValueError("Waypoint occurs too far after activity")
if self.StartTime - lap.StartTime > out_of_bounds_leeway:
raise ValueError("Lap starts too far before activity")
if lap.EndTime - self.EndTime > out_of_bounds_leeway:
raise ValueError("Lap ends too far after activity")
def CleanStats(self):
"""
Some devices/apps populate fields with patently false values, e.g. HR avg = 1bpm, calories = 0kcal
So, rather than propagating these, or bailing, we silently strip them, in hopes that destinations will do a better job of calculating them.
Most of the upper limits match the FIT spec
"""
def _cleanStatsObj(stats):
ranges = {
"Power": [ActivityStatisticUnit.Watts, 0, 5000],
"Speed": [ActivityStatisticUnit.KilometersPerHour, 0, 150],
"Elevation": [ActivityStatisticUnit.Meters, -500, 8850], # Props for bringing your Forerunner up Everest
"HR": [ActivityStatisticUnit.BeatsPerMinute, 15, 300], # Please visit the ER before you email me about these limits
"Cadence": [ActivityStatisticUnit.RevolutionsPerMinute, 0, 255], # FIT
"RunCadence": [ActivityStatisticUnit.StepsPerMinute, 0, 255], # FIT
"Strides": [ActivityStatisticUnit.Strides, 1, 9999999],
"Temperature": [ActivityStatisticUnit.DegreesCelcius, -62, 50],
"Energy": [ActivityStatisticUnit.Kilocalories, 1, 65535], # FIT
"Distance": [ActivityStatisticUnit.Kilometers, 0, 1000] # You can let me know when you ride 1000 km and I'll up this.
}
checkFields = ["Average", "Max", "Min", "Value"]
for key in ranges:
stat = stats.__dict__[key].asUnits(ranges[key][0])
for field in checkFields:
value = stat.__dict__[field]
if value is not None and (value < ranges[key][1] or value > ranges[key][2]):
stats.__dict__[key]._samples[field] = 0 # Need to update the original, not the asUnits copy
stats.__dict__[key].__dict__[field] = None
_cleanStatsObj(self.Stats)
for lap in self.Laps:
_cleanStatsObj(lap.Stats)
def CleanWaypoints(self):
# Similarly, we sometimes get complete nonsense like negative distance
waypoints = self.GetFlatWaypoints()
for wp in waypoints:
if wp.Distance and wp.Distance < 0:
wp.Distance = 0
if wp.Speed and wp.Speed < 0:
wp.Speed = 0
if wp.Cadence and wp.Cadence < 0:
wp.Cadence = 0
if wp.RunCadence and wp.RunCadence < 0:
wp.RunCadence = 0
if wp.Power and wp.Power < 0:
wp.Power = 0
if wp.Calories and wp.Calories < 0:
wp.Calories = 0 # Are there any devices that track your caloric intake? Interesting idea...
if wp.HR and wp.HR < 0:
wp.HR = 0
def __str__(self):
return "Activity (" + self.Type + ") Start " + str(self.StartTime) + " " + str(self.TZ) + " End " + str(self.EndTime) + " stat " + str(self.Stationary)
__repr__ = __str__
def __eq__(self, other):
# might need to fix this for TZs?
return self.StartTime == other.StartTime and self.EndTime == other.EndTime and self.Type == other.Type and self.Laps == other.Laps and self.Stats.Distance == other.Stats.Distance and self.Name == other.Name
def __ne__(self, other):
return not self.__eq__(other)
class UploadedActivity (Activity):
pass # will contain list of which service instances contain this activity - not really merited
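# A standalone, illustrative sketch (not used by the sync engine) of the pytz
# mechanics that DefineTZ()/AdjustTZ() above rely on: tz.localize() attaches a
# timezone to a naive datetime without shifting its wall-clock value, while
# datetime.astimezone() converts an already-aware datetime to another zone.
# The timezone name below is purely an example.
def _example_tz_handling():
    naive = datetime(2014, 6, 1, 12, 0, 0)            # e.g. what a naive file format hands us
    tz = pytz.timezone("America/Toronto")
    defined = tz.localize(naive)                      # DefineTZ() path: 12:00 stays 12:00, now TZ-aware
    adjusted = defined.astimezone(pytz.utc)           # AdjustTZ() path: same instant, expressed in UTC
    return defined, adjusted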
class LapIntensity:
Active = 0
Rest = 1
Warmup = 2
Cooldown = 3
class LapTriggerMethod:
Manual = 0
Time = 1
Distance = 2
PositionStart = 3
PositionLap = 4
PositionWaypoint = 5
PositionMarked = 6
SessionEnd = 7
FitnessEquipment = 8
class Lap:
def __init__(self, startTime=None, endTime=None, intensity=LapIntensity.Active, trigger=LapTriggerMethod.Manual, stats=None, waypointList=None):
self.StartTime = startTime
self.EndTime = endTime
self.Trigger = trigger
self.Intensity = intensity
self.Stats = stats if stats else ActivityStatistics()
self.Waypoints = waypointList if waypointList else []
def __str__(self):
return str(self.StartTime) + "-" + str(self.EndTime) + " " + str(self.Intensity) + " (" + str(self.Trigger) + ") " + str(len(self.Waypoints)) + " wps"
__repr__ = __str__
class ActivityStatistics:
_statKeyList = ["Distance", "TimerTime", "MovingTime", "Energy", "Speed", "Elevation", "HR", "Cadence", "RunCadence", "Strides", "Temperature", "Power"]
def __init__(self, distance=None, timer_time=None, moving_time=None, avg_speed=None, max_speed=None, max_elevation=None, min_elevation=None, gained_elevation=None, lost_elevation=None, avg_hr=None, max_hr=None, avg_cadence=None, max_cadence=None, avg_run_cadence=None, max_run_cadence=None, strides=None, min_temp=None, avg_temp=None, max_temp=None, kcal=None, avg_power=None, max_power=None):
self.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=distance)
self.TimerTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=timer_time)
self.MovingTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=moving_time)
self.Energy = ActivityStatistic(ActivityStatisticUnit.Kilocalories, value=kcal)
self.Speed = ActivityStatistic(ActivityStatisticUnit.KilometersPerHour, avg=avg_speed, max=max_speed)
self.Elevation = ActivityStatistic(ActivityStatisticUnit.Meters, max=max_elevation, min=min_elevation, gain=gained_elevation, loss=lost_elevation)
self.HR = ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=avg_hr, max=max_hr)
self.Cadence = ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, avg=avg_cadence, max=max_cadence)
self.RunCadence = ActivityStatistic(ActivityStatisticUnit.StepsPerMinute, avg=avg_run_cadence, max=max_run_cadence)
self.Strides = ActivityStatistic(ActivityStatisticUnit.Strides, value=strides)
self.Temperature = ActivityStatistic(ActivityStatisticUnit.DegreesCelcius, avg=avg_temp, max=max_temp, min=min_temp)
self.Power = ActivityStatistic(ActivityStatisticUnit.Watts, avg=avg_power, max=max_power)
def coalesceWith(self, other_stats):
for stat in ActivityStatistics._statKeyList:
self.__dict__[stat].coalesceWith(other_stats.__dict__[stat])
# Could overload +, but...
def sumWith(self, other_stats):
for stat in ActivityStatistics._statKeyList:
self.__dict__[stat].sumWith(other_stats.__dict__[stat])
# Magic dict is meh
def update(self, other_stats):
for stat in ActivityStatistics._statKeyList:
self.__dict__[stat].update(other_stats.__dict__[stat])
def __eq__(self, other):
if not other:
return False
for stat in ActivityStatistics._statKeyList:
if not self.__dict__[stat] == other.__dict__[stat]:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
class ActivityStatistic:
def __init__(self, units, value=None, avg=None, min=None, max=None, gain=None, loss=None):
self.Value = value
self.Average = avg
self.Min = min
self.Max = max
self.Gain = gain
self.Loss = loss
# Nothing outside of this class should be accessing _samples (though CleanStats gets a pass)
self._samples = {}
self._samples["Value"] = 1 if value is not None else 0
self._samples["Average"] = 1 if avg is not None else 0
self._samples["Min"] = 1 if min is not None else 0
self._samples["Max"] = 1 if max is not None else 0
self._samples["Gain"] = 1 if gain is not None else 0
self._samples["Loss"] = 1 if loss is not None else 0
self.Units = units
def asUnits(self, units):
if units == self.Units:
return self
newStat = ActivityStatistic(units)
existing_dict = dict(self.__dict__)
del existing_dict["Units"]
del existing_dict["_samples"]
ActivityStatistic.convertUnitsInDict(existing_dict, self.Units, units)
newStat.__dict__ = existing_dict
newStat.Units = units
newStat._samples = self._samples
return newStat
def convertUnitsInDict(values_dict, from_units, to_units):
for key, value in values_dict.items():
if value is None:
continue
values_dict[key] = ActivityStatistic.convertValue(value, from_units, to_units)
def convertValue(value, from_units, to_units):
conversions = {
(ActivityStatisticUnit.KilometersPerHour, ActivityStatisticUnit.HectometersPerHour): 10,
(ActivityStatisticUnit.KilometersPerHour, ActivityStatisticUnit.MilesPerHour): 0.621371,
(ActivityStatisticUnit.KilometersPerSecond, ActivityStatisticUnit.KilometersPerHour): 60 * 60,
(ActivityStatisticUnit.MilesPerHour, ActivityStatisticUnit.HundredYardsPerHour): 17.6,
(ActivityStatisticUnit.MetersPerSecond, ActivityStatisticUnit.KilometersPerHour): 3.6,
(ActivityStatisticUnit.DegreesCelcius, ActivityStatisticUnit.DegreesFahrenheit): (lambda C: C*9/5 + 32, lambda F: (F-32) * 5/9),
(ActivityStatisticUnit.Kilometers, ActivityStatisticUnit.Meters): 1000,
(ActivityStatisticUnit.Meters, ActivityStatisticUnit.Feet): 3.281,
(ActivityStatisticUnit.Meters, ActivityStatisticUnit.Yards): 1.09361,
(ActivityStatisticUnit.Miles, ActivityStatisticUnit.Feet): 5280,
(ActivityStatisticUnit.Kilocalories, ActivityStatisticUnit.Kilojoules): 4.184,
(ActivityStatisticUnit.StepsPerMinute, ActivityStatisticUnit.DoubledStepsPerMinute): 2
}
def recurseFindConversionPath(unit, target, stack):
assert(unit != target)
for transform in conversions.keys():
if unit in transform:
if transform in stack:
continue # Prevent circular conversion
if target in transform:
# We've arrived at the end
return stack + [transform]
else:
next_unit = transform[0] if transform[1] == unit else transform[1]
result = recurseFindConversionPath(next_unit, target, stack + [transform])
if result:
return result
return None
conversionPath = recurseFindConversionPath(from_units, to_units, [])
if not conversionPath:
raise ValueError("No conversion from %s to %s" % (from_units, to_units))
for transform in conversionPath:
if type(conversions[transform]) is float or type(conversions[transform]) is int:
if from_units == transform[0]:
value = value * conversions[transform]
from_units = transform[1]
else:
value = value / conversions[transform]
from_units = transform[0]
else:
if from_units == transform[0]:
func = conversions[transform][0] if type(conversions[transform]) is tuple else conversions[transform]
value = func(value)
from_units = transform[1]
else:
if type(conversions[transform]) is not tuple:
raise ValueError("No transform function for %s to %s" % (from_units, to_units))
value = conversions[transform][1](value)
from_units = transform[0]
return value
def coalesceWith(self, stat):
stat = stat.asUnits(self.Units)
items = ["Value", "Max", "Min", "Average", "Gain", "Loss"]
my_items = self.__dict__
other_items = stat.__dict__
my_samples = self._samples
other_samples = stat._samples
for item in items:
# Only average if there's a second value
if other_items[item] is not None:
# We need to override this so we can be lazy elsewhere and just assign values (.Average = ...) and don't have to use .update(ActivityStatistic(blah, blah, blah))
other_samples[item] = other_samples[item] if other_samples[item] else 1
if my_items[item] is None:
# We don't have this item's value, nothing to do really.
my_items[item] = other_items[item]
my_samples[item] = other_samples[item]
else:
my_items[item] += (other_items[item] - my_items[item]) / ((my_samples[item] / other_samples[item]) + 1)  # incremental weighted mean: old += (new - old) * n_new / (n_old + n_new)
my_samples[item] += other_samples[item]
def sumWith(self, stat):
""" Used if you want to sum up, for instance, laps' stats to get the activity's stats
Not all items can be simply summed (min/max), and sum just shouldn't (average)
"""
stat = stat.asUnits(self.Units)
summable_items = ["Value", "Gain", "Loss"]
other_items = stat.__dict__
for item in summable_items:
if item in other_items and other_items[item] is not None:
if self.__dict__[item] is not None:
self.__dict__[item] += other_items[item]
self._samples[item] = 1 # Break the chain of coalesceWith() calls - this is an entirely fresh "measurement"
else:
self.__dict__[item] = other_items[item]
self._samples[item] = stat._samples[item]
self.Average = None
self._samples["Average"] = 0
if self.Max is None or (stat.Max is not None and stat.Max > self.Max):
self.Max = stat.Max
self._samples["Max"] = stat._samples["Max"]
if self.Min is None or (stat.Min is not None and stat.Min < self.Min):
self.Min = stat.Min
self._samples["Min"] = stat._samples["Min"]
def update(self, stat):
stat = stat.asUnits(self.Units)
items = ["Value", "Max", "Min", "Average", "Gain", "Loss"]
other_items = stat.__dict__
for item in items:
if item in other_items and other_items[item] is not None:
self.__dict__[item] = other_items[item]
self._samples[item] = stat._samples[item]
def __eq__(self, other):
if not other:
return False
return self.Units == other.Units and self.Value == other.Value and self.Average == other.Average and self.Max == other.Max and self.Min == other.Min and self.Gain == other.Gain and self.Loss == other.Loss
def __ne__(self, other):
return not self.__eq__(other)
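# A hypothetical helper (not called anywhere) showing what coalesceWith() above
# does: it merges two measurements of the same statistic with an incremental
# weighted mean, so averaging 150 BPM (1 sample) with 160 BPM (1 sample) yields
# 155 BPM with a combined sample count of 2, keeping later merges weighted.
def _example_coalesce_hr():
    a = ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=150)
    b = ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=160)
    a.coalesceWith(b)    # a.Average -> 155.0, a._samples["Average"] -> 2
    return a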
class ActivityStatisticUnit:
Seconds = "s"
Milliseconds = "ms"
Meters = "m"
Kilometers = "km"
Feet = "f"
Yards = "yd"
Miles = "mi"
DegreesCelcius = "ºC"
DegreesFahrenheit = "ºF"
KilometersPerHour = "km/h"
HectometersPerHour = "hm/h" # Silly Garmin Connect!
KilometersPerSecond = "km/s" # Silly (unnamed service)!
MetersPerSecond = "m/s"
MilesPerHour = "mph"
HundredYardsPerHour = "hydph" # Hundred instead of Hecto- because imperial :<
BeatsPerMinute = "BPM"
RevolutionsPerMinute = "RPM"
StepsPerMinute = "SPM"
DoubledStepsPerMinute = "2SPM" # Garmin Connect is still weird.
Strides = "strides"
Kilocalories = "kcal"
Kilojoules = "kj"
Watts = "W"
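# A hypothetical usage sketch (not called anywhere) of the unit-conversion
# machinery above: convertValue() searches the `conversions` table for a chain
# of transforms, so kilometers reach feet via metres (km -> m -> ft), and
# asUnits() applies the same conversion to every populated field of a statistic.
def _example_unit_conversion():
    km_to_ft = ActivityStatistic.convertValue(1, ActivityStatisticUnit.Kilometers, ActivityStatisticUnit.Feet)  # 1 * 1000 * 3.281
    dist = ActivityStatistic(ActivityStatisticUnit.Kilometers, value=42.2)
    dist_m = dist.asUnits(ActivityStatisticUnit.Meters)   # Value -> 42200.0
    return km_to_ft, dist_m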
class WaypointType:
Start = 0 # Start of activity
Regular = 1 # Normal
Pause = 11 # All waypoints within a paused period should have this type
Resume = 12 # The first waypoint after a paused period
End = 100 # End of activity
class Waypoint:
__slots__ = ["Timestamp", "Location", "HR", "Calories", "Power", "Temp", "Cadence", "RunCadence", "Type", "Distance", "Speed"]
def __init__(self, timestamp=None, ptType=WaypointType.Regular, location=None, hr=None, power=None, calories=None, cadence=None, runCadence=None, temp=None, distance=None, speed=None):
self.Timestamp = timestamp
self.Location = location
self.HR = hr # BPM
self.Calories = calories # kcal
self.Power = power # Watts. I doubt there will ever be more parameters than this in terms of interchange
self.Temp = temp # degrees C. never say never
self.Cadence = cadence # RPM. dammit this better be the last one
self.RunCadence = runCadence # SPM. screw it
self.Distance = distance # meters. I don't even care any more.
self.Speed = speed # m/sec. neghhhhh
self.Type = ptType
def __eq__(self, other):
return self.Timestamp == other.Timestamp and self.Location == other.Location and self.HR == other.HR and self.Calories == other.Calories and self.Temp == other.Temp and self.Cadence == other.Cadence and self.Type == other.Type and self.Power == other.Power and self.RunCadence == other.RunCadence and self.Distance == other.Distance and self.Speed == other.Speed
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return str(self.Type) + "@" + str(self.Timestamp) + " " + ((str(self.Location.Latitude) + "|" + str(self.Location.Longitude) + "^" + str(round(self.Location.Altitude) if self.Location.Altitude is not None else None)) if self.Location is not None else "") + "\n\tHR " + str(self.HR) + " CAD " + str(self.Cadence) + " RCAD " + str(self.RunCadence) + " TEMP " + str(self.Temp) + " PWR " + str(self.Power) + " CAL " + str(self.Calories) + " SPD " + str(self.Speed) + " DST " + str(self.Distance)
__repr__ = __str__
class Location:
__slots__ = ["Latitude", "Longitude", "Altitude"]
def __init__(self, lat=None, lon=None, alt=None):
self.Latitude = lat
self.Longitude = lon
self.Altitude = alt
def __eq__(self, other):
if not other:
return False
return self.Latitude == other.Latitude and self.Longitude == other.Longitude and self.Altitude == other.Altitude
def __ne__(self, other):
return not self.__eq__(other)
| marxin/tapiriik | tapiriik/services/interchange.py | Python | apache-2.0 | 31,215 | [
"VisIt"
] | 3ce1e836415f91fee00b6d8bb199e3ffe96ddbd2836c6423549b77bd577ac022 |
"""
Probability density functions.
This module defines L{AbstractDensity}: a common interface for all PDFs.
Each L{AbstractDensity} describes a specific type of probability distribution,
for example L{Normal} is an implementation of the Gaussian distribution:
>>> pdf = Normal(mu=10, sigma=1.1)
>>> pdf.mu, pdf['sigma']
(10.0, 1.1)
Every PDF provides an implementation of the L{AbstractDensity.evaluate}
method, which evaluates the PDF for a list of input data points:
>>> pdf.evaluate([10, 9, 11, 12])
array([ 0.3626748 , 0.2399147 , 0.2399147 , 0.06945048])
PDF instances also behave like functions:
>>> pdf(data) # the same as pdf.evaluate(data)
Some L{AbstractDensity} implementations may support drawing random numbers from
the distribution (or raise an exception otherwise):
>>> pdf.random(2)
array([ 9.86257083, 9.73760515])
Each implementation of L{AbstractDensity} may support any number of estimators,
used to estimate and re-initialize the PDF parameters from a set of observed data
points:
>>> pdf.estimate([5, 5, 10, 10])
>>> pdf.mu, pdf.sigma
(7.5, 2.5)
>>> pdf.estimator
<csb.statistics.pdf.GaussianMLEstimator>
Estimators implement the L{AbstractEstimator} interface. They are treated as
pluggable tools, which can be exchanged through the L{AbstractDensity.estimator}
property (you could create, initialize and plug your own estimator as well).
This is a classic Strategy pattern.
"""
import numpy.random
import scipy.special
import csb.core
from abc import ABCMeta, abstractmethod
from csb.core import OrderedDict
from csb.numeric import log, exp, psi, inv_psi, EULER_MASCHERONI
from scipy.special import gammaln
from numpy import array, fabs, power, sqrt, pi, mean, median, clip
class IncompatibleEstimatorError(TypeError):
pass
class ParameterNotFoundError(AttributeError):
pass
class ParameterValueError(ValueError):
def __init__(self, param, value):
self.param = param
self.value = value
super(ParameterValueError, self).__init__(param, value)
def __str__(self):
return '{0} = {1}'.format(self.param, self.value)
class EstimationFailureError(ParameterValueError):
pass
class AbstractEstimator(object):
"""
Density parameter estimation strategy.
"""
__metaclass__ = ABCMeta
@abstractmethod
def estimate(self, context, data):
"""
Estimate the parameters of the distribution from sample C{data}.
@param context: context distribution
@type context: L{AbstractDensity}
@param data: sample values
@type data: array
@return: a new distribution, initialized with the estimated parameters
@rtype: L{AbstractDensity}
@raise EstimationFailureError: if estimation is not possible
"""
pass
class NullEstimator(AbstractEstimator):
"""
Does not estimate anything.
"""
def estimate(self, context, data):
raise NotImplementedError()
class LaplaceMLEstimator(AbstractEstimator):
def estimate(self, context, data):
x = array(data)
mu = median(x)
b = mean(fabs(x - mu))
return Laplace(mu, b)
class GaussianMLEstimator(AbstractEstimator):
def estimate(self, context, data):
x = array(data)
mu = mean(x)
sigma = sqrt(mean((x - mu) ** 2))
return Normal(mu, sigma)
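# A hypothetical user-defined estimator (not part of CSB) illustrating the
# pluggable Strategy mechanism described in the module docstring: any object
# implementing AbstractEstimator.estimate() can be assigned to a density via
# its `estimator` property, e.g. pdf.estimator = RobustNormalEstimator().
class RobustNormalEstimator(AbstractEstimator):
    """Fit a Normal from the median and the scaled median absolute deviation."""
    def estimate(self, context, data):
        x = array(data)
        mu = median(x)
        sigma = 1.4826 * median(fabs(x - mu))   # MAD * 1.4826 approximates sigma for Gaussian data
        return Normal(mu, sigma)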
class InverseGaussianMLEstimator(AbstractEstimator):
def estimate(self, context, data):
x = array(data)
mu = mean(x)
il = mean((1.0 / x) - (1.0 / mu))
if il == 0:
raise EstimationFailureError('lambda', float('inf'))
return InverseGaussian(mu, 1.0 / il)
class GammaMLEstimator(AbstractEstimator):
def __init__(self):
super(GammaMLEstimator, self).__init__()
self.n_iter = 1000
def estimate(self, context, data):
mu = mean(data)
logmean = mean(log(data))
a = 0.5 / (log(mu) - logmean)
for dummy in range(self.n_iter):
a = inv_psi(logmean - log(mu) + log(a))
return Gamma(a, a / mu)
class GenNormalBruteForceEstimator(AbstractEstimator):
def __init__(self, minbeta=0.5, maxbeta=8.0, step=0.1):
self._minbeta = minbeta
self._maxbeta = maxbeta
self._step = step
super(GenNormalBruteForceEstimator, self).__init__()
def estimate(self, context, data):
pdf = GeneralizedNormal(1, 1, 1)
data = array(data)
logl = []
for beta in numpy.arange(self._minbeta, self._maxbeta, self._step):
self.update(pdf, data, beta)
l = pdf.log_prob(data).sum()
logl.append([beta, l])
logl = numpy.array(logl)
# optimal parameters:
beta = logl[ numpy.argmax(logl[:, 1]) ][0]
self.update(pdf, data, beta)
return pdf
def estimate_with_fixed_beta(self, data, beta):
mu = median(data)
v = mean((data - mu) ** 2)
alpha = sqrt(v * exp(gammaln(1. / beta) - gammaln(3. / beta)))
return mu, alpha
def update(self, pdf, data, beta):
mu, alpha = self.estimate_with_fixed_beta(data, beta)
pdf.mu = mu
pdf.alpha = alpha
pdf.beta = beta
return pdf
class MultivariateGaussianMLEstimator(AbstractEstimator):
def __init__(self):
super(MultivariateGaussianMLEstimator, self).__init__()
def estimate(self, context, data):
return MultivariateGaussian(numpy.mean(data, 0), numpy.cov(data.T))
class DirichletEstimator(AbstractEstimator):
def __init__(self):
super(DirichletEstimator, self).__init__()
self.n_iter = 1000
self.tol = 1e-5
def estimate(self, context, data):
log_p = numpy.mean(log(data), 0)
e = numpy.mean(data, 0)
v = numpy.mean(data ** 2, 0)
q = (e[0] - v[0]) / (v[0] - e[0] ** 2)
a = e * q
y = a * 0
k = 0
while(sum(abs(y - a)) > self.tol and k < self.n_iter):
y = psi(sum(a)) + log_p
a = numpy.array(list(map(inv_psi, y)))
k += 1
return Dirichlet(a)
class GumbelMinMomentsEstimator(AbstractEstimator):
def estimate(self, context, data):
x = array(data)
beta = sqrt(6 * numpy.var(x)) / pi
mu = mean(x) + EULER_MASCHERONI * beta
return GumbelMinimum(mu, beta)
class GumbelMaxMomentsEstimator(AbstractEstimator):
def estimate(self, context, data):
x = array(data)
beta = sqrt(6 * numpy.var(x)) / pi
mu = mean(x) - EULER_MASCHERONI * beta
return GumbelMaximum(mu, beta)
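# An illustrative round trip (not part of the library) through the moment
# matching above: for a Gumbel distribution Var(X) = (pi ** 2 / 6) * beta ** 2,
# and the mean is mu - EULER_MASCHERONI * beta for the minimum variant and
# mu + EULER_MASCHERONI * beta for the maximum variant, so estimate() should
# approximately recover the parameters the sample was drawn from.
def _example_gumbel_moment_fit(size=10000):
    pdf = GumbelMaximum(mu=2.0, beta=0.5)
    fitted = GumbelMaxMomentsEstimator().estimate(None, pdf.random(size))
    return fitted.mu, fitted.beta    # roughly (2.0, 0.5)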
class AbstractDensity(object):
"""
Defines the interface and common operations for all probability density
functions. This is a generic class which can operate on parameters of
any type (e.g. simple floats or custom parameter objects).
Subclasses must complete the implementation by implementing the
L{AbstractDensity.log_prob} method. Subclasses could also consider--but
are not obliged to--override the L{AbstractDensity.random} method. If
any of the density parameters need validation, subclasses are expected to
override the L{AbstractDensity._validate} method and raise
L{ParameterValueError} on validation failure. Note that implementing
parameter validation in property setters has almost no effect and is
discouraged.
"""
__metaclass__ = ABCMeta
def __init__(self):
self._params = OrderedDict()
self._estimator = None
self.estimator = NullEstimator()
def __getitem__(self, param):
if param in self._params:
return self._params[param]
else:
raise ParameterNotFoundError(param)
def __setitem__(self, param, value):
if param in self._params:
self._validate(param, value)
self._set(param, value)
else:
raise ParameterNotFoundError(param)
def _set(self, param, value):
"""
Update the C{value} of C{param}.
"""
self._params[param] = value
@property
def estimator(self):
return self._estimator
@estimator.setter
def estimator(self, strategy):
if not isinstance(strategy, AbstractEstimator):
raise TypeError(strategy)
self._estimator = strategy
def __call__(self, x):
return self.evaluate(x)
def __str__(self):
name = self.__class__.__name__
params = ', '.join([ '{0}={1}'.format(p, v) for p, v in self._params.items() ])
return '{0}({1})'.format(name, params)
def _register(self, name):
"""
Register a new parameter name.
"""
if name not in self._params:
self._params[name] = None
def _validate(self, param, value):
"""
Parameter value validation hook.
@raise ParameterValueError: on failed validation (value not accepted)
"""
pass
def get_params(self):
return [self._params[name] for name in self.parameters]
def set_params(self, *values, **named_params):
for p, v in zip(self.parameters, values):
self[p] = v
for p in named_params:
self[p] = named_params[p]
@property
def parameters(self):
"""
Get a list of all distribution parameter names.
"""
return tuple(self._params)
@abstractmethod
def log_prob(self, x):
"""
Evaluate the logarithm of the probability of observing values C{x}.
@param x: values
@type x: array
@rtype: array
"""
pass
def evaluate(self, x):
"""
Evaluate the probability of observing values C{x}.
@param x: values
@type x: array
@rtype: array
"""
x = numpy.array(x)
return exp(self.log_prob(x))
def random(self, size=None):
"""
Generate random samples from the probability distribution.
@param size: number of values to sample
@type size: int
"""
raise NotImplementedError()
def estimate(self, data):
"""
Estimate and load the parameters of the distribution from sample C{data}
using the current L{AbstractEstimator} strategy.
@param data: sample values
@type data: array
@raise NotImplementedError: when no estimator is available for this
distribution
@raise IncompatibleEstimatorError: when the current estimator is not
compatible with this pdf
"""
try:
pdf = self.estimator.estimate(self, data)
for param in pdf.parameters:
self[param] = pdf[param]
except ParameterNotFoundError as e:
raise IncompatibleEstimatorError(self.estimator)
except ParameterValueError as e:
raise EstimationFailureError(e.param, e.value)
class BaseDensity(AbstractDensity):
"""
Base abstract class for all PDFs, which operate on simple float
or array-of-float parameters. Parameters of any other type will trigger
TypeError-s.
"""
def _set(self, param, value):
try:
if csb.core.iterable(value):
value = array(value)
else:
value = float(value)
except ValueError:
raise TypeError(value)
super(BaseDensity, self)._set(param, value)
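# A minimal, hypothetical subclass (not part of CSB) following the recipe from
# the AbstractDensity docstring: register the parameters, validate them in
# _validate(), implement log_prob(); random() is optional.
class _ExampleExponential(BaseDensity):
    """Exponential distribution with rate parameter 'rate' (illustration only)."""
    def __init__(self, rate=1.0):
        super(_ExampleExponential, self).__init__()
        self._register('rate')
        self.set_params(rate=rate)
    def _validate(self, param, value):
        if value <= 0:
            raise ParameterValueError(param, value)
    def log_prob(self, x):
        rate = self['rate']
        return log(rate) - rate * x
    def random(self, size=None):
        return numpy.random.exponential(1.0 / self['rate'], size)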
class Laplace(BaseDensity):
def __init__(self, mu=0, b=1):
super(Laplace, self).__init__()
self._register('mu')
self._register('b')
self.set_params(b=b, mu=mu)
self.estimator = LaplaceMLEstimator()
def _validate(self, param, value):
if param == 'b' and value <= 0:
raise ParameterValueError(param, value)
@property
def b(self):
return self['b']
@b.setter
def b(self, value):
self['b'] = value
@property
def mu(self):
return self['mu']
@mu.setter
def mu(self, value):
self['mu'] = value
def log_prob(self, x):
b = self.b
mu = self.mu
return log(1 / (2. * b)) - fabs(x - mu) / b
def random(self, size=None):
loc = self.mu
scale = self.b
return numpy.random.laplace(loc, scale, size)
class Normal(BaseDensity):
def __init__(self, mu=0, sigma=1):
super(Normal, self).__init__()
self._register('mu')
self._register('sigma')
self.set_params(mu=mu, sigma=sigma)
self.estimator = GaussianMLEstimator()
@property
def mu(self):
return self['mu']
@mu.setter
def mu(self, value):
self['mu'] = value
@property
def sigma(self):
return self['sigma']
@sigma.setter
def sigma(self, value):
self['sigma'] = value
def log_prob(self, x):
mu = self.mu
sigma = self.sigma
return log(1.0 / sqrt(2 * pi * sigma ** 2)) - (x - mu) ** 2 / (2 * sigma ** 2)
def random(self, size=None):
mu = self.mu
sigma = self.sigma
return numpy.random.normal(mu, sigma, size)
class InverseGaussian(BaseDensity):
def __init__(self, mu=1, shape=1):
super(InverseGaussian, self).__init__()
self._register('mu')
self._register('shape')
self.set_params(mu=mu, shape=shape)
self.estimator = InverseGaussianMLEstimator()
def _validate(self, param, value):
if value <= 0:
raise ParameterValueError(param, value)
@property
def mu(self):
return self['mu']
@mu.setter
def mu(self, value):
self['mu'] = value
@property
def shape(self):
return self['shape']
@shape.setter
def shape(self, value):
self['shape'] = value
def log_prob(self, x):
mu = self.mu
scale = self.shape
x = numpy.array(x)
if numpy.min(x) <= 0:
raise ValueError('InverseGaussian is defined for x > 0')
y = -0.5 * scale * (x - mu) ** 2 / (mu ** 2 * x)
z = 0.5 * (log(scale) - log(2 * pi * x ** 3))
return z + y
def random(self, size=None):
mu = self.mu
shape = self.shape
mu_2l = mu / shape / 2.
Y = numpy.random.standard_normal(size)
Y = mu * Y ** 2
X = mu + mu_2l * (Y - sqrt(4 * shape * Y + Y ** 2))
U = numpy.random.random(size)
m = numpy.less_equal(U, mu / (mu + X))
return m * X + (1 - m) * mu ** 2 / X
class GeneralizedNormal(BaseDensity):
def __init__(self, mu=0, alpha=1, beta=1):
super(GeneralizedNormal, self).__init__()
self._register('mu')
self._register('alpha')
self._register('beta')
self.set_params(mu=mu, alpha=alpha, beta=beta)
self.estimator = GenNormalBruteForceEstimator()
def _validate(self, param, value):
if param in ('alpha', 'beta') and value <= 0:
raise ParameterValueError(param, value)
@property
def mu(self):
return self['mu']
@mu.setter
def mu(self, value):
self['mu'] = value
@property
def alpha(self):
return self['alpha']
@alpha.setter
def alpha(self, value):
self['alpha'] = value
@property
def beta(self):
return self['beta']
@beta.setter
def beta(self, value):
self['beta'] = value
def log_prob(self, x):
mu = self.mu
alpha = self.alpha
beta = self.beta
return log(beta / (2.0 * alpha)) - gammaln(1. / beta) - power(fabs(x - mu) / alpha, beta)
class GeneralizedInverseGaussian(BaseDensity):
def __init__(self, a=1, b=1, p=1):
super(GeneralizedInverseGaussian, self).__init__()
self._register('a')
self._register('b')
self._register('p')
self.set_params(a=a, b=b, p=p)
self.estimator = NullEstimator()
def _validate(self, param, value):
if value <= 0:
raise ParameterValueError(param, value)
@property
def a(self):
return self['a']
@a.setter
def a(self, value):
self['a'] = value
@property
def b(self):
return self['b']
@b.setter
def b(self, value):
self['b'] = value
@property
def p(self):
return self['p']
@p.setter
def p(self, value):
self['p'] = value
def log_prob(self, x):
a = self['a']
b = self['b']
p = self['p']
lz = 0.5 * p * (log(a) - log(b)) - log(2 * scipy.special.kv(p, sqrt(a * b)))
return lz + (p - 1) * log(x) - 0.5 * (a * x + b / x)
def random(self, size=None):
from csb.statistics.rand import inv_gaussian
rvs = []
burnin = 10
a = self['a']
b = self['b']
p = self['p']
s = a * 0. + 1.
if p < 0:
a, b = b, a
if size is None:
size = 1
for i in range(int(size)):
for j in range(burnin):
l = b + 2 * s
m = sqrt(l / a)
x = inv_gaussian(m, l, shape=m.shape)
s = numpy.random.gamma(abs(p) + 0.5, x)
if p >= 0:
rvs.append(x)
else:
rvs.append(1 / x)
return numpy.array(rvs)
class Gamma(BaseDensity):
def __init__(self, alpha=1, beta=1):
super(Gamma, self).__init__()
self._register('alpha')
self._register('beta')
self.set_params(alpha=alpha, beta=beta)
self.estimator = GammaMLEstimator()
def _validate(self, param, value):
if value <= 0:
raise ParameterValueError(param, value)
@property
def alpha(self):
return self['alpha']
@alpha.setter
def alpha(self, value):
self['alpha'] = value
@property
def beta(self):
return self['beta']
@beta.setter
def beta(self, value):
self['beta'] = value
def log_prob(self, x):
a, b = self['alpha'], self['beta']
return a * log(b) - gammaln(clip(a, 1e-308, 1e308)) + \
(a - 1) * log(clip(x, 1e-308, 1e308)) - b * x
def random(self, size=None):
return numpy.random.gamma(self['alpha'], 1 / self['beta'], size)
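# An illustrative round trip (not part of the library) through the fixed-point
# ML estimator above: GammaMLEstimator iterates psi(a) = log(a) - log(mean(x)) + mean(log(x))
# via inv_psi and then sets beta = alpha / mean(x), so with enough samples the
# fitted parameters land close to the generating ones.
def _example_gamma_fit(size=10000):
    true_pdf = Gamma(alpha=3.0, beta=2.0)
    fitted = Gamma()
    fitted.estimate(true_pdf.random(size))   # GammaMLEstimator is the default strategy
    return fitted.alpha, fitted.beta         # roughly (3.0, 2.0)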
class InverseGamma(BaseDensity):
def __init__(self, alpha=1, beta=1):
super(InverseGamma, self).__init__()
self._register('alpha')
self._register('beta')
self.set_params(alpha=alpha, beta=beta)
self.estimator = NullEstimator()
def _validate(self, param, value):
if value <= 0:
raise ParameterValueError(param, value)
@property
def alpha(self):
return self['alpha']
@alpha.setter
def alpha(self, value):
self['alpha'] = value
@property
def beta(self):
return self['beta']
@beta.setter
def beta(self, value):
self['beta'] = value
def log_prob(self, x):
a, b = self['alpha'], self['beta']
return a * log(b) - gammaln(a) - (a + 1) * log(x) - b / x
def random(self, size=None):
return 1. / numpy.random.gamma(self['alpha'], 1 / self['beta'], size)
class MultivariateGaussian(Normal):
def __init__(self, mu=numpy.zeros(2), sigma=numpy.eye(2)):
super(MultivariateGaussian, self).__init__(mu, sigma)
self.estimator = MultivariateGaussianMLEstimator()
def random(self, size=None):
return numpy.random.multivariate_normal(self.mu, self.sigma, size)
def log_prob(self, x):
from numpy.linalg import det
mu = self.mu
S = self.sigma
D = len(mu)
q = self.__q(x)
return -0.5 * (D * log(2 * pi) + log(abs(det(S)))) - 0.5 * q ** 2
def __q(self, x):
from numpy import sum, dot, reshape
from numpy.linalg import inv
mu = self.mu
S = self.sigma
return sqrt(clip(sum(reshape((x - mu) * dot(x - mu, inv(S).T.squeeze()), (-1, len(mu))), -1), 0., 1e308))
def conditional(self, x, dims):
"""
Return the distribution along the dimensions
dims conditioned on x
@param x: conditional values
@param dims: new dimensions
"""
from numpy import take, dot
from numpy.linalg import inv
dims2 = [i for i in range(self['mu'].shape[0]) if not i in dims]
mu1 = take(self['mu'], dims)
mu2 = take(self['mu'], dims2)
# x1 = take(x, dims)
x2 = take(x, dims2)
A = take(take(self['sigma'], dims, 0), dims, 1)   # parameter is registered as lowercase 'sigma' by Normal
B = take(take(self['sigma'], dims2, 0), dims2, 1)
C = take(take(self['sigma'], dims, 0), dims2, 1)
mu = mu1 + dot(C, dot(inv(B), x2 - mu2))
Sigma = A - dot(C, dot(inv(B), C.T))
return MultivariateGaussian(mu, Sigma)
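# A hypothetical sanity check (not used by CSB itself) of the conditioning
# above, which implements mu_{1|2} = mu_1 + C B^-1 (x_2 - mu_2) and
# Sigma_{1|2} = A - C B^-1 C^T for the block decomposition of the covariance.
# It assumes the lowercase 'sigma' parameter name registered by Normal.
def _example_conditional_gaussian():
    pdf = MultivariateGaussian(numpy.array([0.0, 0.0]),
                               numpy.array([[1.0, 0.5], [0.5, 2.0]]))
    cond = pdf.conditional(numpy.array([0.0, 1.0]), dims=[0])
    return cond.mu, cond.sigma   # mean 0.5 * 1.0 / 2.0 = 0.25, variance 1.0 - 0.25 * 0.5 = 0.875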
class Dirichlet(BaseDensity):
def __init__(self, alpha):
super(Dirichlet, self).__init__()
self._register('alpha')
self.set_params(alpha=alpha)
self.estimator = DirichletEstimator()
@property
def alpha(self):
return self['alpha']
@alpha.setter
def alpha(self, value):
self['alpha'] = numpy.ravel(value)
def log_prob(self, x):
#TODO check whether x is in the probability simplex
alpha = self.alpha
return gammaln(sum(alpha)) - sum(gammaln(alpha)) \
+ numpy.dot((alpha - 1).T, log(x).T)
def random(self, size=None):
return numpy.random.mtrand.dirichlet(self.alpha, size)
class GumbelMinimum(BaseDensity):
def __init__(self, mu=0, beta=1):
super(GumbelMinimum, self).__init__()
self._register('mu')
self._register('beta')
self.set_params(mu=mu, beta=beta)
self.estimator = GumbelMinMomentsEstimator()
def _validate(self, param, value):
if param == 'beta' and value <= 0:
raise ParameterValueError(param, value)
@property
def mu(self):
return self['mu']
@mu.setter
def mu(self, value):
self['mu'] = value
@property
def beta(self):
return self['beta']
@beta.setter
def beta(self, value):
self['beta'] = value
def log_prob(self, x):
mu = self.mu
beta = self.beta
z = (x - mu) / beta
return log(1. / beta) + z - exp(z)
def random(self, size=None):
mu = self.mu
beta = self.beta
return -numpy.random.gumbel(-mu, beta, size)
class GumbelMaximum(GumbelMinimum):
def __init__(self, mu=0, beta=1):
super(GumbelMaximum, self).__init__(mu=mu, beta=beta)
self.estimator = GumbelMaxMomentsEstimator()
def log_prob(self, x):
mu = self.mu
beta = self.beta
z = (x - mu) / beta
return log(1. / beta) - z - exp(-z)
def random(self, size=None):
mu = self.mu
beta = self.beta
return numpy.random.gumbel(mu, beta, size)
| csb-toolbox/CSB | csb/statistics/pdf/__init__.py | Python | mit | 24,534 | [
"Gaussian"
] | b28670682f0432df6655811d805f72e3eb354d924163666284a3e9a564a61626 |
""" Tests for ProxyProvider modules module
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import shutil
import unittest
from diraccfg import CFG
import DIRAC
from DIRAC import gConfig
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.Resources.ProxyProvider.ProxyProviderFactory import ProxyProviderFactory
certsPath = os.path.join(os.path.dirname(DIRAC.__file__), 'Core/Security/test/certs')
diracTestCACFG = """
Resources
{
ProxyProviders
{
DIRAC_TEST_CA
{
ProviderType = DIRACCA
CertFile = %s
KeyFile = %s
Match =
Supplied = C, O, OU, CN
Optional = emailAddress
DNOrder = C, O, OU, CN, emailAddress
C = FR
O = DIRAC
OU = DIRAC TEST
}
}
}
""" % (os.path.join(certsPath, 'ca/ca.cert.pem'), os.path.join(certsPath, 'ca/ca.key.pem'))
userCFG = """
Registry
{
Users
{
testuser
{
DN = /C=FR/O=DIRAC/OU=DIRAC TEST/CN=DIRAC test user/[email protected]
}
}
Groups
{
dirac_user
{
Users = testuser
}
dirac_no_user
{
Users = nouser
}
}
}
"""
class DIRACCAPPTest(unittest.TestCase):
""" Base class for the Modules test cases
"""
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
cfg = CFG()
cfg.loadFromBuffer(diracTestCACFG)
gConfig.loadCFG(cfg)
cfg.loadFromBuffer(userCFG)
gConfig.loadCFG(cfg)
result = ProxyProviderFactory().getProxyProvider('DIRAC_TEST_CA')
self.assertTrue(result['OK'], '\n%s' % (result.get('Message') or 'Error message is absent.'))
self.pp = result['Value']
def tearDown(self):
pass
def test_getProxy(self):
for dn, res in [('/C=FR/O=DIRAC/OU=DIRAC TEST/CN=DIRAC test user/[email protected]', True),
('/C=FR/OU=DIRAC TEST/[email protected]', False),
('/C=FR/OU=DIRAC/O=DIRAC TEST/[email protected]', False),
('/C=FR/O=DIRAC/BADFIELD=DIRAC TEST/CN=DIRAC test user', False)]:
result = self.pp.getProxy(dn)
text = 'Must end %s%s' % ('successfully' if res else 'with an error',
': %s' % result.get('Message', 'Error message is absent.'))
self.assertEqual(result['OK'], res, text)
if res:
chain = X509Chain()
chain.loadChainFromString(result['Value'])
result = chain.getCredentials()
self.assertTrue(result['OK'], '\n%s' % (result.get('Message') or 'Error message is absent.'))
credDict = result['Value']
self.assertEqual(credDict['username'], 'testuser',
'%s, expected %s' % (credDict['username'], 'testuser'))
def test_generateProxyDN(self):
userDict = {"FullName": "John Doe",
"Email": "[email protected]",
"O": 'DIRAC',
'OU': 'DIRAC TEST',
'C': 'FR'}
result = self.pp.generateDN(**userDict)
self.assertTrue(result['OK'], '\n%s' % (result.get('Message') or 'Error message is absent.'))
result = self.pp.getProxy(result['Value'])
self.assertTrue(result['OK'], '\n%s' % (result.get('Message') or 'Error message is absent.'))
chain = X509Chain()
chain.loadChainFromString(result['Value'])
result = chain.getCredentials()
self.assertTrue(result['OK'], '\n%s' % (result.get('Message') or 'Error message is absent.'))
issuer = result['Value']['issuer']
self.assertEqual(issuer, '/C=FR/O=DIRAC/OU=DIRAC TEST/CN=John Doe/[email protected]')
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(DIRACCAPPTest)
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
| yujikato/DIRAC | tests/Integration/Resources/ProxyProvider/Test_DIRACCAProxyProvider.py | Python | gpl-3.0 | 3,956 | [
"DIRAC"
] | 7071ed2f3eae5c508c39c6e44af1b33bdc81ed2af29a7d44e61533c5b0ea6b7c |
# Import of the relevant tools
import time
import numpy as np
import theano
import theano.tensor as T
from theano import pp, config
import os
from plotly.tools import FigureFactory as FF
import plotly.graph_objs as go
from ..io.read_vtk import ReadVTK
from ..data_attachment.measures import Measures
from ..data_attachment.varifolds import Varifolds
from ..math_utils.kernels import _squared_distances, _gaussian_kernel
# a TheanoShapes manifold will be created from a regular Curve/Surface,
# with information about connectivity and number of points.
# Basically, a TheanoShapes is an efficient implementation of
# a shape orbit.
from .riemannian_manifold import RManifold
class TheanoHamiltonianClouds(RManifold) :
"""
Abstract class which implements the symbolic Hamiltonian dynamic.
"""
def __init__(self, kernel = ('gaussian', 1),
weights = (0.01, 1), # gamma_V, gamma_W
dt = 0.1,
plot_interactive = False,
plot_file = True,
foldername = 'results/'
) :
# We are going to re-implement the RManifold shoot and backward methods,
# so we may as well start from scratch.
# no need for RManifold.__init__(self, npoints, dimension, kernel, dt)
self.kernel = kernel
self.kernel_radius = kernel[1]
self.dt = dt
self.weight_regularization = weights[0]
self.weight_attachment = weights[1]
self.current_axis = []
self.plot_interactive = plot_interactive
self.plot_file = plot_file
self.foldername = foldername
def assert_folder(dname) :
if not os.path.exists(dname):
os.makedirs(dname)
assert_folder(self.foldername)
assert_folder(self.foldername + '/Descent/')
assert_folder(self.foldername + '/Descent/Models/')
assert_folder(self.foldername + '/Descent/Momentums/')
assert_folder(self.foldername + '/Descent/Plans/')
assert_folder(self.foldername + '/Grid/')
assert_folder(self.foldername + '/Momentums/')
assert_folder(self.foldername + '/Shoot/')
# We don't need all those legacy routines : ================================================
def K(self,q,p, kernels) :
raise(NotImplementedError)
def upP(self,q,p, kernels) :
raise(NotImplementedError)
def gradq_pKqz(self, p, q, z, kernels) :
raise(NotImplementedError)
def dq_gradq_pKqp_a(self, q, p, a, kernels) :
raise(NotImplementedError)
def dq_Kqp_a(self,q,p,a, kernels) :
raise(NotImplementedError)
# Symbolic Hamiltonian functions ==========================================================================
# Part 1 : cometric on the space of landmarks, kinetic energy on the phase space (Hamiltonian)-----
def _kq(self, q): # Computes the standard gaussian kernel matrix of variance kernel_radius
return _gaussian_kernel(q, q, self.kernel_radius)
def _Kq(self, q) :
k = self._kq(q)
return k
def _Hqp(self, q, p) :
"""Hamiltonian."""
pKqp = self._Kq(q) * (p.dot(p.T))
return .5 * T.sum(pKqp) # H(q,p) = (1/2) * sum_ij K(x_i,x_j) p_i.p_j
# Part 2 : Geodesic shooting ------------------------------------------------------------------
# The partial derivatives of the Hamiltonian are automatically computed !
def _dq_Hqp(self, q,p) :
return T.grad(self._Hqp(q,p), q)
def _dp_Hqp(self, q,p) :
return T.grad(self._Hqp(q,p), p)
def _hamiltonian_step(self, q,p) : # The "math" part of the code :
return [q + self.dt * self._dp_Hqp(q,p) , # Simplistic Euler scheme
p - self.dt * self._dq_Hqp(q,p) ]
def _HamiltonianTrajectory(self, q, p) :
# Here, we use the "scan" theano routine, which can be understood as a "for" loop
result, updates = theano.scan(fn = lambda x,y : self._hamiltonian_step(x,y),
outputs_info = [q,p],
n_steps = int(np.round(1/self.dt) ))
return result
def _HamiltonianShooting(self, q, p) :
# Here, we use the "scan" theano routine, which can be understood as a "for" loop
result, updates = theano.scan(fn = lambda x,y : self._hamiltonian_step(x,y),
outputs_info = [q,p],
n_steps = int(np.round(1/self.dt) ))
final_result = [result[0][-1], result[1][-1]] # We do not store the intermediate results
return final_result # and only return the final state + momentum
# Part 2bis : Action on the ambient space. -------------------------------------------------------
# This is useful to visualize the grid deformation, or in the control points setting
def _carry(self, q, p, s, dt) :
"""
Defines the infinitesimal action of a momentum p located at q
on the theano variable s.
"""
return s + dt * _gaussian_kernel(s, q, self.kernel_radius).dot(p)
def _hamiltonian_step_carrying(self, q,p, s) : # The "math" part of the code :
return [q + self.dt * self._dp_Hqp(q,p) , # Simplistic Euler scheme
p - self.dt * self._dq_Hqp(q,p) ,
self._carry(q, p, s, self.dt) ]
def _HamiltonianTrajectoryCarrying(self, q, p, s) :
# Here, we use the "scan" theano routine, which can be understood as a "for" loop
result, updates = theano.scan(fn = lambda x,y,z : self._hamiltonian_step_carrying(x,y,z),
outputs_info = [q,p,s],
n_steps = int(np.round(1/self.dt) ))
return result
def _HamiltonianShootingCarrying(self, q, p, s) :
# Here, we use the "scan" theano routine, which can be understood as a "for" loop
result = self._HamiltonianTrajectoryCarrying(q, p, s)
final_result = [result[0][-1], result[1][-1], result[2][-1]] # We do not store the intermediate results
return final_result # and only return the final state + momentum
# Part 3 : Cost function and derivatives -------------------------------------------------------
def _cost(self, q, p, *args) :
cost_reg = self._Hqp(q,p)
cost_att = self._data_attachment(self._HamiltonianShooting(q,p)[0], *args) # C(q_0, p_0) = A(q_1, x_t)
return self.weight_regularization * cost_reg + self.weight_attachment * cost_att[0], cost_att[1]
# The discrete backward scheme is automatically computed :
def _dcost_q0(self, q,p, *args) : # Useful for template estimation
return T.grad(self._cost(q,p,*args)[0], q) # The gradients wrt. q_0 is automatically computed
def _dcost_p0(self, q,p,*args) : # Useful in a matching problem
return T.grad(self._cost(q,p,*args)[0], p) # The gradients wrt. p_0 is automatically computed
def _opt_shooting_cost(self, q0, p0, *args) : # Wrapper
cost_info = self._cost( q0, p0, *args)
return [cost_info[0] , # the actual cost
q0,#self._dcost_q0( q0, p0, *args) ,
self._dcost_p0( q0, p0, *args) ,
self._HamiltonianShooting(q0,p0)[0],
cost_info[1]] # Additional information (transport plan, etc.)
# Appendix : Collection of data attachment terms -----------------------------------------------
def _data_attachment(self, q1, *args) :
"""Selects the appropriate data attachment routine, depending on self's attributes."""
raise(NotImplementedError)
# Input-Output =================================================================================
def show(self, *args, **kwargs) :
if self.plot_interactive :
self.interactive_show(*args, **kwargs)
def interactive_show(self) :
raise(NotImplementedError)
def marker(self, *args, **kwargs) :
if self.plot_interactive :
self.interactive_marker(*args, **kwargs)
if self.plot_file :
self.file_marker(*args, **kwargs)
def marker_target(self, *args, **kwargs) :
if self.plot_interactive :
self.interactive_marker_target(*args, **kwargs)
if self.plot_file :
self.file_marker_target(*args, **kwargs)
def plot_traj(self, *args, **kwargs) :
if self.plot_interactive :
self.interactive_plot_traj(*args, **kwargs)
if self.plot_file :
self.file_plot_traj(*args, **kwargs)
def quiver(self, *args, **kwargs) :
if self.plot_interactive :
self.interactive_quiver(*args, **kwargs)
if self.plot_file :
self.file_quiver(*args, **kwargs)
def plot_momentums(self, *args, **kwargs) :
if self.plot_interactive :
self.interactive_plot_momentums(*args, **kwargs)
if self.plot_file :
self.file_plot_momentums(*args, **kwargs)
def show_transport(self, *args, **kwargs) :
#if self.plot_interactive :
# self.interactive_show_transport(*args, **kwargs)
if self.plot_file :
self.file_show_transport(*args, **kwargs)
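# A purely illustrative numpy sketch (independent of the theano graph above) of
# one explicit Euler step of the landmark Hamiltonian dynamics that
# _hamiltonian_step encodes symbolically: dq/dt = dH/dp = K(q) p and
# dp/dt = -dH/dq, with H(q, p) = .5 * sum_ij k(q_i, q_j) <p_i, p_j>. The kernel
# shape below (exp(-|x-y|^2 / radius^2)) is an assumption standing in for
# _gaussian_kernel, and the carried points s follow the flow as in _carry.
def _numpy_hamiltonian_step_example(q, p, s, radius=1.0, dt=0.1):
    def k(x, y):
        sq = ((x[:, None, :] - y[None, :, :]) ** 2).sum(-1)   # pairwise squared distances
        return np.exp(-sq / (radius ** 2))
    K = k(q, q)
    dq = K.dot(p)                                             # dH/dp
    diff = q[:, None, :] - q[None, :, :]                      # q_i - q_j
    coef = K * p.dot(p.T)                                     # k(q_i, q_j) <p_i, p_j>
    dHdq = (-2.0 / radius ** 2) * (coef[:, :, None] * diff).sum(1)
    return (q + dt * dq,                                      # q <- q + dt * dH/dp
            p - dt * dHdq,                                    # p <- p - dt * dH/dq
            s + dt * k(s, q).dot(p))                          # s is simply advected by the flow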
| jeanfeydy/lddmm-ot | LDDMM_Python/lddmm_python/modules/manifolds/theano_hamiltonianclouds.py | Python | mit | 8,642 | [
"Gaussian"
] | 9936c3809c04da18547ffc4bca2164745bda11d1fc5daa601a6fcfc12756a41c |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import numpy as np
from psi4 import core
from psi4.driver import p4util
from psi4.driver import qcdb
from psi4.driver.p4util import solvers
from .augmented_hessian import ah_iteration
from .. import proc_util
def print_iteration(mtype, niter, energy, de, orb_rms, ci_rms, nci, norb, stype):
core.print_out("%s %2d: % 18.12f % 1.4e %1.2e %1.2e %3d %3d %s\n" %
(mtype, niter, energy, de, orb_rms, ci_rms, nci, norb, stype))
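# A small numpy illustration (not used by the driver) of what each entry of the
# MCSCF_ROTATE option does inside mcscf_solver() below: the two orbital columns
# of the requested irrep block are mixed by a 2x2 Givens rotation of angle theta.
def _rotate_orbital_pair_example(C_block, orb1, orb2, theta_degrees):
    theta = np.deg2rad(theta_degrees)
    x = C_block[:, orb1].copy()
    y = C_block[:, orb2].copy()
    C_block[:, orb1] = np.cos(theta) * x - np.sin(theta) * y
    C_block[:, orb2] = np.sin(theta) * x + np.cos(theta) * y
    return C_block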
def mcscf_solver(ref_wfn):
# Build CIWavefunction
core.prepare_options_for_module("DETCI")
ciwfn = core.CIWavefunction(ref_wfn)
ciwfn.set_module("detci")
# Hush a lot of CI output
ciwfn.set_print(0)
# Begin with a normal two-step
step_type = 'Initial CI'
total_step = core.Matrix("Total step", ciwfn.get_dimension('OA'), ciwfn.get_dimension('AV'))
start_orbs = ciwfn.get_orbitals("ROT").clone()
ciwfn.set_orbitals("ROT", start_orbs)
# Grab da options
mcscf_orb_grad_conv = core.get_option("DETCI", "MCSCF_R_CONVERGENCE")
mcscf_e_conv = core.get_option("DETCI", "MCSCF_E_CONVERGENCE")
mcscf_max_macroiteration = core.get_option("DETCI", "MCSCF_MAXITER")
mcscf_type = core.get_option("DETCI", "MCSCF_TYPE")
mcscf_d_file = core.get_option("DETCI", "CI_FILE_START") + 3
mcscf_nroots = core.get_option("DETCI", "NUM_ROOTS")
mcscf_wavefunction_type = core.get_option("DETCI", "WFN")
mcscf_ndet = ciwfn.ndet()
mcscf_nuclear_energy = ciwfn.molecule().nuclear_repulsion_energy()
mcscf_steplimit = core.get_option("DETCI", "MCSCF_MAX_ROT")
mcscf_rotate = core.get_option("DETCI", "MCSCF_ROTATE")
# DIIS info
mcscf_diis_start = core.get_option("DETCI", "MCSCF_DIIS_START")
mcscf_diis_freq = core.get_option("DETCI", "MCSCF_DIIS_FREQ")
mcscf_diis_error_type = core.get_option("DETCI", "MCSCF_DIIS_ERROR_TYPE")
mcscf_diis_max_vecs = core.get_option("DETCI", "MCSCF_DIIS_MAX_VECS")
# One-step info
mcscf_target_conv_type = core.get_option("DETCI", "MCSCF_ALGORITHM")
mcscf_so_start_grad = core.get_option("DETCI", "MCSCF_SO_START_GRAD")
mcscf_so_start_e = core.get_option("DETCI", "MCSCF_SO_START_E")
mcscf_current_step_type = 'Initial CI'
# Start with SCF energy and other params
scf_energy = ciwfn.variable("HF TOTAL ENERGY")
eold = scf_energy
norb_iter = 1
converged = False
ah_step = False
qc_step = False
approx_integrals_only = True
# Fake info to start with the initial diagonalization
ediff = 1.e-4
orb_grad_rms = 1.e-3
# Grab needed objects
diis_obj = solvers.DIIS(mcscf_diis_max_vecs)
mcscf_obj = ciwfn.mcscf_object()
# Execute the rotate command
for rot in mcscf_rotate:
if len(rot) != 4:
raise p4util.PsiException("Each element of the MCSCF rotate command requires 4 arguements (irrep, orb1, orb2, theta).")
irrep, orb1, orb2, theta = rot
if irrep > ciwfn.Ca().nirrep():
raise p4util.PsiException("MCSCF_ROTATE: Expression %s irrep number is larger than the number of irreps" %
(str(rot)))
if max(orb1, orb2) > ciwfn.Ca().coldim()[irrep]:
raise p4util.PsiException("MCSCF_ROTATE: Expression %s orbital number exceeds number of orbitals in irrep" %
(str(rot)))
theta = np.deg2rad(theta)
x = ciwfn.Ca().nph[irrep][:, orb1].copy()
y = ciwfn.Ca().nph[irrep][:, orb2].copy()
xp = np.cos(theta) * x - np.sin(theta) * y
yp = np.sin(theta) * x + np.cos(theta) * y
ciwfn.Ca().nph[irrep][:, orb1] = xp
ciwfn.Ca().nph[irrep][:, orb2] = yp
# Limited RAS functionality
if core.get_local_option("DETCI", "WFN") == "RASSCF" and mcscf_target_conv_type != "TS":
core.print_out("\n Warning! Only the TS algorithm for RASSCF wavefunction is currently supported.\n")
core.print_out(" Switching to the TS algorithm.\n\n")
mcscf_target_conv_type = "TS"
# Print out headers
if mcscf_type == "CONV":
mtype = " @MCSCF"
core.print_out("\n ==> Starting MCSCF iterations <==\n\n")
core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")
elif mcscf_type == "DF":
mtype = " @DF-MCSCF"
core.print_out("\n ==> Starting DF-MCSCF iterations <==\n\n")
core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")
else:
mtype = " @AO-MCSCF"
core.print_out("\n ==> Starting AO-MCSCF iterations <==\n\n")
core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")
# Iterate !
for mcscf_iter in range(1, mcscf_max_macroiteration + 1):
# Transform integrals, diagonalize H
ciwfn.transform_mcscf_integrals(approx_integrals_only)
nci_iter = ciwfn.diag_h(abs(ediff) * 1.e-2, orb_grad_rms * 1.e-3)
# After the first diag we need to switch to READ
ciwfn.set_ci_guess("DFILE")
ciwfn.form_opdm()
ciwfn.form_tpdm()
ci_grad_rms = ciwfn.variable("DETCI AVG DVEC NORM")
# Update MCSCF object
Cocc = ciwfn.get_orbitals("DOCC")
Cact = ciwfn.get_orbitals("ACT")
Cvir = ciwfn.get_orbitals("VIR")
opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
tpdm = ciwfn.get_tpdm("SUM", True)
mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)
current_energy = ciwfn.variable("MCSCF TOTAL ENERGY")
ciwfn.reset_ci_H0block()
orb_grad_rms = mcscf_obj.gradient_rms()
ediff = current_energy - eold
# Print iterations
print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
nci_iter, norb_iter, mcscf_current_step_type)
eold = current_energy
if mcscf_current_step_type == 'Initial CI':
mcscf_current_step_type = 'TS'
# Check convergence
if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)) and\
(mcscf_iter > 3) and not qc_step:
core.print_out("\n %s has converged!\n\n" % mtype);
converged = True
break
# Which orbital convergence are we doing?
if ah_step:
converged, norb_iter, step = ah_iteration(mcscf_obj, print_micro=False)
norb_iter += 1
if converged:
mcscf_current_step_type = 'AH'
else:
core.print_out(" !Warning. Augmented Hessian did not converge. Taking an approx step.\n")
step = mcscf_obj.approx_solve()
mcscf_current_step_type = 'TS, AH failure'
else:
step = mcscf_obj.approx_solve()
step_type = 'TS'
maxstep = step.absmax()
if maxstep > mcscf_steplimit:
core.print_out(' Warning! Maxstep = %4.2f, scaling to %4.2f\n' % (maxstep, mcscf_steplimit))
step.scale(mcscf_steplimit / maxstep)
xstep = total_step.clone()
total_step.add(step)
# Do or add DIIS
if (mcscf_iter >= mcscf_diis_start) and ("TS" in mcscf_current_step_type):
# Figure out DIIS error vector
if mcscf_diis_error_type == "GRAD":
error = core.triplet(ciwfn.get_orbitals("OA"),
mcscf_obj.gradient(),
ciwfn.get_orbitals("AV"),
False, False, True)
else:
error = step
diis_obj.add(total_step, error)
if not (mcscf_iter % mcscf_diis_freq):
total_step = diis_obj.extrapolate()
mcscf_current_step_type = 'TS, DIIS'
# Build the rotation by continuous updates
if mcscf_iter == 1:
totalU = mcscf_obj.form_rotation_matrix(total_step)
else:
xstep.axpy(-1.0, total_step)
xstep.scale(-1.0)
Ustep = mcscf_obj.form_rotation_matrix(xstep)
totalU = core.doublet(totalU, Ustep, False, False)
# Build the rotation directly (not recommended)
# orbs_mat = mcscf_obj.Ck(start_orbs, total_step)
# Finally rotate and set orbitals
orbs_mat = core.doublet(start_orbs, totalU, False, False)
ciwfn.set_orbitals("ROT", orbs_mat)
# Figure out what the next step should be
if (orb_grad_rms < mcscf_so_start_grad) and (abs(ediff) < abs(mcscf_so_start_e)) and\
(mcscf_iter >= 2):
if mcscf_target_conv_type == 'AH':
approx_integrals_only = False
ah_step = True
elif mcscf_target_conv_type == 'OS':
approx_integrals_only = False
mcscf_current_step_type = 'OS, Prep'
break
else:
continue
#raise p4util.PsiException("")
# If we converged do not do onestep
if converged or (mcscf_target_conv_type != 'OS'):
one_step_iters = []
# If we are not converged load in Dvec and build iters array
else:
one_step_iters = range(mcscf_iter + 1, mcscf_max_macroiteration + 1)
dvec = ciwfn.D_vector()
dvec.init_io_files(True)
dvec.read(0, 0)
dvec.symnormalize(1.0, 0)
ci_grad = ciwfn.new_civector(1, mcscf_d_file + 1, True, True)
ci_grad.set_nvec(1)
ci_grad.init_io_files(True)
# Loop for onestep
for mcscf_iter in one_step_iters:
# Transform integrals and update the MCSCF object
ciwfn.transform_mcscf_integrals(ciwfn.H(), False)
ciwfn.form_opdm()
ciwfn.form_tpdm()
# Update MCSCF object
Cocc = ciwfn.get_orbitals("DOCC")
Cact = ciwfn.get_orbitals("ACT")
Cvir = ciwfn.get_orbitals("VIR")
opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
tpdm = ciwfn.get_tpdm("SUM", True)
mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)
orb_grad_rms = mcscf_obj.gradient_rms()
# Warning! Does not work for SA-MCSCF
current_energy = mcscf_obj.current_total_energy()
current_energy += mcscf_nuclear_energy
ciwfn.set_variable("CI ROOT %d TOTAL ENERGY" % 1, current_energy)
ciwfn.set_variable("CURRENT ENERGY", current_energy)
ciwfn.set_energy(current_energy)
docc_energy = mcscf_obj.current_docc_energy()
ci_energy = mcscf_obj.current_ci_energy()
# Compute CI gradient
ciwfn.sigma(dvec, ci_grad, 0, 0)
ci_grad.scale(2.0, 0)
ci_grad.axpy(-2.0 * ci_energy, dvec, 0, 0)
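# i.e. ci_grad = 2 * (H|c> - E_CI * |c>); its norm below serves as the CI convergence measure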
ci_grad_rms = ci_grad.norm(0)
orb_grad_rms = mcscf_obj.gradient().rms()
ediff = current_energy - eold
print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
nci_iter, norb_iter, mcscf_current_step_type)
mcscf_current_step_type = 'OS'
eold = current_energy
if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)):
core.print_out("\n %s has converged!\n\n" % mtype);
converged = True
break
# Take a step
converged, norb_iter, nci_iter, step = qc_iteration(dvec, ci_grad, ciwfn, mcscf_obj)
# Rotate integrals to new frame
total_step.add(step)
orbs_mat = mcscf_obj.Ck(ciwfn.get_orbitals("ROT"), step)
ciwfn.set_orbitals("ROT", orbs_mat)
core.print_out(mtype + " Final Energy: %20.15f\n" % current_energy)
# Die if we did not converge
if (not converged):
if core.get_global_option("DIE_IF_NOT_CONVERGED"):
raise p4util.PsiException("MCSCF: Iterations did not converge!")
else:
core.print_out("\nWarning! MCSCF iterations did not converge!\n\n")
# Print out CI vector information
if mcscf_target_conv_type == 'OS':
dvec.close_io_files()
ci_grad.close_io_files()
# For orbital invariant methods we transform the orbitals to the natural or
# semicanonical basis. Frozen doubly occupied and virtual orbitals are not
# modified.
if core.get_option("DETCI", "WFN") == "CASSCF":
# Do we diagonalize the opdm?
if core.get_option("DETCI", "NAT_ORBS"):
ciwfn.ci_nat_orbs()
else:
ciwfn.semicanonical_orbs()
# Retransform integrals and update the CI coefficients, OPDM, and TPDM
ciwfn.transform_mcscf_integrals(approx_integrals_only)
ciwfn.set_print(1)
ciwfn.set_ci_guess("H0_BLOCK")
nci_iter = ciwfn.diag_h(mcscf_e_conv, mcscf_e_conv ** 0.5)
ciwfn.form_opdm()
ciwfn.form_tpdm()
proc_util.print_ci_results(ciwfn, "MCSCF", scf_energy, current_energy, print_opdm_no=True)
# Set final energy
ciwfn.set_variable("CURRENT ENERGY", ciwfn.variable("MCSCF TOTAL ENERGY"))
ciwfn.set_energy(ciwfn.variable("MCSCF TOTAL ENERGY"))
# What do we need to clean up?
if core.get_option("DETCI", "MCSCF_CI_CLEANUP"):
ciwfn.cleanup_ci()
if core.get_option("DETCI", "MCSCF_DPD_CLEANUP"):
ciwfn.cleanup_dpd()
del diis_obj
del mcscf_obj
return ciwfn
| susilehtola/psi4 | psi4/driver/procrouting/mcscf/mcscf_solver.py | Python | lgpl-3.0 | 14,311 | [
"Psi4"
] | 710b32e6dd48f0115908640eacf559d0266ce4aa7b3b22594e13701b07530f8a |
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
# FIXME: Some titles, such as Thor Ragnarok, raise exceptions and do not pull the URL correctly. Needs investigation.
import re,traceback,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['watch32hd.co']
self.base_link = 'https://watch32hd.co/'
self.search_link = '/results/%s'
self.watch_link = '/watch?v=%s_%s'
# Working: https://watch32hd.co/results?q=guardians+of+the+galaxy
#
# https://watch32hd.co/watch?v=Guardians_Of_The_Galaxy_2014#video=ggvOQDQLiMEw0h2fAil9YwZbiUtwuMcBfCs1mQ_4
# https://watch32hd.co/watch?v=Guardians_Of_The_Galaxy_2014
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('Watch32 - Exception: \n' + str(failure))
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url is None: return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['title']
year = data['year']
#url = urlparse.urljoin(self.base_link, self.search_link)
url = urlparse.urljoin(self.base_link, self.watch_link)
#url = url % (title.replace(':', '').replace(' ','_'), year)
url = url % (re.sub('[: \-]+','_',title), year)
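# collapse spaces, colons and hyphens in the title into underscores to match the site's watch?v=Title_Year URL scheme shown above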
search_results = client.request(url)
varid = re.compile('var frame_url = "(.+?)"',re.DOTALL).findall(search_results)[0].replace('/embed/','/streamdrive/info/')
res_chk = re.compile('class="title"><h1>(.+?)</h1>',re.DOTALL).findall(search_results)[0]
varid = 'http:'+varid
holder = client.request(varid)
links = re.compile('"src":"(.+?)"',re.DOTALL).findall(holder)
for link in links:
vid_url = link.replace('\\','')
if '1080' in res_chk:
quality = '1080p'
elif '720' in res_chk:
quality = '720p'
else:
quality = 'DVD'
sources.append({'source': 'Googlelink', 'quality': quality, 'language': 'en', 'url': vid_url, 'direct': False, 'debridonly': False})
return sources
except:
failure = traceback.format_exc()
log_utils.log('Watch32 - Exception: \n' + str(failure))
return sources
def resolve(self, url):
return url | RuiNascimento/krepo | script.module.lambdascrapers/lib/lambdascrapers/sources_placenta/en_placenta-1.7.8/to_be_fixed/needsfixing/watch32.py | Python | gpl-2.0 | 3,615 | [
"Galaxy"
] | 9410bfae75decbc44d65c44ddf0ee11863e8969cb68e529028f6d7fb8feb4d86 |
import unittest
import warnings
from nose.tools import assert_equal
import numpy as np
import iminuit
from iminuit import describe
from iminuit.iminuit_warnings import InitialParamWarning
from probfit.funcutil import rename
from probfit.pdf import gaussian, linear
from probfit.costfunc import UnbinnedLH, BinnedLH, BinnedChi2, Chi2Regression, \
SimultaneousFit
def assert_almost_equal(x, y, delta=1e-7):
if y - delta < x < y + delta:
pass
else:
raise AssertionError('x = %f and y = %f differs more than %g' % (x, y, delta))
class TestFit(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore", InitialParamWarning)
np.random.seed(0)
self.ndata = 20000
self.data = np.random.randn(self.ndata)
self.analytic = self.ndata * 0.5 * (np.log(2 * np.pi) + 1)
def test_UnbinnedLH(self):
f = gaussian
assert_equal(list(describe(f)), ['x', 'mean', 'sigma'])
lh = UnbinnedLH(gaussian, self.data,)
assert_equal(list(describe(lh)), ['mean', 'sigma'])
assert_almost_equal(lh(0, 1), 28188.201229348757)
minuit = iminuit.Minuit(lh)
assert_equal(minuit.errordef, 0.5)
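# errordef 0.5 is the iminuit convention for (negative log-)likelihood cost functions; chi-square style costs (tested below) use errordef 1.0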
def test_BinnedLH(self):
# write a better test... this depends on subtraction
f = gaussian
assert_equal(list(describe(f)), ['x', 'mean', 'sigma'])
lh = BinnedLH(gaussian, self.data, bound=[-3, 3])
assert_equal(list(describe(lh)), ['mean', 'sigma'])
assert_almost_equal(lh(0, 1), 20.446130781601543, 1)
minuit = iminuit.Minuit(lh)
assert_equal(minuit.errordef, 0.5)
def test_BinnedChi2(self):
f = gaussian
assert_equal(list(describe(f)), ['x', 'mean', 'sigma'])
lh = BinnedChi2(gaussian, self.data, bound=[-3, 3])
assert_equal(list(describe(lh)), ['mean', 'sigma'])
assert_almost_equal(lh(0, 1), 19951.005399882044, 1)
minuit = iminuit.Minuit(lh)
assert_equal(minuit.errordef, 1.0)
def test_Chi2Regression(self):
x = np.linspace(1, 10, 10)
y = 10 * x + 1
f = linear
assert_equal(list(describe(f)), ['x', 'm', 'c'])
lh = Chi2Regression(f, x, y)
assert_equal(list(describe(lh)), ['m', 'c'])
assert_almost_equal(lh(10, 1), 0)
assert_almost_equal(lh(10, 0), 10.)
minuit = iminuit.Minuit(lh)
assert_equal(minuit.errordef, 1.0)
def test_simultaneous(self):
np.random.seed(0)
data = np.random.randn(10000)
shifted = data + 3.
g1 = rename(gaussian, ['x', 'lmu', 'sigma'])
g2 = rename(gaussian, ['x', 'rmu', 'sigma'])
ulh1 = UnbinnedLH(g1, data)
ulh2 = UnbinnedLH(g2, shifted)
sim = SimultaneousFit(ulh1, ulh2)
assert_equal(describe(sim), ['lmu', 'sigma', 'rmu'])
minuit = iminuit.Minuit(sim, sigma=1.2, pedantic=False, print_level=0)
minuit.migrad()
assert(minuit.migrad_ok())
assert_almost_equal(minuit.values['lmu'], 0., delta=2 * minuit.errors['lmu'])
assert_almost_equal(minuit.values['rmu'], 3., delta=2 * minuit.errors['rmu'])
assert_almost_equal(minuit.values['sigma'], 1., delta=2 * minuit.errors['sigma'])
if __name__ == '__main__':
unittest.main()
| mtresch/probfit | test/testfit.py | Python | mit | 3,327 | [
"Gaussian"
] | 2895219b50d6413f95cfc8ff7bc5ba966d2169de3a60b82cc63c3c261a9a9484 |
__all__ = [ 'Generator' ]
import numpy as np
import molecules
a0 = 0.52917721092
class Generator( dict ):
"""
Used to create molecules, write dalton .mol files
using -param for study with use_calculator.py
only water is currently implemented;
methanol support is planned
"""
def __init__(self, *args, **kwargs):
#This water is the TIP3P model,
self[ ("water", "tip3p", "a_hoh", "degree") ] = 104.52
self[ ("water", "tip3p", "r_oh", "AA") ] = 0.9572
#This water is the SPC model,
self[ ("water", "spc", "a_hoh", "degree") ] = 109.47
self[ ("water", "spc", "r_oh", "AA") ] = 1.0
self[ ("methanol", "gas_opt", "r_oh", "AA" ) ] = 0.967
self[ ("methanol", "gas_opt", "r_co", "AA" ) ] = 1.428
self[ ("methanol", "gas_opt", "r_ch", "AA" ) ] = 1.098
self[ ("methanol", "gas_opt", "a_coh", "degree" ) ] = 107.16
self[ ("methanol", "gas_opt", "a_hch", "degree" ) ] = 109.6
self[ ("methanol", "gas_opt", "a_hco", "degree" ) ] = 109.342
self[ ("methanol", "gas_opt", "d_hcoh", "h4", "degree" ) ] = 60.0
self[ ("methanol", "gas_opt", "d_hcoh", "h5", "degree" ) ] = -60.0
self[ ("methanol", "gas_opt", "d_hcoh", "h6", "degree" ) ] = 180.0
#Default options for water
for val in ["r", "tau", "theta", "rho1", "rho2", "rho3", ]:
self[ ( val, 'min') ] = 0.0
self[ ( val, 'max') ] = 0.0
self[ ( val, 'points') ] = 1
self[ ( 'r', 'min') ] = 5.0
self[ ( 'r', 'max') ] = 10.0
self[ ( 'r', 'points') ] = 1
# Set by default all parameters to False
for val in ["r", "tau", "theta", "rho1", "rho2", "rho3", ]:
self[ ( val, "active" ) ] = False
@staticmethod
def get_pe_b3lyp_dal( co = 1.0, AA = True, max_l = 2, sites = 3):
r_order = max_l + 1
if AA:
aa = "AA"
else:
aa = "AU"
co /= a0
st = """**DALTON INPUT
.RUN WAVE FUNCTION
.DIRECT
.PARALLELL
.PEQM
*PEQM
.BORDER
REDIST -%d %.1f %s %d
**WAVE FUNCTION
.DFT
B3LYP
**END OF DALTON INPUT""" % (max_l+1, co, aa, sites)
return st
@staticmethod
def get_qmmm_b3lyp_dal( damp = False):
if damp:
damp = "\n.DAMP"
else:
damp = ""
st = """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
*QMMM
.QMMM%s
**WAVE FUNCTION
.DFT
B3LYP
**END OF DALTON INPUT""" % damp
return st
@staticmethod
def get_lin_dal( _string ):
if _string == 'hflin':
return Generator.get_hflin_dal()
elif _string == 'b3lyplin':
return Generator.get_b3lyplin_dal()
elif _string == 'camb3lyplin':
return Generator.get_camb3lyplin_dal()
elif _string == 'ccsdlin':
return Generator.get_ccsdlin_dal()
@staticmethod
def get_hf_imag_dal( freq = ("0.0",), functional = 'B3PW91' ):
_string = """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.HF
**RESPONSE
*ABSORP
.ALPHA
.IMAG F
.FREQUE
"""
_string += str(len(freq)) + '\n'
freqs = " ".join( map( str, freq ) )
_string += freqs
_string += '\n'
_string += "**END OF DALTON INPUT\n"
return _string
@staticmethod
def get_b3lyplin_dal():
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
B3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_b3lypqua_dal( ):
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
B3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*QUADRATIC
.QLOP
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_hflin_freq_dal( freq = "0.0", au = True, nm = False ):
_string = """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.HF
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
.FREQUE
1
%s
""" %( freq )
_string += "**END OF DALTON INPUT\n"
return _string
@staticmethod
def get_hfqua_dal( ):
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.HF
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*QUADRATIC
.QLOP
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_hflin_dal( ):
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.HF
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_b3lyplin_freq_dal( freq = "0.0", au = True, nm = False ):
_string = """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
B3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
.FREQUE
1
%s
""" %( freq )
_string += "**END OF DALTON INPUT\n"
return _string
@staticmethod
def get_camb3lyplin_dal():
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
CAMB3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_camb3lypqua_dal( ):
return """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
CAMB3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*QUADRATIC
.QLOP
.DIPLEN
**END OF DALTON INPUT"""
@staticmethod
def get_camb3lyplin_freq_dal( freq = "0.0", au = True, nm = False ):
_string = """**DALTON INPUT
.RUN RESPONSE
.DIRECT
.PARALLELL
**WAVE FUNCTION
.DFT
CAMB3LYP
.INTERFACE
**INTEGRAL
.DIPLEN
.SECMOM
**RESPONSE
.PROPAV
XDIPLEN
.PROPAV
YDIPLEN
.PROPAV
ZDIPLEN
*LINEAR
.DIPLEN
.FREQUE
1
%s
""" %( freq )
_string += "**END OF DALTON INPUT\n"
return _string
@staticmethod
def get_ccsdlin_dal():
return """**DALTON INPUT
.RUN RESPONSE
**INTEGRALS
.DIPLEN
.SECMOM
**WAVE FUNCTION
.CC
*CC INPUT
.CCSD
*CCFOP
.DIPMOM
*CCLR
.DIPOLE
**END OF DALTON INPUT
"""
@staticmethod
def get_ccsdqua_dal():
return """
**DALTON INPUT
.RUN RESPONSE
**INTEGRALS
.DIPLEN
.SECMOM
**WAVE FUNCTION
.CC
*CC INPUT
.CCSD
*CCFOP
.DIPMOM
*CCLR
.DIPOLE
*CCQR
.DIPOLE
**END OF DALTON INPUT
"""
def get_mol( self,
center = [0,0,0],
mol = "water",
model = "tip3p",
AA = False ):
"""return molecule in center, all molecules have different definition
of euler angles
for water place O in origo
for methanol place C=O bond in origo
"""
if mol == "water":
#Geometrical parameters, dependent om model
if model == "tip3p":
r_oh = self[ ("water", "tip3p", "r_oh", "AA") ]
a_hoh = self[ ("water", "tip3p", "a_hoh","degree") ]
if model == "spc":
r_oh = self[ ("water", "spc", "r_oh", "AA") ]
a_hoh = self[ ("water", "spc", "a_hoh","degree") ]
if not AA:
r_oh = r_oh / a0
d = (90 - a_hoh/2 ) * np.pi / 180
xo = center[0]
yo = center[1]
zo = center[2]
xh1 = (center[0] + r_oh * np.cos(d))
yh1 = center[1]
zh1 = (center[2] + r_oh* np.sin(d))
xh2 = (center[0] - r_oh * np.cos(d))
yh2 = center[1]
zh2 = (center[2] + r_oh* np.sin(d))
h1 = molecules.Atom( **{ "AA" : AA,
"x" : xh1,
"y" : yh1,
"z" : zh1,
"element" : "H"} )
h2 = molecules.Atom( **{ "AA" : AA,
"x" : xh2,
"y" : yh2,
"z" : zh2,
"element" : "H"} )
o = molecules.Atom( **{ "AA" : AA,
"x" : xo,
"y" : yo,
"z" : zo,
"element" : "O"} )
w = molecules.Water( AA = AA)
w.append( o )
w.append( h1 )
w.append( h2 )
return w
elif mol == "methanol":
r_co = self[ ("methanol", "gas_opt", "r_co", "AA" )]
r_oh = self[ ("methanol", "gas_opt", "r_oh", "AA" )]
r_ch = self[ ("methanol", "gas_opt", "r_ch", "AA" )]
a_coh = self[ ("methanol", "gas_opt", "a_coh", "degree" ) ]
#a_hch = self[ ("methanol","gas_opt", "a_hch", "degree" ) ]
a_hco = self[ ("methanol", "gas_opt", "a_hco", "degree" ) ]
a_coh *= np.pi / 180
a_hco *= np.pi / 180
d_hcoh_4 = self[ ("methanol","gas_opt", "d_hcoh", "h4", "degree" ) ]
d_hcoh_4 *= np.pi / 180
d_hcoh_5 = self[ ("methanol","gas_opt", "d_hcoh", "h5", "degree" ) ]
d_hcoh_5 *= np.pi / 180
d_hcoh_6 = self[ ("methanol","gas_opt", "d_hcoh", "h6", "degree" ) ]
d_hcoh_6 *= np.pi / 180
if not AA:
r_co, r_oh, r_ch = r_co/a0, r_oh/a0, r_ch/a0
c1 = molecules.Atom( **{"x":0, "y":0, "z":-r_co/2, "AA": AA, "element":"C" } )
o2 = molecules.Atom( **{"x":0, "y":0, "z": r_co/2, "AA": AA, "element":"O" } )
h3 = molecules.Atom( **{"x":r_oh*np.cos( a_coh-np.pi/2),
"y":0,
"z":r_oh*np.sin( a_coh-np.pi/2) + r_co/2,
"AA": AA, "element":"H" } )
h4 = molecules.Atom( **{"x": r_ch*np.sin( a_hco ) * np.cos( d_hcoh_4 ),
"y": r_ch*np.sin( a_hco) * np.sin( d_hcoh_4 ),
"z": r_ch*np.cos( a_hco) - r_co/2 ,
"AA": AA, "element":"H" } )
h5 = molecules.Atom( **{"x": r_ch*np.sin( a_hco ) * np.cos( d_hcoh_5 ),
"y": r_ch*np.sin( a_hco) * np.sin( d_hcoh_5 ),
"z": r_ch*np.cos( a_hco) - r_co/2 ,
"AA": AA, "element":"H" } )
h6 = molecules.Atom( **{"x": r_ch*np.sin( a_hco ) * np.cos( d_hcoh_6 ),
"y": r_ch*np.sin( a_hco) * np.sin( d_hcoh_6 ),
"z": r_ch*np.cos( a_hco) - r_co/2 ,
"AA": AA, "element":"H" } )
m = Methanol()
m.append(c1)
m.append(o2)
m.append(h3)
m.append(h4)
m.append(h5)
m.append(h6)
return m
def gen_mols_param(self, mol = "water",
model = 'tip3p',
basis = ["ano-1 2", "ano-1 4 3 1"],
AA = True,
worst = False):
r = np.linspace( self[ ('r', 'min')] , self[ ('r', 'max')] ,
self[ ('r', 'points' ) ] )
tau = np.linspace( self[ ('tau', 'min')] , self[ ('tau', 'max')] ,
self[ ('tau', 'points' ) ] )
theta = np.linspace( self[ ('theta', 'min')] , self[ ('theta', 'max')] ,
self[ ('theta', 'points' ) ] )
rho1 = np.linspace( self[ ('rho1', 'min')], self[ ('rho1', 'max')],
self[ ('rho1', 'points' ) ] )
rho2 = np.linspace( self[ ('rho2', 'min')], self[ ('rho2', 'max')],
self[ ('rho2', 'points' ) ] )
rho3 = np.linspace( self[ ('rho3', 'min')], self[ ('rho3', 'max')],
self[ ('rho3', 'points' ) ] )
if model == 'tip3p':
r_oh = self[ ("water", 'tip3p', "r_oh", "AA") ]
a_hoh = np.pi * self[ ("water", 'tip3p', "a_hoh", "degree" )] / 180.0
else:
r_oh = self[ ("water", 'tip3p', "r_oh", "AA") ]
a_hoh = np.pi * self[ ("water", 'tip3p', "a_hoh", "degree" )] / 180.0
for i in r:
for j in tau:
for k in theta:
for l in rho1:
for m in rho2:
for n in rho3:
w1 = molecules.Water.get_standard( AA = AA)
w1.t( -w1.o.r )
if worst:
w1 = self.get_mol( [0, 0, 0],
mol = mol,
model = model, AA = AA)
w1.populate_bonds()
w1.populate_angles()
w1.h1.scale_angle( 0.988 )
w1.h1.scale_bond( 0.985 )
w1.h2.scale_bond( 1.015 )
w1.inv_rotate()
w2 = molecules.Water.get_standard( AA = AA )
w2.t( -w2.o.r )
x, y, z = self.polar_to_cartesian( i, j, k )
w2.rotate( l, m, n )
w2.t( np.array( [x, y, z]) )
name = ""
name += "-".join( map( str, ["%3.2f"%i, "%3.2f"%j, "%3.2f"%k, "%3.2f"%l, "%3.2f"%m, "%3.2f"%n] ) )
name += ".mol"
c = molecules.Cluster( w1, w2 )
tmp_mol = c.get_mol_string( basis = tuple(basis))
f_ = open(name, 'w')
f_.write( tmp_mol )
return 0
def vary_parameters( self, opts ):
"""Given two parameters, e.g. r and theta, keeps all other static
param_list should be list of strings of parameters
["r":{"min": 2, "max":5, "points": 10}, "rho1" , ... ]
Has sane defaults, but these can be overridden by passing arguments to
the main program as:
-r_min 5
-r_max 10
-r_points 10
Which overrides defaults
"""
for val in opts:
self[ (val, 'active') ] = True
self[ (val, 'min') ] = opts[val][ "min" ]
self[ (val, 'max') ] = opts[val][ "max" ]
self[ (val, 'points') ] = opts[val][ "points" ]
def polar_to_cartesian(self, r, tau, theta):
x, y, z = r* np.sin( theta )*np.cos( tau ) \
, r* np.sin( theta )*np.sin( tau ) \
, r* np.cos( theta )
return x , y , z
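# (theta is the polar angle measured from the z-axis, tau the azimuthal angle in the xy-plane -- standard spherical coordinates)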
def one_mol_gen(self, mol = 'water', model = 'tip3p',):
"""
Only implemented for water so far"""
if mol == "water":
d = self[ ("r_oh_dev", "max") ]
p = self[ ("r_oh_dev", "points") ]
r_d = 0.01*np.linspace( -d, d, p )
d = self[ ("theta_hoh_dev", "max") ]
p = self[ ("theta_hoh_dev", "points") ]
theta_d = 0.01*np.linspace( -d, d, p )
#a_hoh = self[ ( mol, model, "a_hoh", "degree" ) ] *np.pi/180
#r_oh = self[ ( mol, model, "r_oh", "AA" ) ]
for i in r_d:
for j in r_d:
for k in theta_d:
scale_bond1 = 1 + i
scale_bond2 = 1 + j
scale_angle = 1 + k
names = map( lambda x:"%.3f"%x, [i, j, k] )
w = self.get_mol( mol = mol, model = model)
w.populate_bonds() ; w.populate_angles()
w.h1.scale_bond( scale_bond1 )
w.h2.scale_bond( scale_bond2 )
w.h1.scale_angle( scale_angle )
w.inv_rotate()
open( "_".join([model]+names) + ".mol",'w').write(w.get_mol_string())
def build_pna( self, xyz = "tmp.xyz", waters = 0,
min_r = 2.0,
mult_r = 10,
seed = 111 ):
pna = Molecule.from_xyz( xyz )
freqs = [ "0.0", "0.0238927", "0.0428227", "0.0773571" ]
np.random.seed( seed )
c = molecules.Cluster()
c.add_mol(pna, in_qm = True)
cnt = 0
while cnt < waters:
# Random rotation angles
t1 = np.random.uniform( 0, np.pi/2 )
t2 = np.random.uniform( 0, np.pi )
t3 = np.random.uniform( 0, np.pi/2 )
# random length, rho and tau
r = np.random.uniform( min_r , min_r * mult_r)
tau = np.random.uniform( 0, np.pi*2)
theta = np.random.uniform( 0,np.pi)
center = self.polar_to_cartesian( r, tau, theta )
wat = self.get_mol( center = pna.com + center,
mol = "water")
wat.rotate( t1, t2, t3 )
wat._res_id = cnt
if c.mol_too_close( wat ):
continue
#We are satisfied with this position, add properties to the water, and rotate them according to t1, t2, t3 so they match the water orientation
c.add_mol( wat, in_mm = True )
cnt += 1
for f_mm in freqs:
for dist in ["nodist", "dist"]:
for wat in [ m for m in c if m.in_mm ]:
t1, t2, t3 = wat.get_euler()
kwargs_dict = Template().get( *("TIP3P", "HF", "ANOPVDZ",
dist == "dist",f_mm ) )
for at in wat:
Property.add_prop_from_template( at, kwargs_dict )
Property.transform_ut_properties( wat.h1.Property, t1,t2,t3 )
Property.transform_ut_properties( wat.h2.Property, t1,t2,t3 )
Property.transform_ut_properties( wat.o.Property, t1,t2,t3 )
#Write out QM and MM region separately with properties
open("pna.mol" ,'w').write(c.get_qm_mol_string(
basis= ("ano-1 2 1", "ano-1 3 2 1"),
AA = True))
open("%dmm_%s_%s.pot" %(waters, f_mm, dist ),'w').write(c.get_qmmm_pot_string( in_AA = True ))
open("tmp.xyz", 'w').write( c.get_xyz_string() )
| fishstamp82/moltools | moltools/generator.py | Python | mit | 18,121 | [
"Dalton"
] | cbad4eae034f472005d10777208f2e474bb61c0b69701d3bd79c56aded6d2dfe |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/basic-neural-network.py
# Basic example of using neural networks
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
# Creation of a three layer neural network. The first layer is the input layer,
# receives the inputs, and it contains 2 neurons. The second layer is the first
# hidden layer and will contain 2 neurons. The last layer is the output layer
# and will contain only one neuron. We will choose as activation function the
# `Sigmoid` class, and `BackPropagation` class as the learning rule.
nn = p.FeedForward((2, 2, 1), p.Sigmoid, p.BackPropagation)
# We can use the `[]` operator to select a specific layer. Notice that the input
# layer cannot be modified in any way, so `[0]`-th layer is the first hidden
# layer. The `weights` property of a layer is an array containing the synaptic
# weights of those layer -- each line is the weight vector of the corresponding
# neuron.
nn[0].weights = array([[ 0.5, 0.5 ],
[ -0.5, -0.5 ]], dtype = float)
# We set up the synaptic weights of the neuron on the last layer. Notice that
# this layer could be accessed as `[-1]`, as a FeedForward network is only a
# list of `Layers`.
nn[1].weights = array([ 0.25, -0.25 ], dtype = float)
# This is an example that will be shown to the network for learning.
x = array([ 0.8, 0.2 ], dtype = float) # Input vector
d = 0.9 # Desired response
# We feed the network the input by calling the network as a function. The
# argument to the function is the input vector. The function returns the output
# of the network.
y = nn(x)
# The method below tells the network to learn the example. The specified
# learning rule, in this case the BackPropagation, will be used to adapt the
# synaptic weights of the network.
nn.feed(x, d)
# The code below shows the results
print "Peach tutorial on neural network basics"
print
print "Input to the network:"
print x
print "Network output:"
print y
print
print "Error: %7.4f" % (d - y,)
print
print "Updated weights in the first hidden layer:"
print nn[0].weights
print
print "Updated weights in the output layer:"
print nn[1].weights
print
print "Network output with updated weights:"
print nn(x)
print
print "Updated error: %7.4f" % (d - nn(x),)
print | anki1909/peach | tutorial/neural-networks/basic-neural-network.py | Python | lgpl-2.1 | 2,642 | [
"NEURON"
] | cebf1a465015b778f4b3538d4ce9cd5b0b3af5915fbc7b30ed5701d5594bc981 |
r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix or nt, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix' or 'nt'
- os.curdir is a string representing the current directory (always '.')
- os.pardir is a string representing the parent directory (always '..')
- os.sep is the (or a most common) pathname separator ('/' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import abc
import sys
import stat as st
from _collections_abc import _check_methods
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FDOPENDIR", "scandir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
# anyway, as a stub that always returns ENOTSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, exist_ok=exist_ok)
except FileExistsError:
# Defeats race condition when another thread created the path
pass
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not path.isdir(name):
raise
def removedirs(name):
"""removedirs(name)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except OSError:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except OSError:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune the
search, or to impose a specific order of visiting. Modifying dirnames when
topdown is false has no effect on the behavior of os.walk(), since the
directories in dirnames have already been generated by the time dirnames
itself is generated. No matter the value of topdown, the list of
subdirectories is retrieved before the tuples for the directory and its
subdirectories are generated.
By default errors from the os.scandir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an OSError instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
top = fspath(top)
dirs = []
nondirs = []
walk_dirs = []
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that scandir is global in this module due
# to earlier import-*.
scandir_it = scandir(top)
except OSError as error:
if onerror is not None:
onerror(error)
return
with scandir_it:
while True:
try:
try:
entry = next(scandir_it)
except StopIteration:
break
except OSError as error:
if onerror is not None:
onerror(error)
return
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
# a directory, same behaviour as os.path.isdir().
is_dir = False
if is_dir:
dirs.append(entry.name)
else:
nondirs.append(entry.name)
if not topdown and is_dir:
# Bottom-up: recurse into sub-directory, but exclude symlinks to
# directories if followlinks is False
if followlinks:
walk_into = True
else:
try:
is_symlink = entry.is_symlink()
except OSError:
# If is_symlink() raises an OSError, consider that the
# entry is not a symbolic link, same behaviour as
# os.path.islink().
is_symlink = False
walk_into = not is_symlink
if walk_into:
walk_dirs.append(entry.path)
# Yield before recursion if going top down
if topdown:
yield top, dirs, nondirs
# Recurse into sub-directories
islink, join = path.islink, path.join
for dirname in dirs:
new_path = join(top, dirname)
# Issue #23605: os.path.islink() is used instead of caching
# entry.is_symlink() result during the loop on os.scandir() because
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
else:
# Recurse into sub-directories
for new_path in walk_dirs:
yield from walk(new_path, topdown, onerror, followlinks)
# Yield after recursion if going bottom up
yield top, dirs, nondirs
__all__.append("walk")
if {open, stat} <= supports_dir_fd and {scandir, stat} <= supports_fd:
def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
"""Directory tree generator.
This behaves exactly like walk(), except that it yields a 4-tuple
dirpath, dirnames, filenames, dirfd
`dirpath`, `dirnames` and `filenames` are identical to walk() output,
and `dirfd` is a file descriptor referring to the directory `dirpath`.
The advantage of fwalk() over walk() is that it's safe against symlink
races (when follow_symlinks is False).
If dir_fd is not None, it should be a file descriptor open to a directory,
and top should be relative; top will then be relative to that directory.
(dir_fd is always supported for fwalk.)
Caution:
Since fwalk() yields file descriptors, those are only valid until the
next iteration step, so you should dup() them if you want to keep them
for a longer period.
Example:
import os
for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
if not isinstance(top, int) or not hasattr(top, '__index__'):
top = fspath(top)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
if not follow_symlinks:
orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
topfd = open(top, O_RDONLY, dir_fd=dir_fd)
try:
if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
path.samestat(orig_st, stat(topfd)))):
yield from _fwalk(topfd, top, isinstance(top, bytes),
topdown, onerror, follow_symlinks)
finally:
close(topfd)
def _fwalk(topfd, toppath, isbytes, topdown, onerror, follow_symlinks):
# Note: This uses O(depth of the directory tree) file descriptors: if
# necessary, it can be adapted to only require O(1) FDs, see issue
# #13734.
scandir_it = scandir(topfd)
dirs = []
nondirs = []
entries = None if topdown or follow_symlinks else []
for entry in scandir_it:
name = entry.name
if isbytes:
name = fsencode(name)
try:
if entry.is_dir():
dirs.append(name)
if entries is not None:
entries.append(entry)
else:
nondirs.append(name)
except OSError:
try:
# Add dangling symlinks, ignore disappeared files
if entry.is_symlink():
nondirs.append(name)
except OSError:
pass
if topdown:
yield toppath, dirs, nondirs, topfd
for name in dirs if entries is None else zip(dirs, entries):
try:
if not follow_symlinks:
if topdown:
orig_st = stat(name, dir_fd=topfd, follow_symlinks=False)
else:
assert entries is not None
name, entry = name
orig_st = entry.stat(follow_symlinks=False)
dirfd = open(name, O_RDONLY, dir_fd=topfd)
except OSError as err:
if onerror is not None:
onerror(err)
continue
try:
if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
dirpath = path.join(toppath, name)
yield from _fwalk(dirfd, dirpath, isbytes,
topdown, onerror, follow_symlinks)
finally:
close(dirfd)
if not topdown:
yield toppath, dirs, nondirs, topfd
__all__.append("fwalk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
if path.dirname(file):
exec_func(file, *argrest)
return
saved_exc = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except (FileNotFoundError, NotADirectoryError) as e:
last_exc = e
except OSError as e:
last_exc = e
if saved_exc is None:
saved_exc = e
if saved_exc is not None:
raise saved_exc
raise last_exc
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
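# Illustrative sketch (not part of the original module): the helper simply
# splits the PATH value on os.pathsep, e.g. on a POSIX system
#     get_exec_path({'PATH': '/usr/bin:/bin'})  ->  ['/usr/bin', '/bin']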
# Change environ to automatically call putenv(), unsetenv if they exist.
from _collections_abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
# list() from dict object is an atomic operation
keys = list(self._data)
for key in keys:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
if "putenv" not in __all__:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
if "unsetenv" not in __all__:
__all__.append("unsetenv")
def _createenviron():
if name == 'nt':
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
errors = sys.getfilesystemencodeerrors()
def fsencode(filename):
"""Encode filename (an os.PathLike, bytes, or str) to the filesystem
encoding with 'surrogateescape' error handler, return bytes unchanged.
On Windows, use 'strict' error handler if the file system encoding is
'mbcs' (which is the default encoding).
"""
filename = fspath(filename) # Does type-checking of `filename`.
if isinstance(filename, str):
return filename.encode(encoding, errors)
else:
return filename
def fsdecode(filename):
"""Decode filename (an os.PathLike, bytes, or str) from the filesystem
encoding with 'surrogateescape' error handler, return str unchanged. On
Windows, use 'strict' error handler if the file system encoding is
'mbcs' (which is the default encoding).
"""
filename = fspath(filename) # Does type-checking of `filename`.
if isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
return filename
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
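# Example sketch (assuming a UTF-8 filesystem encoding):
#     fsencode('café')          ->  b'caf\xc3\xa9'
#     fsdecode(b'caf\xc3\xa9')  ->  'café'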
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
__all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
if not isinstance(args, (tuple, list)):
raise TypeError('argv must be a tuple or a list')
if not args or not args[0]:
raise ValueError('argv first element cannot be empty')
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise OSError("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
__all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnl", "spawnle"])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnlp", "spawnlpe"])
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
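# Usage sketch (illustrative only): the wrapper behaves like the underlying
# stream and reports the command's status when closed, e.g.
#     with popen("echo hello") as p:
#         greeting = p.read()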
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
# For testing purposes, make sure the function is available when the C
# implementation exists.
def _fspath(path):
"""Return the path representation of a path-like object.
If str or bytes is passed in, it is returned unchanged. Otherwise the
os.PathLike interface is used to get the path representation. If the
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
if isinstance(path, (str, bytes)):
return path
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
else:
raise TypeError("expected str, bytes or os.PathLike object, "
"not " + path_type.__name__)
if isinstance(path_repr, (str, bytes)):
return path_repr
else:
raise TypeError("expected {}.__fspath__() to return str or bytes, "
"not {}".format(path_type.__name__,
type(path_repr).__name__))
# If there is no C implementation, make the pure Python version the
# implementation as transparently as possible.
if not _exists('fspath'):
fspath = _fspath
fspath.__name__ = "fspath"
class PathLike(abc.ABC):
"""Abstract base class for implementing the file system path protocol."""
@abc.abstractmethod
def __fspath__(self):
"""Return the file system path representation of the object."""
raise NotImplementedError
@classmethod
def __subclasshook__(cls, subclass):
if cls is PathLike:
return _check_methods(subclass, '__fspath__')
return NotImplemented
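# --- Illustrative sketch (editor's addition, not part of os.py): a minimal class that
# satisfies the PathLike protocol defined above. The class name is hypothetical.
#
#     class MyPath:
#         def __init__(self, raw):
#             self._raw = raw
#         def __fspath__(self):
#             return self._raw
#
#     issubclass(MyPath, PathLike)          # True, via __subclasshook__
#     fspath(MyPath("/tmp/example.txt"))    # -> "/tmp/example.txt"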
| prefetchnta/questlab | bin/x64bin/python/37/Lib/os.py | Python | lgpl-2.1 | 38,984 | ["VisIt"] | 0d2e9b0c308d8120d3a459abcdfcee65d8c25d8197ecf84fe9107fa225a35580 |
# GromacsWrapper: core.py
# Copyright (c) 2009 Oliver Beckstein <[email protected]>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
:mod:`gromacs.core` -- Core functionality
=========================================
Here the basic command class :class:`GromacsCommand` is defined. All Gromacs
command classes in :mod:`gromacs.tools` are automatically generated from
it. The documentation of :class:`GromacsCommand` applies to all wrapped Gromacs
commands and should be read by anyone using this package.
.. _input-output-label:
Input and Output
----------------
Each command wrapped by either :class:`GromacsCommand` or :class:`Command`
takes three additional keyword arguments: *stdout*, *stderr*, and
*input*. *stdout* and *stderr* determine how the command returns its own
output.
The *input* keyword is a string that is fed to the standard input of the
command (actually, :attr:`subprocess.Popen.stdin`). Or, if it is not string-like
then we assume it's actually a file-like object that we can read from, e.g. a
:attr:`subprocess.Popen.stdout` or a :class:`File`.
By setting the *stdout* and *stderr* keywords appropriately, one can have the
output simply printed to the screen (use ``True``; this is the default,
although see below for the use of the ``capture_output``
:mod:`gromacs.environment` flag), capture in a python variable as a string for
further processing (use ``False``), write to a file (use a :class:`File`
instance) or as input for another command (e.g. use the
:attr:`subprocess.Popen.stdin`).
When writing setup- and analysis pipelines it can be rather cumbersome to have
the gromacs output on the screen. For these cases GromacsWrapper allows you to
change its behaviour globally. By setting the value of the
:mod:`gromacs.environment` :class:`~gromacs.environment.Flag`
``capture_output`` to ``True`` (in the GromacsWrapper
:data:`gromacs.environment.flags` registry) ::
import gromacs.environment
gromacs.environment.flags['capture_output'] = True
all commands will capture their output (like *stderr* = ``False`` and *stdout*
= ``False``). Explicitly setting these keywords overrides the global
default. The default value for ``flags['capture_output']`` is ``False``,
i.e. output is directed through STDOUT and STDERR.
.. Warning::
One downside of ``flags['capture_output'] = True`` is that it becomes much
harder to debug scripts unless the script is written in such a way as to show
the output when the command fails. Therefore, it is advisable to only
capture output on well-tested scripts.
A third value of ``capture_output`` is the value ``"file"``::
gromacs.environment.flags['capture_output'] = "file"
This writes the captured output to a file. The file name is specified in
``flags['capture_output_filename']`` and defaults to
*"gromacs_captured_output.txt"*. This file is *over-written* for each
command. In this way one can investigate the output from the last command
(presumably because it failed). STDOUT and STDERR are captured into this file
by default. STDERR is printed first and then STDOUT, which does not necessarily
reflect the order of output one would see on the screen. If your code captures
STDOUT for further processing then an uncaptured STDERR is written to the
capture file.
.. Note::
There are some commands for which capturing output
(``flags['capture_output'] = True``) might be problematic. If the command
produces a large or infinite amount of data then a memory error will occur
because Python nevertheless stores the output internally first. Thus one
should avoid capturing progress output from
e.g. :class:`~gromacs.tools.Mdrun` unless the output has been throttled
appropriately.
Classes
-------
.. autoclass:: GromacsCommand
:members: __call__, run, transform_args, Popen, help,
check_failure, gmxdoc
:inherited-members:
.. autoclass:: Command
:members: __call__, run, transform_args, Popen, help,
command_name
.. autoclass:: PopenWithInput
:members:
"""
from __future__ import absolute_import, with_statement
__docformat__ = "restructuredtext en"
import sys
import re
import subprocess
from subprocess import STDOUT, PIPE
import warnings
import errno
import logging
logger = logging.getLogger('gromacs.core')
from .exceptions import GromacsError, GromacsFailureWarning
from . import environment
class Command(object):
"""Wrap simple script or command."""
#: Derive a class from command; typically one only has to set *command_name*
#: to the name of the script or executable. The full path is required if it
#: cannot be found by searching :envvar:`PATH`.
command_name = None
def __init__(self,*args,**kwargs):
"""Set up the command class.
The arguments can always be provided as standard positional
arguments such as
``"-c", "config.conf", "-o", "output.dat", "--repeats=3", "-v", "input.dat"``
In addition one can also use keyword arguments such as
``c="config.conf", o="output.dat", repeats=3, v=True``
These are automatically transformed appropriately according to
simple rules:
* Any single-character keywords are assumed to be POSIX-style
options and will be prefixed with a single dash and the value
separated by a space.
* Any other keyword is assumed to be a GNU-style long option
and thus will be prefixed with two dashes and the value will
be joined directly with an equals sign and no space.
If this does not work (as for instance for the options of the
UNIX ``find`` command) then provide options and values in the
sequence of positional arguments.
"""
self.args = args
self.kwargs = kwargs
def run(self,*args,**kwargs):
"""Run the command; args/kwargs are added or replace the ones given to the constructor."""
_args, _kwargs = self._combine_arglist(args, kwargs)
results, p = self._run_command(*_args, **_kwargs)
return results
def _combine_arglist(self, args, kwargs):
"""Combine the default values and the supplied values."""
_args = self.args + args
_kwargs = self.kwargs.copy()
_kwargs.update(kwargs)
return _args, _kwargs
def _run_command(self,*args,**kwargs):
"""Execute the command; see the docs for __call__.
:Returns: a tuple of the *results* tuple ``(rc, stdout, stderr)`` and
the :class:`Popen` instance.
"""
# hack to run command WITHOUT input (-h...) even though user defined
# input (should have named it "ignore_input" with opposite values...)
use_input = kwargs.pop('use_input', True)
# logic for capturing output (see docs on I/O and the flags)
capturefile = None
if environment.flags['capture_output'] is True:
# capture into Python vars (see subprocess.Popen.communicate())
kwargs.setdefault('stderr', PIPE)
kwargs.setdefault('stdout', PIPE)
elif environment.flags['capture_output'] == "file":
if 'stdout' in kwargs and 'stderr' in kwargs:
pass
else:
# XXX: not race or thread proof; potentially many commands write to the same file
fn = environment.flags['capture_output_filename']
capturefile = file(fn, "w") # overwrite (clobber) capture file
if 'stdout' in kwargs and 'stderr' not in kwargs:
# special case of stdout used by code but stderr should be captured to file
kwargs.setdefault('stderr', capturefile)
else:
# merge stderr with stdout and write stdout to file
# (stderr comes *before* stdout in capture file, could split...)
kwargs.setdefault('stderr', STDOUT)
kwargs.setdefault('stdout', capturefile)
try:
p = self.Popen(*args, **kwargs)
out, err = p.communicate(use_input=use_input) # special Popen knows input!
except:
if capturefile is not None:
logger.error("Use captured command output in %r for diagnosis.", capturefile)
raise
finally:
if capturefile is not None:
capturefile.close()
rc = p.returncode
return (rc, out, err), p
def _commandline(self, *args, **kwargs):
"""Returns the command line (without pipes) as a list."""
# transform_args() is a hook (used in GromacsCommand very differently!)
return [self.command_name] + self.transform_args(*args,**kwargs)
def commandline(self, *args, **kwargs):
"""Returns the commandline that run() uses (without pipes)."""
# this mirrors the setup in run()
_args, _kwargs = self._combine_arglist(args, kwargs)
return self._commandline(*_args, **_kwargs)
def Popen(self, *args,**kwargs):
"""Returns a special Popen instance (:class:`PopenWithInput`).
The instance has its input pre-set so that calls to
:meth:`~PopenWithInput.communicate` will not need to supply
input. This is necessary if one wants to chain the output from
one command to an input from another.
:TODO:
Write example.
"""
stderr = kwargs.pop('stderr', None) # default: print to stderr (if STDOUT then merge)
if stderr is False: # False: capture it
stderr = PIPE
elif stderr is True:
stderr = None # use stderr
stdout = kwargs.pop('stdout', None) # either set to PIPE for capturing output
if stdout is False: # ... or to False
stdout = PIPE
elif stdout is True:
stdout = None # for consistency, make True write to screen
stdin = kwargs.pop('stdin', None)
input = kwargs.pop('input', None)
if input:
stdin = PIPE
if isinstance(input, basestring):
# make sure that input is a simple string with \n line endings
if not input.endswith('\n'):
input += '\n'
else:
try:
# make sure that input is a simple string with \n line endings
# XXX: this is probably not unicode safe because of the use of str()
input = '\n'.join(map(str, input)) + '\n'
except TypeError:
# so maybe we are a file or something ... and hope for the best
pass
cmd = self._commandline(*args, **kwargs) # lots of magic happening here
# (cannot move out of method because filtering of stdin etc)
try:
p = PopenWithInput(cmd, stdin=stdin, stderr=stderr, stdout=stdout,
universal_newlines=True, input=input)
except OSError,err:
logger.error(" ".join(cmd)) # log command line
if err.errno == errno.ENOENT:
errmsg = "Failed to find Gromacs command %r, maybe its not on PATH or GMXRC must be sourced?" % self.command_name
logger.fatal(errmsg)
raise OSError(errmsg)
else:
logger.exception("Setting up Gromacs command %r raised an exception." % self.command_name)
raise
logger.debug(p.command_string)
return p
def transform_args(self, *args, **kwargs):
"""Transform arguments and return them as a list suitable for Popen."""
options = []
for option,value in kwargs.items():
if not option.startswith('-'):
# heuristic for turning key=val pairs into options
# (fails for commands such as 'find' -- then just use args)
if len(option) == 1:
option = '-' + option # POSIX style
else:
option = '--' + option # GNU option
if value is True:
options.append(option)
continue
elif value is False:
raise ValueError('A False value is ambiguous for option %r' % option)
if option[:2] == '--':
options.append(option + '=' + str(value)) # GNU option
else:
options.extend((option, str(value))) # POSIX style
return options + list(args)
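# --- Illustrative sketch (editor's addition, not part of the original module): what the
# heuristic above produces; keyword ordering follows dict iteration order.
#
#     Command().transform_args("input.dat", c="config.conf", repeats=3, v=True)
#     # -> ['-c', 'config.conf', '--repeats=3', '-v', 'input.dat']   (kwargs order may vary)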
def help(self,long=False):
"""Print help; same as using ``?`` in ``ipython``. long=True also gives call signature."""
print "\ncommand: %s\n\n" % self.command_name
print self.__doc__
if long:
print "\ncall method: command():\n"
print self.__call__.__doc__
def __call__(self,*args,**kwargs):
"""Run command with the given arguments::
rc,stdout,stderr = command(*args, input=None, **kwargs)
All positional parameters *args* and all gromacs *kwargs* are passed on
to the Gromacs command. input and output keywords allow communication
with the process via the python subprocess module.
:Arguments:
*input* : string, sequence
to be fed to the process' standard input;
elements of a sequence are concatenated with
newlines, including a trailing one [``None``]
*stdin*
``None`` or automatically set to ``PIPE`` if input given [``None``]
*stdout*
how to handle the program's stdout stream [``None``]
filehandle
anything that behaves like a file object
``None`` or ``True``
to see output on screen
``False`` or ``PIPE``
returns the output as a string in the stdout parameter
*stderr*
how to handle the stderr stream [``None``]
``STDOUT``
merges standard error with the standard out stream
``False`` or ``PIPE``
returns the output as a string in the stderr return parameter
``None`` or ``True``
keeps it on stderr (and presumably on screen)
Depending on the value of the GromacsWrapper flag
:data:`gromacs.environment.flags` ``['capture_output']`` the above
default behaviour can be different.
All other kwargs are passed on to the Gromacs tool.
:Returns:
The shell return code rc of the command is always returned. Depending
on the value of output, various strings are filled with output from the
command.
:Notes:
In order to chain different commands via pipes one must use the special
:class:`PopenWithInput` object (see :meth:`GromacsCommand.Popen` method) instead of the simple
call described here and first construct the pipeline explicitly and then
call the :meth:`PopenWithInput.communicate` method.
``STDOUT`` and ``PIPE`` are objects provided by the :mod:`subprocess` module. Any
python stream can be provided and manipulated. This allows for chaining
of commands. Use ::
from subprocess import PIPE, STDOUT
when requiring these special streams (and the special boolean
switches ``True``/``False`` cannot do what you need.)
(TODO: example for chaining commands)
"""
return self.run(*args,**kwargs)
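# --- Illustrative sketch (editor's addition, not part of the original module): wrapping an
# arbitrary executable with the Command class above. 'echo' is chosen purely for illustration.
#
#     class Echo(Command):
#         command_name = "echo"
#
#     rc, out, err = Echo()("hello", "world", stdout=False)  # capture stdout as a string
#     # rc == 0, out == "hello world\n", err is None (stderr stays on the terminal)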
class GromacsCommand(Command):
"""Base class for wrapping a g_* command.
Limitations: User must have sourced ``GMXRC`` so that the python script can
inherit the environment and find the gromacs programs.
The class doc string is dynamically replaced by the documentation of the
gromacs command when an instance is created.
"""
# TODO: setup the environment from GMXRC (can use env=DICT in Popen/call)
command_name = None
doc_pattern = """.*?(?P<DOCS>DESCRIPTION.*)"""
gmxfatal_pattern = """----+\n # ---- decorator line
\s*Program\s+(?P<program_name>\w+), # Program name,
\s+VERSION\s+(?P<version>[\w.]+)\s*\n # VERSION 4.0.5
(?P<message>.*?)\n # full message, multiple lines
\s* # empty line (?)
----+\n # ---- decorator line
"""
# matches gmx_fatal() output
# -------------------------------------------------------
# Program <program_name>, VERSION <version>
# ... <message>
# -------------------------------------------------------
#: Available failure modes.
failuremodes = ('raise', 'warn', None)
def __init__(self, *args, **kwargs):
"""Set up the command with gromacs flags as keyword arguments.
The following are generic instructions; refer to the Gromacs
command usage information that should have appeared before
this generic documentation.
As an example, a generic Gromacs command could use the following flags::
cmd = GromacsCommand('v', f=['md1.xtc','md2.xtc'], o='processed.xtc', t=200, ...)
which would correspond to running the command in the shell as ::
GromacsCommand -v -f md1.xtc md2.xtc -o processed.xtc -t 200
**Gromacs command line arguments**
Gromacs boolean switches (such as ``-v``) are given as python
positional arguments (``'v'``) or as keyword argument (``v=True``);
note the quotes in the first case. Negating a boolean switch can be
done with ``'nov'``, ``nov=True`` or ``v=False`` (and even ``nov=False``
works as expected: it is the same as ``v=True``).
Any Gromacs options that take parameters are handled as keyword
arguments. If an option takes multiple arguments (such as the
multi-file input ``-f file1 file2 ...``) then the list of files must be
supplied as a python list.
If a keyword has the python value ``None`` then it will *not* be
added to the Gromacs command line; this allows for flexible
scripting if it is not known in advance if an input file is
needed. In this case the default value of the gromacs tool
is used.
Keywords must be legal python keywords or the interpreter raises a
:exc:`SyntaxError` but of course Gromacs commandline arguments are
not required to be legal python. In this case "quote" the option
with an underscore (``_``) and the underscore will be silently
stripped. For instance, ``-or`` translates to the illegal keyword
``or`` so it must be underscore-quoted::
cmd(...., _or='mindistres.xvg')
**Command execution**
The command is executed with the :meth:`~GromacsCommand.run` method or by
calling it as a function. The two next lines are equivalent::
cmd(...)
cmd.run(...)
When the command is run one can override options that were given at
initialization or one can add additional ones. The same rules for
supplying Gromacs flags apply as described above.
**Non-Gromacs keyword arguments**
The other keyword arguments (listed below) are not passed on to the
Gromacs tool but determine how the command class behaves. *They are
only useful when instantiating a class*, i.e. they determine how
this tool behaves during all future invocations although it can be
changed by setting :attr:`failuremode`. This is mostly of interest
to developers.
:Keywords:
*failure*
determines how a failure of the gromacs command is treated; it
can be one of the following:
'raise'
raises GromacsError if command fails
'warn'
issue a :exc:`GromacsFailureWarning`
``None``
just continue silently
*doc* : string
additional documentation []
"""
self.__failuremode = None
self.failuremode = kwargs.pop('failure','raise')
self.extra_doc = kwargs.pop('doc',None)
self.gmxargs = self._combineargs(*args, **kwargs)
self.__doc__ = self.gmxdoc
def failuremode():
doc = """mode determines how the GromacsCommand behaves during failure
It can be one of the following:
'raise'
raises GromacsError if command fails
'warn'
issue a :exc:`GromacsFailureWarning`
``None``
just continue silently
"""
def fget(self):
return self.__failuremode
def fset(self, mode):
if not mode in self.failuremodes:
raise ValueError('failuremode must be one of %r' % (self.failuremodes,))
self.__failuremode = mode
return locals()
failuremode = property(**failuremode())
def _combine_arglist(self, args, kwargs):
"""Combine the default values and the supplied values."""
gmxargs = self.gmxargs.copy()
gmxargs.update(self._combineargs(*args,**kwargs))
return (), gmxargs # Gromacs tools don't have positional args --> args = ()
def check_failure(self, result, msg='Gromacs tool failed', command_string=None):
rc, out, err = result
if not command_string is None:
msg += '\nCommand invocation: ' + str(command_string)
had_success = (rc == 0)
if not had_success:
gmxoutput = "\n".join([x for x in [out, err] if not x is None])
m = re.search(self.gmxfatal_pattern, gmxoutput, re.VERBOSE | re.DOTALL)
if m:
formatted_message = ['GMX_FATAL '+line for line in m.group('message').split('\n')]
msg = "\n".join(\
[msg, "Gromacs command %(program_name)r fatal error message:" % m.groupdict()] +
formatted_message)
if self.failuremode == 'raise':
raise GromacsError(rc, msg)
elif self.failuremode == 'warn':
warnings.warn(msg + '\nError code: %r\n' % rc, category=GromacsFailureWarning)
elif self.failuremode is None:
pass
else:
raise ValueError('unknown failure mode %r' % self.failuremode)
return had_success
def _combineargs(self,*args,**kwargs):
"""Add switches as 'options' with value True to the options dict."""
d = {arg: True for arg in args} # switches are kwargs with value True
d.update(kwargs)
return d
def _build_arg_list(self,**kwargs):
"""Build list of arguments from the dict; keys must be valid gromacs flags."""
arglist = []
for flag,value in kwargs.items():
# XXX: check flag against allowed values
flag = str(flag)
if flag.startswith('_'):
flag = flag[1:] # python-illegal keywords are '_'-quoted
if not flag.startswith('-'):
flag = '-' + flag # now flag is guaranteed to start with '-'
if value is True:
arglist.append(flag) # simple command line flag
elif value is False:
if flag.startswith('-no'):
# negate a negated flag ('noX=False' --> X=True --> -X ... but who uses that?)
arglist.append('-'+flag[3:])
else:
arglist.append('-no'+flag[1:]) # gromacs switches booleans by prefixing 'no'
elif value is None:
pass # ignore flag = None
else:
try:
arglist.extend([flag] + value) # option with value list
except TypeError:
arglist.extend([flag, value]) # option with single value
return map(str, arglist) # all arguments MUST be strings
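# --- Illustrative sketch (editor's addition, not part of the original module): how the
# rules above map keywords to gromacs flags (keyword order aside).
#
#     self._build_arg_list(f=['md1.xtc', 'md2.xtc'], o='out.xtc', v=True, nopbc=False, t=None)
#     # -> ['-f', 'md1.xtc', 'md2.xtc', '-o', 'out.xtc', '-v', '-pbc']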
def _run_command(self,*args,**kwargs):
"""Execute the gromacs command; see the docs for __call__."""
result, p = super(GromacsCommand, self)._run_command(*args, **kwargs)
self.check_failure(result, command_string=p.command_string)
return result, p
def transform_args(self,*args,**kwargs):
"""Combine arguments and turn them into gromacs tool arguments."""
newargs = self._combineargs(*args,**kwargs)
return self._build_arg_list(**newargs)
def _get_gmx_docs(self):
"""Extract standard gromacs doc by running the program and chopping the header.
.. Note::
The header is on STDOUT and is ignored. The docs are read from STDERR.
"""
# Uses the class-wide arguments so that 'canned invocations' in cbook
# are accurately reflected. Might be a problem when these invocations
# supply wrong arguments... TODO: maybe check rc for that?
# use_input=False needed for running commands in cbook that have input pre-defined
# temporarily throttle logger to avoid reading about the help function invocation or not found
logging.disable(logging.CRITICAL)
try:
rc,header,docs = self.run('h', stdout=PIPE, stderr=PIPE, use_input=False)
finally:
logging.disable(logging.NOTSET) # ALWAYS restore logging....
m = re.match(self.doc_pattern, docs, re.DOTALL) # keep from DESCRIPTION onwards
if m is None:
return "(No Gromacs documentation available)"
return m.group('DOCS')
@property
def gmxdoc(self):
"""Usage for the underlying Gromacs tool (cached)."""
# note: use a single underscore so the hasattr() cache check is not defeated by name mangling
if not (hasattr(self, '_doc_cache') and self._doc_cache):
self._doc_cache = self._get_gmx_docs()
docs = self._doc_cache
if self.extra_doc:
docs = '\n'.join([self.extra_doc,'',
"Documentation of the gromacs tool", 34*'=',
docs])
return docs
class GromacsGMXCommand(GromacsCommand):
"""Base class for wrapping a ``gmx <name>`` command.
Limitations: User must have sourced ``GMXRC`` so that the python script can
inherit the environment and find the gromacs programs.
The class doc string is dynamically replaced by the documentation of the
gromacs command when an instance is created.
"""
driver = "gmx"
doc_pattern = """.*?(?P<DOCS>SYNOPSIS.*)"""
def _commandline(self, *args, **kwargs):
"""Returns the command line (without pipes) as a list."""
# transform_args() is a hook (used in GromacsCommand very differently!)
return [self.driver, self.command_name] + self.transform_args(*args,**kwargs)
class PopenWithInput(subprocess.Popen):
"""Popen class that knows its input.
1. Set up the instance, including all the input it should receive.
2. Call :meth:`PopenWithInput.communicate` later.
.. Note:: Some versions of python have a bug in the subprocess module
( `issue 5179`_ ) which does not clean up open file
descriptors. Eventually code (such as this one) fails with the
error:
*OSError: [Errno 24] Too many open files*
A weak workaround is to increase the available number of open
file descriptors with ``ulimit -n 2048`` and run analysis in
different scripts.
.. _issue 5179: http://bugs.python.org/issue5179
"""
def __init__(self,*args,**kwargs):
"""Initialize with the standard :class:`subprocess.Popen` arguments.
:Keywords:
*input*
string that is piped into the command
"""
kwargs.setdefault('close_fds', True) # fixes 'Too many open fds' with 2.6
self.input = kwargs.pop('input',None)
self.command = args[0]
try:
input_string = 'printf "' + \
self.input.replace('\n','\\n') + '" | ' # display newlines
except (TypeError, AttributeError):
input_string = ""
self.command_string = input_string + " ".join(self.command)
super(PopenWithInput,self).__init__(*args,**kwargs)
def communicate(self, use_input=True):
"""Run the command, using the input that was set up on __init__ (for *use_input* = ``True``)"""
if use_input:
return super(PopenWithInput,self).communicate(self.input)
else:
return super(PopenWithInput,self).communicate()
def __str__(self):
return "<Popen on %r>" % self.command_string
| pslacerda/GromacsWrapper | gromacs/core.py | Python | gpl-3.0 | 29,173 | ["Gromacs"] | fd27e388c57bc84d66de1cbc8a644222508b9a178d18e380b248b59ca6119267 |
"""
Unit tests for the module.
Thomas Ogden <[email protected]>
"""
import os
import unittest
import numpy as np
from maxwellbloch import mb_solve, t_funcs, spectral, utility
# Absolute path of tests/json directory, so that tests can be called from
# different directories.
JSON_DIR = os.path.abspath(os.path.join(__file__, '../', 'json'))
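# --- Illustrative sketch (editor's addition, not part of the test module): the basic
# workflow exercised by the tests below, using one of the JSON fixtures.
#
#     mbs = mb_solve.MBSolve().from_json(os.path.join(JSON_DIR, "mb_solve_01.json"))
#     Omegas_zt, states_zt = mbs.mbsolve()   # field and state evolution over the z-t grid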
class TestInit(unittest.TestCase):
def test_init_default(self):
""" Test Default Initialise """
mb_solve_00 = mb_solve.MBSolve()
self.assertEqual(mb_solve_00.atom.num_states, 1)
# TODO: And the rest!
def test_init_00(self):
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_01 = mb_solve.MBSolve().from_json(json_path)
@unittest.skip("TODO")
class TestSolveOverThermalDetunings(unittest.TestCase):
def test_00(self):
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_00 = mb_solve.MBSolve().from_json(json_path)
result_Delta = mb_solve_00.solve_over_thermal_detunings()
self.assertEqual(len(result_Delta),
len(mb_solve_00.thermal_delta_list))
class TestMBSolve(unittest.TestCase):
def test_mb_solve(self):
""" Basic test of mb_solve method. """
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_00 = mb_solve.MBSolve().from_json(json_path)
mb_solve_00.mbsolve()
def test_no_atoms(self):
""" Setting the number density ampl to 0.0, i.e. no atoms. The end
pulse should be the same as the start. """
json_path = os.path.join(JSON_DIR, "mb_solve_no_atoms.json")
mbs = mb_solve.MBSolve().from_json(json_path)
mbs.mbsolve(step='euler')
self.assertEqual(mbs.Omegas_zt.shape, (1, 5, 101))
# Check that the field at the end of the medium matches the field
# at the start of the medium.
self.assertTrue(np.allclose(mbs.Omegas_zt[:, 0, :],
mbs.Omegas_zt[:, -1, :], rtol=1.0e-6))
def test_no_atoms_ab(self):
""" Setting the number density to 0.0, i.e. no atoms, with AB step. """
json_path = os.path.join(JSON_DIR, "mb_solve_no_atoms.json")
mbs = mb_solve.MBSolve().from_json(json_path)
mbs.mbsolve(step='ab')
# Check that the field at the end of the medium matches the field
# at the start of the medium.
self.assertTrue(np.allclose(mbs.Omegas_zt[:, 0, :],
mbs.Omegas_zt[:, -1, :], rtol=1.0e-6))
def test_no_decays(self):
""" Empty decay list. """
json_path = os.path.join(JSON_DIR, "mb_solve_no_decays.json")
mb_solve_nd = mb_solve.MBSolve().from_json(json_path)
mb_solve_nd.mbsolve()
def test_no_rabi_freq_t_func(self):
""" Empty decay list. TODO: No mbsolve, should be in init"""
json_path = os.path.join(JSON_DIR, "mb_solve_no_rabi_freq_t_func.json")
mbs = mb_solve.MBSolve().from_json(json_path)
# self.assertEqual(mbs.ob_atom.fields[0].rabi_freq_t_func,
# t_funcs.square_1)
self.assertDictEqual(mbs.atom.fields[0].rabi_freq_t_args,
{"ampl_0": 1.0, "on_0": 0.0, "off_0": 1.0})
def test_two_gaussian_2pi(self):
""" Test of a gaussian input 2pi soliton propagating through a two-level
system.
"""
json_path = os.path.join(JSON_DIR, "mbs_two_gaussian_2pi.json")
mbs = mb_solve.MBSolve().from_json(json_path)
mbs.mbsolve()
# Input pulse is 2pi
self.assertAlmostEqual(mbs.fields_area()[0][0]/(np.pi), 2.0, places=1)
# Output pulse is 2pi
self.assertAlmostEqual(mbs.fields_area()[0][-1]/(np.pi), 2.0, places=1)
def test_two_gaussian_2pi_n_pi(self):
""" Test of a gaussian input 2pi soliton propagating through a two-level
system.
"""
json_path = os.path.join(JSON_DIR, "mbs_two_gaussian_2pi_n_pi.json")
mbs = mb_solve.MBSolve().from_json(json_path)
mbs.mbsolve()
# Input pulse is 2pi
self.assertAlmostEqual(mbs.fields_area()[0][0]/(np.pi), 2.0, places=1)
# Output pulse is 2pi
self.assertAlmostEqual(mbs.fields_area()[0][-1]/(np.pi), 2.0, places=1)
def test_two_sech_2pi(self):
""" Test of a 2pi soliton propagating through a two-level system.
"""
json_path = os.path.join(JSON_DIR, "mbs_two_sech_2pi.json")
mbs = mb_solve.MBSolve().from_json(json_path)
mbs.mbsolve()
# Input pulse is 2pi
self.assertAlmostEqual(mbs.fields_area()[0][0]/(np.pi), 2.0, places=1)
# Output pulse is 2pi
self.assertAlmostEqual(mbs.fields_area()[0][-1]/(np.pi), 2.0, places=1)
def test_two_sech_2pi_n_pi(self):
""" Test of a 2pi soliton propagating through a two-level system,
passing n_pi.
"""
json_path = os.path.join(JSON_DIR, "mbs_two_sech_2pi_n_pi.json")
mbs = mb_solve.MBSolve().from_json(json_path)
mbs.mbsolve()
# Input pulse is 2pi
self.assertAlmostEqual(mbs.fields_area()[0][0]/(np.pi), 2.0, places=1)
# Output pulse is 2pi
self.assertAlmostEqual(mbs.fields_area()[0][-1]/(np.pi), 2.0, places=1)
def test_no_vel_classes(self):
""" Empty velocity class dict. """
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mbs = mb_solve.MBSolve().from_json(json_path)
vc = {}
mbs.build_velocity_classes(vc)
mbs.mbsolve()
def test_no_vel_classes_inner(self):
""" No inner delta values in dict. TODO: No mbsolve, should be init"""
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mbs = mb_solve.MBSolve().from_json(json_path)
vc = {
"thermal_delta_min": -1.0,
"thermal_delta_max": 1.0,
"thermal_delta_steps": 2,
"thermal_width": 1.0
}
mbs.build_velocity_classes(vc)
mbs.mbsolve()
def test_zero_thermal_width(self):
"""TODO: No mbsolve, should be in init"""
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mbs = mb_solve.MBSolve().from_json(json_path)
vc = {
"thermal_delta_min": -1.0,
"thermal_delta_max": 1.0,
"thermal_delta_steps": 2,
"thermal_delta_inner_min": 0.0,
"thermal_delta_inner_max": 0.0,
"thermal_delta_inner_steps": 0,
"thermal_width": 0.0
}
self.assertRaises(ValueError, mbs.build_velocity_classes, vc)
def test_vel_classes(self):
"""Tests that for a linear two-level system with velocity classes, the
absorption matches the known Voigt profile.
"""
json_path = os.path.join(JSON_DIR, "velocity-classes.json")
mbs = mb_solve.MBSolve().from_json(json_path)
mbs.mbsolve()
freq_list = spectral.freq_list(mbs)
abs = spectral.absorption(mbs, 0, -1)
voigt = spectral.voigt_two_linear_known(freq_list, 1.0, 0.05).imag
# Assert that the max of the abs residuals between the absorption
# profile and the known broadened Voigt absorption profile for linear
# two-level systems is below a tolerance
self.assertTrue(np.max(np.abs(abs - voigt)) < 0.05)
class TestSaveLoad(unittest.TestCase):
""" Tests for the MBSolve save and load methods. """
def test_save_load_01(self):
""" Solve a basic MBSolve problem. Save the results to file. Set the
results in the MBSolve object to null. Load the results from
file and check that they equal the original values.
"""
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_01 = mb_solve.MBSolve().from_json(json_path)
Omegas_zt, states_zt = mb_solve_01.mbsolve()
mb_solve_01.save_results()
mb_solve_01.Omegas_zt = None
mb_solve_01.states_zt = None
mb_solve_01.load_results()
Omegas_zt_loaded = mb_solve_01.Omegas_zt
states_zt_loaded = mb_solve_01.states_zt
self.assertTrue((Omegas_zt == Omegas_zt_loaded).all())
self.assertTrue((states_zt == states_zt_loaded).all())
def test_save_load_no_recalc(self):
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_01 = mb_solve.MBSolve().from_json(json_path)
Omegas_zt, states_zt = mb_solve_01.mbsolve()
mb_solve_01.save_results()
mb_solve_01.Omegas_zt = None
mb_solve_01.states_zt = None
Omegas_zt, states_zt = mb_solve_01.mbsolve(recalc=False)
Omegas_zt_loaded = mb_solve_01.Omegas_zt
states_zt_loaded = mb_solve_01.states_zt
self.assertTrue((Omegas_zt == Omegas_zt_loaded).all())
self.assertTrue((states_zt == states_zt_loaded).all())
class TestBuildZlist(unittest.TestCase):
def test_00(self):
mb_solve_00 = mb_solve.MBSolve()
zlist = np.array([0., .1, .2, .3, .4, .5, .6, .7, .8, .9, 1.])
self.assertTrue(np.allclose(mb_solve_00.zlist, zlist, rtol=1.0e-6))
class TestGetOmegasIntpTFuncs(unittest.TestCase):
""" Unit tests of the get_Omegas_intp_t_funcs method """
def test_one_field(self):
""" For the case of a single field """
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_00 = mb_solve.MBSolve().from_json(json_path)
self.assertEqual(mb_solve_00.get_Omegas_intp_t_funcs(),
['intp'])
def test_two_fields(self):
""" For the case of two fields """
json_path = os.path.join(JSON_DIR, "mb_solve_lamda.json")
mb_solve_lamda = mb_solve.MBSolve().from_json(json_path)
self.assertEqual(mb_solve_lamda.get_Omegas_intp_t_funcs(),
['intp', 'intp'])
class TestGetOmegasIntpTArgs(unittest.TestCase):
""" Unit tests of the get_Omegas_intp_t_args method """
def test_one_field(self):
""" For the case of a single field """
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mb_solve_00 = mb_solve.MBSolve().from_json(json_path)
Omegas_z = mb_solve_00.Omegas_zt[:, 0, :]
t_args = mb_solve_00.get_Omegas_intp_t_args(Omegas_z)
self.assertEqual(len(t_args), 1)
self.assertTrue(np.all(t_args[0]['tlist'] == mb_solve_00.tlist))
self.assertTrue(np.all(t_args[0]['ylist'] == Omegas_z/(2.0*np.pi)))
class TestPopulations(unittest.TestCase):
def test_twolevel_shape(self):
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mbs = mb_solve.MBSolve().from_json(json_path)
pop_lower = mbs.populations([0])
pop_upper = mbs.populations([1])
np.testing.assert_allclose(pop_lower, np.zeros((mbs.z_steps+1,
mbs.t_steps+1)))
np.testing.assert_allclose(pop_upper, np.zeros((mbs.z_steps+1,
mbs.t_steps+1)))
class TestPopulationsField(unittest.TestCase):
def test_twolevel_shape(self):
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mbs = mb_solve.MBSolve().from_json(json_path)
pop_upper = mbs.populations_field(field_idx=0, upper=True)
pop_lower = mbs.populations_field(field_idx=0, upper=False)
np.testing.assert_allclose(pop_lower, np.zeros((mbs.z_steps+1,
mbs.t_steps+1)))
np.testing.assert_allclose(pop_upper, np.zeros((mbs.z_steps+1,
mbs.t_steps+1)))
class TestCoherences(unittest.TestCase):
def test_twolevel_shape(self):
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mbs = mb_solve.MBSolve().from_json(json_path)
coh = mbs.coherences([[0, 1]])
np.testing.assert_allclose(coh, np.zeros((mbs.z_steps+1,
mbs.t_steps+1)))
class TestCoherencesField(unittest.TestCase):
def test_twolevel_shape(self):
json_path = os.path.join(JSON_DIR, "mb_solve_01.json")
mbs = mb_solve.MBSolve().from_json(json_path)
coh = mbs.coherences_field(field_idx=0)
np.testing.assert_allclose(coh, np.zeros((mbs.z_steps+1,
mbs.t_steps+1)))
| tommyogden/maxwellbloch | maxwellbloch/tests/test_mb_solve.py | Python | mit | 12,255 | ["Gaussian"] | da505c5672d383276369e2d681bba230df4a29edebbeb3b956bd1f47e0689899 |
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
import scipy
from scipy.linalg import expm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
def ad_hoc_data(training_size, test_size, n, gap, PLOT_DATA):
class_labels = [r'A', r'B']
if n == 2:
N = 100
elif n == 3:
N = 20 # coarseness of data separation
label_train = np.zeros(2*(training_size+test_size))
sample_train = []
sampleA = [[0 for x in range(n)] for y in range(training_size+test_size)]
sampleB = [[0 for x in range(n)] for y in range(training_size+test_size)]
sample_Total = [[[0 for x in range(N)] for y in range(N)] for z in range(N)]
interactions = np.transpose(np.array([[1, 0], [0, 1], [1, 1]]))
steps = 2*np.pi/N
sx = np.array([[0, 1], [1, 0]])
X = np.asmatrix(sx)
sy = np.array([[0, -1j], [1j, 0]])
Y = np.asmatrix(sy)
sz = np.array([[1, 0], [0, -1]])
Z = np.asmatrix(sz)
J = np.array([[1, 0], [0, 1]])
J = np.asmatrix(J)
H = np.array([[1, 1], [1, -1]])/np.sqrt(2)
H2 = np.kron(H, H)
H3 = np.kron(H, H2)
H = np.asmatrix(H)
H2 = np.asmatrix(H2)
H3 = np.asmatrix(H3)
f = np.arange(2**n)
my_array = [[0 for x in range(n)] for y in range(2**n)]
for arindex in range(len(my_array)):
temp_f = bin(f[arindex])[2:].zfill(n)
for findex in range(n):
my_array[arindex][findex] = int(temp_f[findex])
my_array = np.asarray(my_array)
my_array = np.transpose(my_array)
# Define decision functions
maj = (-1)**(2*my_array.sum(axis=0) > n)
parity = (-1)**(my_array.sum(axis=0))
dict1 = (-1)**(my_array[0])
if n == 2:
D = np.diag(parity)
elif n == 3:
D = np.diag(maj)
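# Editor's worked illustration (not part of the original code): for n = 2 the columns
# of my_array are the bit strings 00, 01, 10, 11, so parity = (-1)**(bit sum) gives
# [+1, -1, -1, +1] and D = diag(+1, -1, -1, +1).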
Basis = np.random.random((2**n, 2**n)) + 1j*np.random.random((2**n, 2**n))
Basis = np.asmatrix(Basis).getH()*np.asmatrix(Basis)
[S, U] = np.linalg.eig(Basis)
idx = S.argsort()[::-1]
S = S[idx]
U = U[:, idx]
M = (np.asmatrix(U)).getH()*np.asmatrix(D)*np.asmatrix(U)
psi_plus = np.transpose(np.ones(2))/np.sqrt(2)
psi_0 = 1
for k in range(n):
psi_0 = np.kron(np.asmatrix(psi_0), np.asmatrix(psi_plus))
sample_total_A = []
sample_total_B = []
sample_total_void = []
if n == 2:
for n1 in range(N):
for n2 in range(N):
x1 = steps*n1
x2 = steps*n2
phi = x1*np.kron(Z, J) + x2*np.kron(J, Z) + (np.pi-x1)*(np.pi-x2)*np.kron(Z, Z)
Uu = scipy.linalg.expm(1j*phi)
psi = np.asmatrix(Uu)*H2*np.asmatrix(Uu)*np.transpose(psi_0)
temp = np.asscalar(np.real(psi.getH()*M*psi))
if temp > gap:
sample_Total[n1][n2] = +1
elif temp < -gap:
sample_Total[n1][n2] = -1
else:
sample_Total[n1][n2] = 0
# Now sample randomly from sample_Total a number of times training_size+testing_size
tr = 0
while tr < (training_size+test_size):
draw1 = np.random.choice(N)
draw2 = np.random.choice(N)
if sample_Total[draw1][draw2] == +1:
sampleA[tr] = [2*np.pi*draw1/N, 2*np.pi*draw2/N]
tr += 1
tr = 0
while tr < (training_size+test_size):
draw1 = np.random.choice(N)
draw2 = np.random.choice(N)
if sample_Total[draw1][draw2] == -1:
sampleB[tr] = [2*np.pi*draw1/N, 2*np.pi*draw2/N]
tr += 1
sample_train = [sampleA, sampleB]
for lindex in range(training_size+test_size):
label_train[lindex] = 0
for lindex in range(training_size+test_size):
label_train[training_size+test_size+lindex] = 1
label_train = label_train.astype(int)
sample_train = np.reshape(sample_train, (2*(training_size+test_size), n))
training_input = {key: (sample_train[label_train == k, :])[:training_size]
for k, key in enumerate(class_labels)}
test_input = {key: (sample_train[label_train == k, :])[training_size:(
training_size+test_size)] for k, key in enumerate(class_labels)}
if PLOT_DATA:
img = plt.imshow(np.asmatrix(sample_Total).T, interpolation='nearest',
origin='lower', cmap='copper', extent=[0, 2*np.pi, 0, 2*np.pi])
plt.show()
fig2 = plt.figure()
for k in range(0, 2):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("Ad-hoc Data")
plt.show()
elif n == 3:
for n1 in range(N):
for n2 in range(N):
for n3 in range(N):
x1 = steps*n1
x2 = steps*n2
x3 = steps*n3
phi = x1*np.kron(np.kron(Z, J), J) + x2*np.kron(np.kron(J, Z), J) + x3*np.kron(np.kron(J, J), Z) + \
(np.pi-x1)*(np.pi-x2)*np.kron(np.kron(Z, Z), J)+(np.pi-x2)*(np.pi-x3)*np.kron(np.kron(J, Z), Z) + \
(np.pi-x1)*(np.pi-x3)*np.kron(np.kron(Z, J), Z)
Uu = scipy.linalg.expm(1j*phi)
psi = np.asmatrix(Uu)*H3*np.asmatrix(Uu)*np.transpose(psi_0)
temp = np.asscalar(np.real(psi.getH()*M*psi))
if temp > gap:
sample_Total[n1][n2][n3] = +1
sample_total_A.append([n1, n2, n3])
elif temp < -gap:
sample_Total[n1][n2][n3] = -1
sample_total_B.append([n1, n2, n3])
else:
sample_Total[n1][n2][n3] = 0
sample_total_void.append([n1, n2, n3])
# Now sample randomly from sample_Total a number of times training_size+testing_size
tr = 0
while tr < (training_size+test_size):
draw1 = np.random.choice(N)
draw2 = np.random.choice(N)
draw3 = np.random.choice(N)
if sample_Total[draw1][draw2][draw3] == +1:
sampleA[tr] = [2*np.pi*draw1/N, 2*np.pi*draw2/N, 2*np.pi*draw3/N]
tr += 1
tr = 0
while tr < (training_size+test_size):
draw1 = np.random.choice(N)
draw2 = np.random.choice(N)
draw3 = np.random.choice(N)
if sample_Total[draw1][draw2][draw3] == -1:
sampleB[tr] = [2*np.pi*draw1/N, 2*np.pi*draw2/N, 2*np.pi*draw3/N]
tr += 1
sample_train = [sampleA, sampleB]
for lindex in range(training_size+test_size):
label_train[lindex] = 0
for lindex in range(training_size+test_size):
label_train[training_size+test_size+lindex] = 1
label_train = label_train.astype(int)
sample_train = np.reshape(sample_train, (2*(training_size+test_size), n))
training_input = {key: (sample_train[label_train == k, :])[:training_size]
for k, key in enumerate(class_labels)}
test_input = {key: (sample_train[label_train == k, :])[training_size:(
training_size+test_size)] for k, key in enumerate(class_labels)}
if PLOT_DATA:
sample_total_A = np.asarray(sample_total_A)
sample_total_B = np.asarray(sample_total_B)
x1 = sample_total_A[:, 0]
y1 = sample_total_A[:, 1]
z1 = sample_total_A[:, 2]
x2 = sample_total_B[:, 0]
y2 = sample_total_B[:, 1]
z2 = sample_total_B[:, 2]
fig1 = plt.figure()
ax1 = fig1.add_subplot(1, 1, 1, projection='3d')
ax1.scatter(x1, y1, z1, c='#8A360F')
plt.show()
#
fig2 = plt.figure()
ax2 = fig2.add_subplot(1, 1, 1, projection='3d')
ax2.scatter(x2, y2, z2, c='#683FC8')
plt.show()
sample_training_A = training_input['A']
sample_training_B = training_input['B']
x1 = sample_training_A[:, 0]
y1 = sample_training_A[:, 1]
z1 = sample_training_A[:, 2]
x2 = sample_training_B[:, 0]
y2 = sample_training_B[:, 1]
z2 = sample_training_B[:, 2]
fig1 = plt.figure()
ax1 = fig1.add_subplot(1, 1, 1, projection='3d')
ax1.scatter(x1, y1, z1, c='#8A360F')
ax1.scatter(x2, y2, z2, c='#683FC8')
plt.show()
return sample_Total, training_input, test_input, class_labels
def sample_ad_hoc_data(sample_Total, test_size, n):
tr = 0
class_labels = [r'A', r'B'] # copied from ad_hoc_data()
if n == 2:
N = 100
elif n == 3:
N = 20
label_train = np.zeros(2*test_size)
sampleA = [[0 for x in range(n)] for y in range(test_size)]
sampleB = [[0 for x in range(n)] for y in range(test_size)]
while tr < (test_size):
draw1 = np.random.choice(N)
draw2 = np.random.choice(N)
if sample_Total[draw1][draw2] == +1:
sampleA[tr] = [2*np.pi*draw1/N, 2*np.pi*draw2/N]
tr += 1
tr = 0
while tr < (test_size):
draw1 = np.random.choice(N)
draw2 = np.random.choice(N)
if sample_Total[draw1][draw2] == -1:
sampleB[tr] = [2*np.pi*draw1/N, 2*np.pi*draw2/N]
tr += 1
sample_train = [sampleA, sampleB]
for lindex in range(test_size):
label_train[lindex] = 0
for lindex in range(test_size):
label_train[test_size+lindex] = 1
label_train = label_train.astype(int)
sample_train = np.reshape(sample_train, (2 * test_size, n))
test_input = {key: (sample_train[label_train == k, :])[:] for k, key in enumerate(class_labels)}
return test_input
def Breast_cancer(training_size, test_size, n, PLOT_DATA):
class_labels = [r'A', r'B']
data, target = datasets.load_breast_cancer(True)
sample_train, sample_test, label_train, label_test = train_test_split(data, target, test_size=0.3, random_state=12)
# Now we standardize for a Gaussian around 0 with unit variance
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Now reduce number of features to number of qubits
pca = PCA(n_components=n).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Scale to the range (-1,+1)
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
# Pick training size number of samples from each distro
training_input = {key: (sample_train[label_train == k, :])[:training_size] for k, key in enumerate(class_labels)}
test_input = {key: (sample_train[label_train == k, :])[training_size:(
training_size+test_size)] for k, key in enumerate(class_labels)}
if PLOT_DATA:
for k in range(0, 2):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("PCA dim. reduced Breast cancer dataset")
plt.show()
return sample_train, training_input, test_input, class_labels
def Digits(training_size, test_size, n, PLOT_DATA):
class_labels = [r'A', r'B', r'C', r'D', r'E', r'F', r'G', r'H', r'I', r'J']
data = datasets.load_digits()
sample_train, sample_test, label_train, label_test = train_test_split(
data.data, data.target, test_size=0.3, random_state=22)
# Now we standardize for a Gaussian around 0 with unit variance
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Now reduce number of features to number of qubits
pca = PCA(n_components=n).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Scale to the range (-1,+1)
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
# Pick training size number of samples from each distro
training_input = {key: (sample_train[label_train == k, :])[:training_size] for k, key in enumerate(class_labels)}
test_input = {key: (sample_train[label_train == k, :])[training_size:(
training_size+test_size)] for k, key in enumerate(class_labels)}
if PLOT_DATA:
for k in range(0, 9):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("PCA dim. reduced Digits dataset")
plt.show()
return sample_train, training_input, test_input, class_labels
def Iris(training_size, test_size, n, PLOT_DATA):
class_labels = [r'A', r'B', r'C']
data, target = datasets.load_iris(True)
sample_train, sample_test, label_train, label_test = train_test_split(data, target, test_size=1, random_state=42)
# Now we standardize for a Gaussian around 0 with unit variance
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Now reduce number of features to number of qubits
pca = PCA(n_components=n).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Scale to the range (-1,+1)
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
# Pick training size number of samples from each distro
training_input = {key: (sample_train[label_train == k, :])[:training_size] for k, key in enumerate(class_labels)}
test_input = {key: (sample_train[label_train == k, :])[training_size:(
training_size+test_size)] for k, key in enumerate(class_labels)}
if PLOT_DATA:
for k in range(0, 3):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("Iris dataset")
plt.show()
return sample_train, training_input, test_input, class_labels
def Wine(training_size, test_size, n, PLOT_DATA):
class_labels = [r'A', r'B', r'C']
data, target = datasets.load_wine(True)
sample_train, sample_test, label_train, label_test = train_test_split(data, target, test_size=test_size, random_state=7)
# Now we standardize for a Gaussian around 0 with unit variance
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Now reduce number of features to number of qubits
pca = PCA(n_components=n).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Scale to the range (-1,+1)
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
# Pick training size number of samples from each distro
training_input = {key: (sample_train[label_train == k, :])[:training_size] for k, key in enumerate(class_labels)}
test_input = {key: (sample_train[label_train == k, :])[training_size:(
training_size+test_size)] for k, key in enumerate(class_labels)}
if PLOT_DATA:
for k in range(0, 3):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("PCA dim. reduced Wine dataset")
plt.show()
return sample_train, training_input, test_input, class_labels
def Gaussian(training_size, test_size, n, PLOT_DATA):
sigma = 1
if n == 2:
class_labels = [r'A', r'B']
label_train = np.zeros(2*(training_size+test_size))
sample_train = []
sampleA = [[0 for x in range(n)] for y in range(training_size+test_size)]
sampleB = [[0 for x in range(n)] for y in range(training_size+test_size)]
randomized_vector1 = np.random.randint(2, size=n)
randomized_vector2 = (randomized_vector1+1) % 2
for tr in range(training_size+test_size):
for feat in range(n):
if randomized_vector1[feat] == 0:
sampleA[tr][feat] = np.random.normal(-1/2, sigma, None)
elif randomized_vector1[feat] == 1:
sampleA[tr][feat] = np.random.normal(1/2, sigma, None)
else:
print('Nope')
if randomized_vector2[feat] == 0:
sampleB[tr][feat] = np.random.normal(-1/2, sigma, None)
elif randomized_vector2[feat] == 1:
sampleB[tr][feat] = np.random.normal(1/2, sigma, None)
else:
print('Nope')
sample_train = [sampleA, sampleB]
for lindex in range(training_size+test_size):
label_train[lindex] = 0
for lindex in range(training_size+test_size):
label_train[training_size+test_size+lindex] = 1
label_train = label_train.astype(int)
sample_train = np.reshape(sample_train, (2*(training_size+test_size), n))
training_input = {key: (sample_train[label_train == k, :])[:training_size]
for k, key in enumerate(class_labels)}
test_input = {key: (sample_train[label_train == k, :])[training_size:(
training_size+test_size)] for k, key in enumerate(class_labels)}
if PLOT_DATA:
fig1 = plt.figure()
for k in range(0, 2):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("Gaussians")
plt.show()
return sample_train, training_input, test_input, class_labels
elif n == 3:
class_labels = [r'A', r'B', r'C']
label_train = np.zeros(3*(training_size+test_size))
sample_train = []
sampleA = [[0 for x in range(n)] for y in range(training_size+test_size)]
sampleB = [[0 for x in range(n)] for y in range(training_size+test_size)]
sampleC = [[0 for x in range(n)] for y in range(training_size+test_size)]
randomized_vector1 = np.random.randint(3, size=n)
randomized_vector2 = (randomized_vector1+1) % 3
randomized_vector3 = (randomized_vector2+1) % 3
for tr in range(training_size+test_size):
for feat in range(n):
if randomized_vector1[feat] == 0:
sampleA[tr][feat] = np.random.normal(2*1*np.pi/6, sigma, None)
elif randomized_vector1[feat] == 1:
sampleA[tr][feat] = np.random.normal(2*3*np.pi/6, sigma, None)
elif randomized_vector1[feat] == 2:
sampleA[tr][feat] = np.random.normal(2*5*np.pi/6, sigma, None)
else:
print('Nope')
if randomized_vector2[feat] == 0:
sampleB[tr][feat] = np.random.normal(2*1*np.pi/6, sigma, None)
elif randomized_vector2[feat] == 1:
sampleB[tr][feat] = np.random.normal(2*3*np.pi/6, sigma, None)
elif randomized_vector2[feat] == 2:
sampleB[tr][feat] = np.random.normal(2*5*np.pi/6, sigma, None)
else:
print('Nope')
if randomized_vector3[feat] == 0:
sampleC[tr][feat] = np.random.normal(2*1*np.pi/6, sigma, None)
elif randomized_vector3[feat] == 1:
sampleC[tr][feat] = np.random.normal(2*3*np.pi/6, sigma, None)
elif randomized_vector3[feat] == 2:
sampleC[tr][feat] = np.random.normal(2*5*np.pi/6, sigma, None)
else:
print('Nope')
sample_train = [sampleA, sampleB, sampleC]
for lindex in range(training_size+test_size):
label_train[lindex] = 0
for lindex in range(training_size+test_size):
label_train[training_size+test_size+lindex] = 1
for lindex in range(training_size+test_size):
label_train[training_size+test_size+training_size+test_size+lindex] = 2
label_train = label_train.astype(int)
sample_train = np.reshape(sample_train, (3*(training_size+test_size), n))
training_input = {key: (sample_train[label_train == k, :])[:training_size]
for k, key in enumerate(class_labels)}
test_input = {key: (sample_train[label_train == k, :])[training_size:(
training_size+test_size)] for k, key in enumerate(class_labels)}
if PLOT_DATA:
fig1 = plt.figure()
for k in range(0, 3):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("Gaussians")
plt.show()
return sample_train, training_input, test_input, class_labels
else:
print("Gaussian presently only supports 2 or 3 qubits")
| antoniomezzacapo/qiskit-tutorial | community/aqua/artificial_intelligence/datasets.py | Python | apache-2.0 | 22,782 | ["Gaussian"] | 83ee06babdc0f5c11522cc2e9df559a42a8f7b6515c1c6a37440be639c8451fb |
"""
==============================================
Denoise images using Non-Local Means (NLMEANS)
==============================================
Using the non-local means filter [Coupe08]_ and [Coupe11]_ you can denoise
3D or 4D images and boost the SNR of your datasets. You can also decide between
modeling the noise as Gaussian or Rician (default).
"""
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
from time import time
from dipy.denoise.non_local_means import non_local_means
from dipy.denoise.nlmeans import nlmeans
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.data import fetch_sherbrooke_3shell, read_sherbrooke_3shell
fetch_sherbrooke_3shell()
img, gtab = read_sherbrooke_3shell()
data = img.get_data()
affine = img.affine
mask = data[..., 0] > 80
# We select only one volume for the example to run quickly.
data = data[..., 1]
print("vol size", data.shape)
# The measured data is already noisy; no synthetic noise needs to be added.
"""
In order to call ``non_local_means`` first you need to estimate the standard
deviation of the noise. We use N=4 since the Sherbrooke dataset was acquired
on a 1.5T Siemens scanner with a 4-element array head coil.
"""
sigma = estimate_sigma(data, N=4)
t = time()
"""
Calling the main function ``non_local_means``
"""
den = non_local_means(
data,
sigma=sigma,
mask=mask,
patch_radius=1,
block_radius=1,
rician=True)
print("total time", time() - t)
t = time()
den = nlmeans(data, sigma=sigma, mask=mask, patch_radius= 1, block_radius = 1, rician= True)
print("total time", time() - t)
"""
Let us plot the axial slice of the denoised output
"""
axial_middle = data.shape[2] // 2  # integer index
before = data[:, :, axial_middle].T
after = den[:, :, axial_middle].T
difference = np.abs(after.astype('f8') - before.astype('f8'))
difference[~mask[:, :, axial_middle].T] = 0
fig, ax = plt.subplots(1, 3)
ax[0].imshow(before, cmap='gray', origin='lower')
ax[0].set_title('before')
ax[1].imshow(after, cmap='gray', origin='lower')
ax[1].set_title('after')
ax[2].imshow(difference, cmap='gray', origin='lower')
ax[2].set_title('difference')
plt.savefig('denoised.png', bbox_inches='tight')
"""
.. figure:: denoised.png
:align: center
**Showing axial slice before (left) and after (right) NLMEANS denoising**
"""
nib.save(nib.Nifti1Image(den, affine), 'denoised.nii.gz')
"""
An improved version of non-local means denoising is adaptive soft coefficient
matching, please refer to :ref:`example_denoise_ascm` for more details.
References
----------
.. [Coupe08] P. Coupe, P. Yger, S. Prima, P. Hellier, C. Kervrann, C. Barillot,
"An Optimized Blockwise Non Local Means Denoising Filter for 3D Magnetic
Resonance Images", IEEE Transactions on Medical Imaging, 27(4):425-441, 2008
.. [Coupe11] Pierrick Coupe, Jose Manjon, Montserrat Robles, Louis Collins.
"Adaptive Multiresolution Non-Local Means Filter for 3D MR Image Denoising"
IET Image Processing, Institution of Engineering and Technology, 2011
.. include:: ../links_names.inc
"""
| villalonreina/dipy | doc/examples/denoise_nlmeans.py | Python | bsd-3-clause | 3,046 | [
"Gaussian"
] | 156a7b2623b8173997821d7acf4ba32d300fec656bfb744be2dd9dda02ea4b33 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Visit(CMakePackage):
"""VisIt is an Open Source, interactive, scalable, visualization,
animation and analysis tool."""
homepage = "https://wci.llnl.gov/simulation/computer-codes/visit/"
url = "http://portal.nersc.gov/project/visit/releases/2.10.1/visit2.10.1.tar.gz"
version('2.13.0', '716644b8e78a00ff82691619d4d1e7a914965b6535884890b667b97ba08d6a0f')
version('2.12.3', '2dd351a291ee3e79926bc00391ca89b202cfa4751331b0fdee1b960c7922161f')
version('2.12.2', '355779b1dbf440cdd548526eecd77b60')
version('2.10.3', 'a1082a6f6dab3e2dcb58993603456c2b')
version('2.10.2', '253de0837a9d69fb689befc98ea4d068')
version('2.10.1', '3cbca162fdb0249f17c4456605c4211e')
depends_on('[email protected]:', type='build')
depends_on('[email protected]~opengl2')
depends_on('[email protected]')
depends_on('qwt')
depends_on('python')
depends_on('silo+shared')
depends_on('hdf5')
root_cmakelists_dir = 'src'
def cmake_args(self):
spec = self.spec
qt_bin = spec['qt'].prefix.bin
args = [
'-DVTK_MAJOR_VERSION={0}'.format(spec['vtk'].version[0]),
'-DVTK_MINOR_VERSION={0}'.format(spec['vtk'].version[1]),
'-DVISIT_VTK_DIR:PATH={0}'.format(spec['vtk'].prefix),
'-DVISIT_USE_GLEW=OFF',
'-DVISIT_LOC_QMAKE_EXE:FILEPATH={0}/qmake-qt4'.format(qt_bin),
'-DPYTHON_DIR:PATH={0}'.format(spec['python'].home),
'-DVISIT_SILO_DIR:PATH={0}'.format(spec['silo'].prefix),
'-DVISIT_HDF5_DIR:PATH={0}'.format(spec['hdf5'].prefix),
'-DVISIT_QWT_DIR:PATH={0}'.format(spec['qwt'].prefix)
]
if spec.satisfies('^hdf5+mpi', strict=True):
args.append('-DVISIT_HDF5_MPI_DIR:PATH={0}'.format(
spec['hdf5'].prefix))
return args
| krafczyk/spack | var/spack/repos/builtin/packages/visit/package.py | Python | lgpl-2.1 | 3,084 | [
"VTK",
"VisIt"
] | 7287066b4bc70d5890b68af17bdb8586e9fb4e4865a54465f0a779867339bc6d |
from collections import defaultdict
import datetime
import sys
import threading
import tornado.ioloop
import tornado.web
import urllib
import uuid
from prisoner.workflow import PolicyProcessor, SocialObjectGateway
SERVER_URL = "http://localhost:8888"
class ExperimentBuilder(object):
""" The ExperimentBuilder is the interface for bootstrapping an
experiment with PRISONER. After instantiating an ExperimentBuilder,
complete the following steps:
- call provide_privacy_policy() with the path to your privacy policy XML
file
- call provide_experimental_design() with the path to your experimental
design XML file
- call authenticate_participant() with the id of the participant in this
session
- call authenticate_providers() with a list of services which the
participant must authenticate with to participate
- call build() to generate a pre-experiment flow, which allows
participants to review a human-readable version of your privacy
policy, and to authenticate themselves with providers as needed.
"""
def __init__(self):
self.sog = SocialObjectGateway.SocialObjectsGateway()
self.participant = None
self.providers = None
self.token = str(uuid.uuid4())
self.session = defaultdict(dict)
self.title = None
self.contact = None
self.connection_string = None
self.last_touch = None
def get_props(self, who_for=None):
""" Retrieve the props for a given target (eg. PRISONER or a provider)
:param who_for: the target to retrieve props for
:type who_for: str
"""
props = self.sog.persistence.props
if not who_for:
return props
else:
return props[who_for]
def provide_db_string(self, db_string):
""" Set connection string for this experiment.
:param db_string: connection string
:type db_string: str
"""
self.connection_string = db_string
def provide_title(self, title):
""" The title of the experiment as presented to your
participants.
:param title: Friendly experiment title
:type title: str """
self.title = title
def provide_contact(self, contact):
""" How to contact someone in connection with this experiment,
eg. an email address. This should be provided in a form that fits the following
sentence construction:
"Contact the researcher at <contact>."
:param contact: Contact information
:type contact: str
"""
self.contact = contact
def provide_privacy_policy(self, policy):
""" Provide the privacy policy for this experiment.
:param policy: Path to privacy policy file
:type policy: str
"""
self.sog.provide_privacy_policy(policy)
def provide_experimental_design(self, exp_design):
""" Provide the experimental design for this experiment.
:param exp_design: Path to experimental design file
:type exp_design: str
"""
self.sog.provide_experimental_design(exp_design,
self.connection_string)
def authenticate_participant(self, schema, participant_id):
""" Provide the ID of the participant in this experiment. This
participant must exist in the participant table for this
experiment.
:param participant_id: ID of participant
:type participant_id: int
"""
participant = self.sog.persistence.get_participant(schema, participant_id)
self.participant = participant
self.sog.participant = participant
return self.participant
def authenticate_providers(self, providers):
""" Provide a list of provider names this participant needs to
be authenticated with to participate (eg. if they are only using
a subset of providers all participants will be using, only include that subset
in this list). When the experiment is built, each gateway will inject its own
authentication logic.
:param providers: List of providers to authenticate with
:type providers: list[str]
"""
self.providers = providers
def build_schema(self):
""" Constructs the database schema (destroying whatever data
might already exist). This places the database in a state in which participants
may be registered, and experiments run, but does not return usable interfaces to
the rest of the workflow (such as the SocialObjectGateway) """
self.sog.persistence.do_build_schema(drop_first=True)
def build(self, callback_url):
""" Using the information provided by the participation client,
instigate the experiment consent process. This does the
following:
- parse the experimental design and privacy policy and generate
a human-readable document, relevant to the participant, which
also lists which providers the participant will be asked to authenticate with
- creates a temporary web server - the participation client must
access the returned URL using the cookie provided when the
ExperimentBuilder was instantiated
- when the user consents to the policies, each service gateway
for which authentication is needed provides a URL to
authenticate with which the participant is asked to visit in
turn (decorated by additional context from PRISONER for participants'
confidence). Note, this URL must contain the entire
authentication flow, so you may need to host this yourself, particularly if this
involves two (or more) factor authentication as users are bounced between URLs
(many authentication flows expect a URL callback). This flow
must return a token to persist alongside the Participant.
:param callback_url: A callable to be invoked only when consent is
confirmed - ie. the entrypoint for the participation client
:type callback_url: callable
:returns: URL participant must visit to begin consent flow
"""
# does authenticated participant already have a meta row?
# if so, consent already given, so skip consent and load auth,
for provider in list(self.providers):  # iterate over a copy; providers may be removed below
provider_auth = self.sog.persistence.get_existing_provider_auth(self.sog.participant[0],
provider)
if provider_auth:
can_auth = self.sog.restore_authentication(provider,
provider_auth)
if can_auth:
self.providers.remove(provider)
# no providers to authenticate
if not self.providers:
return callback_url
# indicate server needs to do a consent flow
self.exp_callback = callback_url
return True
# start server
# DEPRECATED: No more ad-hoc server - this is handled
# by the PRISONER WS server now!
application = tornado.web.Application([
(r"/", ConsentFlowHandler),
(r"/confirm", ProviderAuthentHandler),
(r"/complete", CompleteConsentHandler),
(r".*",CallbackHandler),
], builder=self)
application.listen(8888)
t = threading.Thread(target=tornado.ioloop.IOLoop.instance().start)
t.start()
return "%s/?pctoken=%s" % (SERVER_URL, self.token)
# serve human readable policies
def consent_confirmed(cookie):
""" Called when user with given cookie accepts consent. If
cookie is valid, continue the authentication flow for that participant.
"""
pass
class CompleteConsentHandler(tornado.web.RequestHandler):
""" Called when the user has authenticated themselves with the last
provider necessary. This completes the authentication flow and allows the
experimental application to begin. """
def get(self):
builder = self.application.settings["builder"]
callback_provider = self.get_argument("cbprovider")
builder.sog.complete_authentication(callback_provider,
self.request)
# evoke callback
if "http" in builder.exp_callback:
self.redirect(builder.exp_callback)
else:
self.write(builder.exp_callback)
class ConsentFlowHandler(tornado.web.RequestHandler):
""" This renders the human-readable representation of the privacy
policy and ensures the participant understands the data requirements of the
experimental application before providing consent. """
def get(self):
builder = self.application.settings["builder"]
token = self.get_argument("pctoken")
self.write("Stand back. We're doing science.</br>")
if token != builder.token:
self.write("Token %s is not %s" % (token, builder.token))
return
else:
self.write("(human readable consent here) </br></br>" +\
"Go <a href='confirm?pctoken=%s'>here</a> if you agree to " %builder.token +\
"the invisible information here.")
class CallbackHandler(tornado.web.RequestHandler):
""" Takes a parameter (callback), and calls the unescaped version of
that URL (useful for baking nested params in a callback URL)
"""
def get(self):
url = urllib.unquote(self.request.uri)
url = url.replace("?token","&token") # this is an insane shim for a bug in LFM
url = url.replace("?state","&state") # temp FB shim
self.redirect(url)
class ProviderAuthentHandler(tornado.web.RequestHandler):
""" Called during the authentication flow for each provider. Informs the
participant about the service they are about to authenticate themselves with,
then redirects to the appropriate URL for that service. """
def get(self):
builder = self.application.settings["builder"]
token = self.get_argument("pctoken")
providers = builder.providers
try:
current_provider = self.get_argument("provider")
if current_provider not in providers:
self.write("Invalid provider.")
return
providers.pop()
except:
current_provider = None
self.write("For this experiment, we need you to login to some services.</br>")
provider = providers[len(providers)-1]
self.write("<a href='confirm?provider=%s&pctoken=%s'>Login to"%(provider, token)+\
" %s</a>" % provider)
return
if providers:
callback = "%s/confirm?pctoken=%s&provider=%s&cbprovider=%s" % (SERVER_URL, token,
providers[len(providers)-1], current_provider)
else:
callback = "%s/complete?pctoken=%s&cbprovider=%s" % (SERVER_URL, token,
current_provider)
try:
callback_provider = self.get_argument("cbprovider")
builder.sog.complete_authentication(callback_provider,
self.request)
except:
pass
url = builder.sog.request_authentication(current_provider,
callback=urllib.quote(callback,safe=":/"))
self.redirect(url)
| uoscompsci/PRISONER | prisoner/workflow/ExperimentBuilder.py | Python | bsd-3-clause | 9,921 | [
"VisIt"
] | a40c8b65628183bc02f6210e37fe04d05602d863dd4571781b54baaf47d6ab71 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Introduction: This script is used to get effective number of codons (ENC) from fasta
# Input is a Seq format object (Bio.SeqIO)
# Created by galaxy on 2017/3/10 0010 17:37
from collections import defaultdict
from src.global_items import *
def degenerated():
"""
This function is used to load degenercy AAs.
:return:
"""
degeneracy_dict = defaultdict()
codon_dict = defaultdict()
for amino_acid, codons in degenerated_codons.items():
degeneracy_class = len(codons)
degeneracy_dict[amino_acid] = degeneracy_class
for codon in codons:
codon_dict[codon] = amino_acid
return degeneracy_dict, codon_dict
def read_seq(seq, codon_aa_dict):
aa_dict = defaultdict(int)
seq_codon_dict = defaultdict(int)
max_seq = len(seq)
query_codons = [seq[i:i + 3] for i in range(0, max_seq, 3)]
for each_codon in query_codons:
aa = codon_aa_dict[each_codon]
aa_dict[aa] += 1
seq_codon_dict[each_codon] += 1
return aa_dict, seq_codon_dict
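# Note on the method: the calculation below follows Wright's (1990) effective
# number of codons (ENC). For an amino acid observed n times with codon
# frequencies p_i, codon homozygosity is estimated as
#   F = (n * sum(p_i**2) - 1) / (n - 1),
# and ENC is the count of 1-fold (non-degenerate) amino acids plus, for each
# degeneracy class z that occurs, fold[z] divided by the mean F of that class,
# capped at 61 (the number of sense codons).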
def enc_calculation(aa_dict, codon_dict, seq_codon_dict, degeneracy_dict):
totb = defaultdict(int)
numaa = defaultdict(int)
error_t = False
fold = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for j in degeneracy_dict.keys():
deg = degeneracy_dict[j]
if j == 'STOP': # skip STOP codon
continue
if aa_dict[j] <= 1: # aa_dict[j]: nnaa + i
bb = 0
else:
s2 = 0
for x in codon_dict.keys():
if codon_dict[x] != j:
continue
if seq_codon_dict[x] == 0:
k2 = 0.0
else:
k2 = pow((seq_codon_dict[x] / aa_dict[j]), 2)
s2 += k2
bb = (aa_dict[j] * s2 - 1.0) / (aa_dict[j] - 1.0) # homozygosity
if bb > 0.0000001:
totb[deg] += bb
numaa[deg] += 1
fold[deg] += 1
enc_tot = fold[1]
for z in range(2, 9):
if fold[z]:
if numaa[z] and totb[z] > 0:
averb = totb[z] / numaa[z]
elif z == 3 and numaa[2] > 0 and numaa[4] > 0 and fold[z] == 1:
averb = (totb[2] / numaa[2] + totb[4] / numaa[4]) * 0.5
else:
error_t = True
break
enc_tot += fold[z] / averb
if error_t:
result = 0
elif enc_tot <= 61:
result = enc_tot
else:
result = 61.00
return result
def get_enc(query_seq, precision=2):
(degeneracy_dict, codon_dict) = degenerated()
seq_string = str(query_seq).upper().replace('U', 'T')
(aa_dict, seq_codon_dict) = read_seq(seq_string, genetic_code)
result = enc_calculation(aa_dict, codon_dict, seq_codon_dict, degeneracy_dict)
return round(result, precision)
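# A minimal usage sketch (the sequence below is purely illustrative; real coding
# sequences should be much longer for a meaningful ENC estimate):
# from Bio.Seq import Seq
# print(get_enc(Seq("ATGGCTGCAAAAGGGTTTTAA")))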
| cvn001/codonPY | src/get_ENC_from_fasta.py | Python | mit | 2,880 | [
"Galaxy"
] | 2ed881516e92ee4e0addf19ce7ad6e7e2bd439ac546694a1aeb5ba7113628f5d |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Tinker(CMakePackage):
"""The Tinker molecular modeling software is a complete and general
package for molecular mechanics and dynamics, with some special
features for biopolymers.
"""
homepage = "https://dasher.wustl.edu/tinker/"
url = "https://dasher.wustl.edu/tinker/downloads/tinker-8.7.1.tar.gz"
version('8.7.1', sha256='0d6eff8bbc9be0b37d62b6fd3da35bb5499958eafe67aa9c014c4648c8b46d0f')
patch('tinker-8.7.1-cmake.patch')
depends_on('fftw')
root_cmakelists_dir = 'source'
| LLNL/spack | var/spack/repos/builtin/packages/tinker/package.py | Python | lgpl-2.1 | 758 | [
"TINKER"
] | e4920a6f0a73d35c262f517e490f412b092a869e2997880bfdb5808e89b30941 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similar to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from collections.abc import Iterable
from distutils.version import LooseVersion
from functools import reduce
from typing import Any, List, Optional, Tuple, Union, TYPE_CHECKING, cast
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import functions as F
from pyspark.sql.types import (
BooleanType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.typedef import Scalar, spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
from pyspark.pandas.window import Rolling, Expanding
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import DataFrameGroupBy, SeriesGroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key):
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(self, op, should_resolve: bool = False):
pass
@abstractmethod
def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=True, **kwargs):
pass
@property
@abstractmethod
def dtypes(self):
pass
@abstractmethod
def to_pandas(self):
pass
@property
@abstractmethod
def index(self):
pass
@abstractmethod
def copy(self):
pass
@abstractmethod
def _to_internal_pandas(self):
pass
@abstractmethod
def head(self, n: int = 5):
pass
# TODO: add 'axis' parameter
def cummin(self, skipna: bool = True) -> Union["Series", "DataFrame"]:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self, skipna: bool = True) -> Union["Series", "DataFrame"]:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self, skipna: bool = True) -> Union["Series", "DataFrame"]:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self, skipna: bool = True) -> Union["Series", "DataFrame"]:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
.. note:: unlike pandas, pandas-on-Spark emulates the cumulative product with the
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If the values is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
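# A rough sketch of the ``exp(sum(log(...)))`` emulation mentioned in the note
# above (illustrative only: ``c`` stands for a numeric Spark column and ``w``
# for an ordered Window; see the ``prod`` helper inside ``product`` below for
# the full sign and zero handling):
# abs_cumprod = F.exp(F.sum(F.log(F.abs(c))).over(w))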
# TODO: Although this has removed pandas >= 1.0.0, but we're keeping this as deprecated
# since we're using this for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func, *args, **kwargs) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path=None,
sep=",",
na_rep="",
columns=None,
header=True,
quotechar='"',
date_format=None,
escapechar=None,
num_files=None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's property such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
This kwargs are specific to PySpark's CSV options to pass. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# 0.23 seems not having 'columns' parameter in Series' to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path=None,
compression="uncompressed",
num_files=None,
mode: str = "overwrite",
orient="records",
lines=True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's property such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: the output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If ‘orient’ is ‘records’ write out line delimited json format.
Will throw ValueError if ‘orient’ is incorrect, since the others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
It is specific to PySpark's JSON options to pass. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
--------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Union[int, str] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column, spark_type):
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Union[int, str] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column, spark_type):
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), F.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Union[int, str] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas, pandas-on-Spark emulates the product with the ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type column is not included to the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there is no numeric type columns, returns empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def prod(spark_column, spark_type):
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, F.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, F.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
def skew(
self, axis: Union[int, str] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column, spark_type):
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Union[int, str] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column, spark_type):
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Union[int, str] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Union[int, str] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Union[int, str] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
        count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Union[int, str] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column, spark_type):
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Union[int, str] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column, spark_type):
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Union[int, str] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
        .. note:: Unlike pandas, the median in pandas-on-Spark is an approximated median based upon
            approximate percentile computation, because computing the median across a large dataset
            is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column, spark_type):
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Union[int, str] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar(for Series) or Series(for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column, spark_type):
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
def sem(spark_column, spark_type):
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self) -> Union["DataFrame", "Series"]:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser):
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser.spark.transform(F.abs)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self, by, axis=0, as_index: bool = True, dropna: bool = True
) -> Union["DataFrameGroupBy", "SeriesGroupBy"]:
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
        We can also choose to include NA in group keys or not by setting the dropna
        parameter; the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
from pyspark.pandas.groupby import DataFrameGroupBy, SeriesGroupBy
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
by = [by]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
by = [by]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
by = [(by,)]
elif is_list_like(by):
new_by = [] # type: List[Union[Tuple, ps.Series]]
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
by = new_by
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
if isinstance(self, ps.DataFrame):
return DataFrameGroupBy._build(self, by, as_index=as_index, dropna=dropna)
elif isinstance(self, ps.Series):
return SeriesGroupBy._build(self, by, as_index=as_index, dropna=dropna)
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
        This must be a boolean scalar value, either True or False. Raise a ValueError if
        the object does not have exactly 1 element, or if that element is not boolean.
Returns
--------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
        If non-boolean or multiple values exist, it raises an exception in all
        cases, as shown below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(self, window, min_periods=None) -> Rolling:
"""
Provide rolling transformations.
        .. note:: Unlike pandas, 'min_periods' in pandas-on-Spark works as a fixed window size.
            Also unlike pandas, NA is counted as a period. This might be changed
            in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self, min_periods=1) -> Expanding:
"""
Provide expanding transformations.
        .. note:: Unlike pandas, 'min_periods' in pandas-on-Spark works as a fixed window size.
            Also unlike pandas, NA is counted as a period. This might be changed
            in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
return Expanding(self, min_periods=min_periods)
def get(self, key, default=None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis=None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = self.head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self, before=None, after=None, axis=None, copy=True
) -> Union["DataFrame", "Series"]:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
        A Series with an index of sorted integers.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
        A Series with an index of sorted strings.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(Union[ps.DataFrame, ps.Series], result.copy() if copy else result)
def to_markdown(self, buf=None, mode=None) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
        # `to_markdown` is only supported with pandas >= 1.0.0, since it was first added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None):
pass
# TODO: add 'downcast' when value parameter exists
def bfill(self, axis=None, inplace=False, limit=None) -> Union["DataFrame", "Series"]:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`bfill```.
        .. note:: the current implementation of 'bfill' uses Spark's Window
            without specifying partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(self, axis=None, inplace=False, limit=None) -> Union["DataFrame", "Series"]:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`ffill```.
        .. note:: the current implementation of 'ffill' uses Spark's Window
            without specifying partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method on very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self):
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column, spark_type):
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, F.lit(None)))
else:
return F.count(spark_column)
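# Illustrative sketch, added for this document and not part of the original
# pyspark.pandas module: it shows why ``_count_expr`` above wraps floating-point
# columns in ``F.nanvl``. Spark's ``count`` treats NaN as a valid value, while
# pandas' count excludes it, so NaN is first mapped to null. The helper name is
# hypothetical and it assumes an already-created SparkSession is passed in.
def _demo_count_expr_nan_handling(spark):
    from pyspark.sql import functions as F

    sdf = spark.createDataFrame([(1.0,), (float("nan"),), (None,)], "x double")
    naive = sdf.select(F.count(F.col("x"))).first()[0]  # 2: NaN is counted, null is not
    pandas_like = sdf.select(F.count(F.nanvl(F.col("x"), F.lit(None)))).first()[0]  # 1
    return naive, pandas_like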
def _test():
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
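# Illustrative sketch, added for this document and not part of the original file:
# a plain-Python check of the relationship used by ``sem`` above, namely
# standard error = stddev / sqrt(count), against the docstring values for [1, 2, 3].
# The helper name is hypothetical and no Spark session is needed.
def _demo_sem_relation(values=(1, 2, 3), ddof=1):
    import math

    n = len(values)
    mean = float(sum(values)) / n
    var = sum((v - mean) ** 2 for v in values) / float(n - ddof)
    return math.sqrt(var) / math.sqrt(n)  # ddof=1 -> 0.5773..., ddof=0 -> 0.4714...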
| cloud-fan/spark | python/pyspark/pandas/generic.py | Python | apache-2.0 | 102,541 | [ "Elk" ] | 3e141125b1f153e6cc0dff52269cb92276f321d83f5c96ab3e2dd2d24cb572fb |
import ConfigParser
import datetime
import Queue
import logging
import os
import pickle
import socket
import sys
import time
import thread
import traceback
import modules.logger as logger
import modules.protocol as protocol
import modules.scrapping as scrapping
sys.setrecursionlimit(10000)
buffSize = 524288
delimiter = '\n\n12345ZEEK6789\n'
class WorkingNode():
def __init__(self):
# socket
self.host = None
self.port = None
self.data = ""
# general
self.isActive = True
self.masterNodeFormattedAddr = None
self.crawlingType = None
# data container
self.outputQueue = Queue.Queue(0)
self.infoQueue = Queue.Queue(0)
self.urlToVisit = Queue.Queue(0)
# object
self.scrapper = None
self.config = None
def connect(self, host, port):
"""Sets up the connection to the server (max 6 attemps)"""
self.host = host
self.port = port
self.masterNodeFormattedAddr = "[" + str(self.host) + ":" + str(self.port) + "]"
logger.log(logging.DEBUG, "Socket initialization")
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for connectionAttempt in range(6, 0, -1):
if connectionAttempt == 1:
logger.log(logging.CRITICAL, "Unable to connect to host " + self.masterNodeFormattedAddr)
sys.exit()
try:
logger.log(logging.DEBUG, "Connecting to host... " + self.masterNodeFormattedAddr)
self.s.connect((self.host, self.port))
logger.log(logging.INFO, "Connected to " + self.masterNodeFormattedAddr)
break
except socket.error:
logger.log(logging.INFO, "Connection failed to " + self.masterNodeFormattedAddr)
logger.log(logging.INFO, "Retrying in 3 seconds.")
time.sleep(3)
def readConfig(self):
"""Reads the configuration from the server"""
logger.log(logging.DEBUG, "Waiting for configuration from the server.")
if self.isActive:
try:
deserializedPacket = self.readSocket()
logger.log(logging.DEBUG, "Configuration received.")
if deserializedPacket.type == protocol.CONFIG:
self.crawlingType = deserializedPacket.payload.crawlingType
self.config = deserializedPacket.payload.config
# dynamic module reload
basePath = os.path.dirname(sys.argv[0])
if basePath:
basePath = basePath + "/"
# path building
rulePath = basePath + "modules/rule.py"
scrappingPath = basePath + "modules/scrapping.py"
# re-writing source .py
logger.log(logging.INFO, "Importing rule.py from server")
ruleFd = open(rulePath, 'w')
ruleFd.write(self.config.rule_py)
ruleFd.close()
logger.log(logging.INFO, "Importing scrapping.py from server")
scrappingFd = open(scrappingPath, 'w')
scrappingFd.write(self.config.scrapping_py)
scrappingFd.close()
# compilation test
try:
code=open(rulePath, 'rU').read()
compile(code, "rule_test", "exec")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
message = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
logger.log(logging.CRITICAL, message)
logger.log(logging.ERROR, "Unable to compile rule.py (is the syntax right?)")
sys.exit(0)
try:
code=open(scrappingPath, 'rb').read(os.path.getsize(scrappingPath))
compile(code, "scrapping_test", "exec")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
message = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
logger.log(logging.CRITICAL, message)
logger.log(logging.ERROR, "Unable to compile scrapping.py (is the syntax right?)")
sys.exit(0)
# dynamic reload of modules
# TODO reloading of rule.py should eventually come here
logger.log(logging.INFO, "Reloading modules imported for server")
reload(sys.modules["modules.scrapping"])
payload = protocol.InfoPayload(protocol.InfoPayload.CLIENT_ACK)
packet = protocol.Packet(protocol.INFO, payload)
self.writeSocket(packet)
logger.log(logging.DEBUG, "Sending ACK for configuration.")
else:
raise Exception("Unable to parse configuration.")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
message = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
logger.log(logging.CRITICAL, message)
self.isActive = False
def run(self):
"""Launches main threads"""
logger.log(logging.INFO, "\n\nStarting Crawling/Scrapping sequence...")
if self.isActive:
thread.start_new_thread(self.outputThread, ())
thread.start_new_thread(self.inputThread, ())
thread.start_new_thread(self.interpretingThread, ())
thread.start_new_thread(self.crawlingThread, ())
def inputThread(self):
"""Listens for inputs from the server"""
logger.log(logging.DEBUG, "InputThread started")
while self.isActive:
try:
deserializedPacket = self.readSocket()
self.dispatcher(deserializedPacket)
except EOFError:
self.isActive = False
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
message = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
logger.log(logging.CRITICAL, message)
self.isActive = False
def outputThread(self):
"""Checks if there are messages to send to the server and sends them"""
logger.log(logging.DEBUG, "OutputThread started")
while self.isActive:
try:
obj = self.outputQueue.get(True) #fix with helper method to prevent block
self.writeSocket(obj)
logger.log(logging.DEBUG, "Sending obj of type " + str(obj.type))
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
message = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
logger.log(logging.CRITICAL, message)
self.isActive = False
def interpretingThread(self):
"""Interprets message from the server other than type URL. (ie: INFO)"""
logger.log(logging.DEBUG, "InterpretingThread started")
while self.isActive:
try:
time.sleep(0.01) #temp - For testing
packets = protocol.deQueue([self.infoQueue])
if not packets:
continue
for packet in packets:
if packet.type == protocol.INFO:
logger.log(logging.INFO, "Interpreting INFO packet : " + str(packet.payload.urlList))
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
message = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
logger.log(logging.CRITICAL, message)
self.isActive = False
def crawlingThread(self):
"""Takes URL from the urlToVisit queue and visits them"""
logger.log(logging.DEBUG, "CrawlingThread started")
self.scrapper = scrapping.Scrapper(self.config.userAgent, self.config.robotParserEnabled, self.config.domainRestricted, self.config.crawling)
while self.isActive:
try:
urlList = protocol.deQueue([self.urlToVisit])
if not urlList:
time.sleep(0.2) #temp - For testing
continue
for url in urlList:
session = self.scrapper.visit(url)
logger.log(logging.DEBUG, "Session \n" + str(session.url) +
"\nCode : " + str(session.returnCode) +
"\nRequest time : " + str(session.requestTime) +
"\nBs time : " + str(session.bsParsingTime))
if not session.failed:
if self.crawlingType == protocol.ConfigurationPayload.DYNAMIC_CRAWLING:
payload = protocol.URLPayload(session.scrappedURLs, protocol.URLPayload.SCRAPPED_URL)
packet = protocol.Packet(protocol.URL, payload)
self.outputQueue.put(packet)
payload = protocol.URLPayload([url], protocol.URLPayload.VISITED, session=session)
packet = protocol.Packet(protocol.URL, payload)
self.outputQueue.put(packet)
else:
logger.log(logging.INFO, "Skipping URL : " + url)
payload = protocol.URLPayload([url], protocol.URLPayload.SKIPPED, session)
packet = protocol.Packet(protocol.URL, payload)
self.outputQueue.put(packet)
continue
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
message = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
logger.log(logging.CRITICAL, message)
self.isActive = False
def dispatcher(self, packet):
"""Dispatches packets to the right packet queue"""
if packet is None:
return
elif packet.type == protocol.INFO:
logger.log(logging.DEBUG, "Dispatching INFO packet")
self.infoQueue.put(packet)
elif packet.type == protocol.URL:
logger.log(logging.DEBUG, "Dispatching url packet : " + str(packet.payload.urlList[0]))
for site in packet.payload.urlList:
self.urlToVisit.put(site)
else:
logger.log(logging.CRITICAL, "Unrecognized packet type : " + str(packet.type) + ". This packet was dropped")
return
logger.log(logging.DEBUG, "Dispatched packet of type: " + str(packet.type))
def writeSocket(self, obj):
try:
serializedObj = pickle.dumps(obj)
logger.log(logging.DEBUG, "Sending " + str(len(serializedObj + delimiter)) + " bytes to server")
self.s.sendall(serializedObj + delimiter)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
message = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
logger.log(logging.CRITICAL, message)
raise Exception("Unable to write to socket (lost connection to server)")
def readSocket(self, timeOut=None):
self.s.settimeout(timeOut)
data = self.data
if "\n\n12345ZEEK6789\n" in data:
data = data.split("\n\n12345ZEEK6789\n")
self.data = "\n\n12345ZEEK6789\n".join(data[1:])
return pickle.loads(data[0])
while self.isActive:
buffer = self.s.recv(buffSize)
data = data + buffer
if not buffer:
logger.log(logging.INFO, "\nLost connection to server " + self.masterNodeFormattedAddr)
self.isActive = False
if "\n\n12345ZEEK6789\n" in data:
data = data.split("\n\n12345ZEEK6789\n")
self.data = "\n\n12345ZEEK6789\n".join(data[1:])
break
if self.isActive == False:
return
logger.log(logging.DEBUG, "Receiving " + str(len(data[0])) + " bytes from server")
return pickle.loads(data[0])
def disconnect(self):
"""Disconnects from the server"""
self.isActive = False
self.s.close()
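# Illustrative sketch, added for this document and not part of the original module:
# the wire format used by writeSocket()/readSocket() above is a pickled object
# followed by the fixed delimiter string, so a receive buffer can be split on the
# delimiter to recover whole messages. This helper only frames and unframes in
# memory (no sockets); it reuses the module-level ``pickle`` import and
# ``delimiter`` constant, and its name is hypothetical.
def _demo_message_framing(messages):
    stream = "".join(pickle.dumps(m) + delimiter for m in messages)
    return [pickle.loads(chunk) for chunk in stream.split(delimiter) if chunk]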
def main():
path = os.path.dirname(sys.argv[0])
if path:
path = path + "/"
#config
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.read(path + 'config')
host = config.get('client', 'hostAddr')
port = config.getint('client', 'hostPort')
logPath = config.get('common', 'logPath')
verbose = config.get('common', 'verbose')
if verbose == "True" or verbose == "true":
verbose = True
else:
verbose = False
#setup
logger.init(logPath, "client-" + str(datetime.datetime.now()))
logger.debugFlag = verbose
node = WorkingNode()
node.connect(host, port)
node.readConfig()
node.run()
while node.isActive:
time.sleep(0.5)
node.disconnect()
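# Illustrative sketch, added for this document and not part of the original module:
# the compilation test performed in WorkingNode.readConfig() above amounts to
# compiling the downloaded source with compile(source, name, "exec") and treating
# any exception as an invalid module. The helper name is hypothetical.
def _demo_syntax_check(source, name="downloaded_module"):
    try:
        compile(source, name, "exec")
        return True
    except Exception:
        return False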
if __name__ == "__main__":
    main()
| Diastro/Zeek | src/client.py | Python | mit | 13,765 | [ "VisIt" ] | 811ffb2583d036725443f20e4f336539cd0fa9768ddd32e26d5a160f6bdb995b |
import requests
import json
import os
import sys
import re
import traceback
from adsputils import setup_logging, get_date, date2solrstamp
from aip.classic import enforce_schema
logger = setup_logging('solr_adapter')
ARTICLE_TYPES = set(['eprint', 'article', 'inproceedings', 'inbook'])
AUTHOR_TYPES = set(['regular', 'collaboration'])
def get_date_by_datetype(ADS_record):
"""computes the standard pubdate by selecting the appropriate value
from the ADS_record and formatting it as YYYY-MM-DD"""
dates = ADS_record['metadata']['general']['publication']['dates']
for datetype in [ 'date-published', 'date-thesis', 'date-preprint' ]:
try:
return next(i['content'] for i in dates if i['type'].lower() == datetype)
except StopIteration:
pass
return None
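# Illustrative sketch, added for this document and not part of the original module:
# get_date_by_datetype() above walks the date types in priority order
# ('date-published', then 'date-thesis', then 'date-preprint'). The record below is
# a minimal, hypothetical ADS-like structure used only to make that priority concrete.
def _demo_date_priority():
    rec = {'metadata': {'general': {'publication': {'dates': [
        {'type': 'date-preprint', 'content': '2015-01-00'},
        {'type': 'date-published', 'content': '2015-03-00'},
    ]}}}}
    return get_date_by_datetype(rec)  # -> '2015-03-00'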
def _normalize_author_name(strname):
if not strname:
return None
return ' '.join(strname.split('.')).strip()
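# Illustrative sketch, added for this document and not part of the original module:
# the *_facet_hier builders in SolrAdapter below emit hierarchical facet values of
# the form "0/<normalized name>" and "1/<normalized name>/<western name>". The
# author names below are hypothetical and exist only to show the expected shape.
def _demo_author_facet_hier(normalized=u'Smith, J', western=u'Smith, John R.'):
    return [u"0/%s" % _normalize_author_name(normalized),
            u"1/%s/%s" % (_normalize_author_name(normalized),
                          _normalize_author_name(western))]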
class SolrAdapter(object):
SCHEMA = {
'abstract': u'',
'ack': u'',
'aff': [u'',],
'alternate_bibcode': [u'',],
'alternate_title': [u'',],
'arxiv_class': [u'',],
'author': [u'',],
'author_count': 0,
'author_facet': [u'',],
#'author_native': [u'',], Waiting for montysolr
'author_facet_hier': [u'',],
'author_norm': [u'',],
'book_author': [u'',],
'bibcode': u'',
'bibgroup': [u'', ],
'bibgroup_facet': [u'', ],
'bibstem': [u'', ],
'bibstem_facet': u'',
'comment': [u'',],
'copyright': [u'',],
'database': [u'',],
'date': u'YYYY-MM[-DD]',
'doctype': u'',
'doctype_facet_hier': [u''],
'doi':[u'',],
'editor': [u'',],
'eid':u'',
'email': [u'', ],
'entry_date': '',
'facility': [u'', ],
'first_author': u'',
'first_author_facet_hier': [u'',],
'first_author_norm':u'',
'id': 0,
'identifier': [u'',],
'isbn': [u'',],
'issn': [u'',],
'issue': u'',
'keyword': [u'', ],
'keyword_facet': [u'', ],
'keyword_norm': [u'', ],
'keyword_schema': [u'', ],
'lang': u'',
'links_data': [u'', ],
'orcid': [u''],
'orcid_pub': [u''],
'orcid_user': [u''],
'orcid_other': [u''],
'page': [u''],
'page_range': u'',
'page_count': 0,
'pub': u'',
'pubnote': [u'',],
'pub_raw': u'',
'pubdate': u'',
'recid': 0,
'series': u'',
'thesis': u'',
'title': [u'', ],
'vizier': [u'', ],
'vizier_facet':[u'', ],
'volume': u'',
'year': u'',
}
#------------------------------------------------
# Private methods; responsible for translating schema: ADS->Solr
@staticmethod
def _abstract(ADS_record):
abstracts = ADS_record['metadata']['general'].get('abstracts', [])
result = None
for r in abstracts:
if r['lang'] == "en":
result = r['text']
if not result and abstracts: # attempt fallback to other language if en not present
result = abstracts[0].get('text', '')
return {'abstract': result}
@staticmethod
def _ack(ADS_record):
result = ADS_record['text'].get('acknowledgement', {}).get('content')
return {'ack': result}
@staticmethod
def _aff(ADS_record):
authors = [i for i in ADS_record['metadata']['general'].get('authors', []) if i['type'] in AUTHOR_TYPES]
authors = sorted(authors, key=lambda k: int(k['number']))
result = ['; '.join([j for j in i['affiliations'] if j]) if i['affiliations'] else u'-' for i in authors]
return {'aff': result}
@staticmethod
def _alternate_bibcode(ADS_record):
result = [i['content'] for i in ADS_record['metadata']['relations'].get('alternates', [])]
result = list(set(result))
return {'alternate_bibcode': result}
@staticmethod
def _alternate_title(ADS_record):
result = []
for r in ADS_record['metadata']['general'].get('titles', []):
if not r['lang'] or r['lang'] != "en":
result.append(r['text'])
return {'alternate_title': result}
@staticmethod
def _arxiv_class(ADS_record):
results = [i for i in ADS_record['metadata']['general'].get('arxivcategories', [])]
return {'arxiv_class':results}
@staticmethod
def _author(ADS_record):
authors = ADS_record['metadata']['general'].get('authors', [])
authors = sorted(authors, key=lambda k: int(k['number']))
result = [i['name']['western'] for i in authors if i['name']['western'] and i['type'] in AUTHOR_TYPES]
return {'author': result}
@staticmethod
def _author_count(ADS_record):
authors = ADS_record['metadata']['general'].get('authors',[])
result = len([i['name']['western'] for i in authors if i['name']['western'] and i['type'] in AUTHOR_TYPES])
return {'author_count': result}
@staticmethod
def _author_norm(ADS_record):
authors = ADS_record['metadata']['general'].get('authors', [])
authors = sorted(authors, key=lambda k: int(k['number']))
result = [i['name']['normalized'] for i in authors if i['name']['normalized'] and i['type'] in AUTHOR_TYPES]
return {'author_norm': result}
@staticmethod
def _book_author(ADS_record):
author = ADS_record['metadata']['general'].get('book_author', [])
author = sorted(author, key=lambda k: int(k['number']))
result = [i['name']['western'] for i in author if i['name']['western']]
return {'book_author': result}
@staticmethod
def _editor(ADS_record):
authors = ADS_record['metadata']['general'].get('authors', [])
authors = sorted(authors, key=lambda k: int(k['number']))
result = [i['name']['western'] for i in authors if i['name']['western'] and i['type']=='editor']
return {'editor': result}
@staticmethod
def _author_facet(ADS_record):
authors = ADS_record['metadata']['general'].get('authors', [])
authors = sorted(authors, key=lambda k: int(k['number']))
result = [i['name']['normalized'] for i in authors if i['name']['normalized'] and i['type'] in AUTHOR_TYPES]
return {'author_facet': result}
@staticmethod
def _author_facet_hier(ADS_record):
authors = ADS_record['metadata']['general'].get('authors', [])
authors = sorted(authors, key=lambda k: int(k['number']))
result = []
for author in authors:
if author['type'] in AUTHOR_TYPES:
if author['name']['normalized']:
r = u"0/%s" % (_normalize_author_name(author['name']['normalized']),)
result.append(r)
if author['name']['western']:
r = u"1/%s/%s" % (_normalize_author_name(author['name']['normalized']), _normalize_author_name(author['name']['western']))
result.append(r)
return {'author_facet_hier': result}
# waiting for montysolr
# @staticmethod
# def _author_native(ADS_record):
# authors = ADS_record['metadata']['general'].get('authors',[])
# authors = sorted(authors,key=lambda k: int(k['number']))
# result = [i['name']['native'] if i['name']['native'] else u"-" for i in authors]
# return {'author_native': result}
@staticmethod
def _bibcode(ADS_record):
return {'bibcode': ADS_record['bibcode']}
@staticmethod
def _bibgroup(ADS_record):
result = [i['content'] for i in ADS_record['metadata']['properties'].get('bibgroups', [])]
result = list(set(result))
return {'bibgroup': result}
@staticmethod
def _bibgroup_facet(ADS_record):
result = [i['content'] for i in ADS_record['metadata']['properties'].get('bibgroups', [])]
result = list(set(result))
return {'bibgroup_facet': result}
@staticmethod
def _bibstem(ADS_record):
b = ADS_record['bibcode']
short, long = bibstem_mapper(b)
# index both long and short bibstems
result = map(unicode, [re.sub(r'\.+$', '', short), long])
return {'bibstem':result}
@staticmethod
def _bibstem_facet(ADS_record):
b = ADS_record['bibcode']
short, long = bibstem_mapper(b)
if re.match(r'^[\.\d]+$', long[5:9]):
# is a serial publication, use short bibstem
result = short.replace('.', '')
else:
# is book/conference/arxiv, use long bibstem
result = re.sub(r'\.+$', '', long)
return {'bibstem_facet':unicode(result)}
@staticmethod
def _copyright(ADS_record):
result = [i['content'] for i in ADS_record['metadata']['general'].get('copyright', [])]
return {'copyright': result}
@staticmethod
def _comment(ADS_record):
result = [i['content'] for i in ADS_record['metadata']['general'].get('comment', [])]
result = list(set(result))
# XXX - Hack to avoid a re-indexing because of non-multivalued field 'comment'
if len(result) > 1:
result = [ '\n'.join(result) ]
return {'comment': result}
@staticmethod
def _database(ADS_record):
translation = {
'PHY': u'physics',
'AST': u'astronomy',
'GEN': u'general',
}
result = [translation[i['content'].upper()] for i in ADS_record['metadata']['properties'].get('databases', [])]
result = list(set(result))
return {'database': result}
@staticmethod
def _entry_date(ADS_record):
d = ADS_record.get('entry_date', None)
return {'entry_date': date2solrstamp(d and get_date(d) or get_date())}
@staticmethod
def _year(ADS_record):
dates = ADS_record['metadata']['general']['publication']['dates']
try:
            result = next(i['content'] for i in dates if i['type'].lower() == 'publication_year')
except StopIteration:
result = None
return {'year':result}
@staticmethod
def _date(ADS_record):
result = get_date_by_datetype(ADS_record)
if result:
try:
result = enforce_schema.Enforcer.parseDate(result)
except ValueError:
result = None
# should we throw an exception if result is null?
return {'date':result}
@staticmethod
def _doctype(ADS_record):
result = ADS_record['metadata']['properties']\
.get('doctype', {})\
.get('content')\
.lower()
return {'doctype': result}
@staticmethod
def _doctype_facet_hier(ADS_record):
doctype = ADS_record['metadata']['properties']\
.get('doctype', {})\
.get('content')\
.lower()
(top,type) = doctype_mapper(doctype)
result = [ u"0/%s" % top, u"1/%s/%s" % (top, type) ]
return {'doctype_facet_hier': result}
@staticmethod
def _doi(ADS_record):
result = [i['content'] for i in ADS_record['metadata']['general'].get('doi', [])]
return {'doi': result}
@staticmethod
def _eid(ADS_record):
result = ADS_record['metadata']['general']['publication'].get('electronic_id')
return {'eid': result}
@staticmethod
def _email(ADS_record):
authors = ADS_record['metadata']['general'].get('authors', [])
authors = sorted(authors, key=lambda k: int(k['number']))
result = ['; '.join([j for j in i['emails'] if j]) if i['emails'] else u'-' for i in authors]
return {'email': result}
@staticmethod
def _first_author(ADS_record):
authors = ADS_record['metadata']['general'].get('authors', [])
authors = sorted(authors, key=lambda k: int(k['number']))
if not authors:
result = None
else:
result = authors[0]['name']['western']
return {'first_author': result}
@staticmethod
def _first_author_facet_hier(ADS_record):
authors = ADS_record['metadata']['general'].get('authors', [])
authors = sorted(authors, key=lambda k: int(k['number']))
result = []
if authors:
if authors[0]['name']['normalized']:
r = u"0/%s" % (_normalize_author_name(authors[0]['name']['normalized']),)
result.append(r)
if authors[0]['name']['western']:
r = u"1/%s/%s" % (_normalize_author_name(authors[0]['name']['normalized']),
_normalize_author_name(authors[0]['name']['western']))
result.append(r)
return {'first_author_facet_hier':result}
@staticmethod
def _first_author_norm(ADS_record):
authors = ADS_record['metadata']['general'].get('authors', [])
authors = sorted(authors, key=lambda k: int(k['number']))
if authors:
result = authors[0]['name']['normalized']
else:
result = None
return {'first_author_norm': result}
@staticmethod
def _lang(ADS_record):
return {'lang': ADS_record['metadata'].get('language', '')}
@staticmethod
def _links_data(ADS_record):
result = [json.dumps({"title": i.get('title', "") or "",
"type": i.get('type', "") or "",
"instances": i.get('count', "") or "",
"access": i.get('access', "") or "",
"url": i.get("url", "") or ""},
sort_keys=True) \
for i in ADS_record['metadata']['relations'].get('links',[])]
result = [unicode(i) for i in result]
return {'links_data': result}
@staticmethod
def _id(ADS_record):
return {'id': ADS_record['id']}
@staticmethod
def _identifier(ADS_record):
result = []
result.extend([i['content'] for i in ADS_record['metadata']['relations'].get('preprints', [])])
result.extend([i['content'] for i in ADS_record['metadata']['general'].get('doi', [])])
result.extend([i['content'] for i in ADS_record['metadata']['relations'].get('alternates', [])])
result.extend([i['content'] for i in ADS_record['metadata']['relations'].get('identifiers', [])])
return {'identifier': list(set(result))}
@staticmethod
def _issn(ADS_record):
result = [i['content'] for i in ADS_record['metadata']['general'].get('issns', [])]
result = unroll_unique_list(result)
return {'issn': result}
@staticmethod
def _isbn(ADS_record):
result = [i['content'] for i in ADS_record['metadata']['general'].get('isbns', [])]
result = unroll_unique_list(result)
return {'isbn': result}
@staticmethod
def _issue(ADS_record):
result = ADS_record['metadata']['general'].get('publication', {}).get('issue')
return {'issue': result}
@staticmethod
def _page(ADS_record):
result = [ADS_record['metadata']['general']['publication'].get('page')]
if ADS_record['metadata']['general']['publication'].get('electronic_id'):
result.append(ADS_record['metadata']['general']['publication']['electronic_id'])
return {'page': filter(None, result)}
@staticmethod
# return page range only if found in source record
def _page_range(ADS_record):
result = ADS_record['metadata']['general']['publication'].get('page_range', u'')
return {'page_range':result}
@staticmethod
def _page_count(ADS_record):
result = ADS_record['metadata']['general']['publication'].get('page_count',0)
try:
result = int(result)
    except (TypeError, ValueError):
result = 0
return {'page_count':result}
@staticmethod
def _pub(ADS_record):
return {'pub': ADS_record['metadata']['general'].get('publication', {}).get('name', {}).get('canonical')}
@staticmethod
def _pub_raw(ADS_record):
return {'pub_raw': ADS_record['metadata']['general'].get('publication', {}).get('name', {}).get('raw')}
@staticmethod
def _pubdate(ADS_record):
result = get_date_by_datetype(ADS_record)
return {'pubdate':result}
@staticmethod
def _pubnote(ADS_record):
result = [i['content'] for i in ADS_record['metadata']['general'].get('pubnote',[])]
return {'pubnote':result}
@staticmethod
def _series(ADS_record):
return {'series': ADS_record['metadata']['general'].get('publication', {}).get('series')}
@staticmethod
def _keyword(ADS_record):
"""original keywords; must match one-to-one with _keyword_schema and _keyword_norm"""
result = [i['original'] if i['original'] else u'-' for i in ADS_record['metadata']['general'].get('keywords', [])]
return {'keyword': result}
@staticmethod
def _keyword_norm(ADS_record):
"""normalized keywords; must match one-to-one with _keyword and _keyword_schema"""
result = [i['normalized'] if i['normalized'] else u'-' for i in ADS_record['metadata']['general'].get('keywords', [])]
return {'keyword_norm': result}
@staticmethod
def _keyword_schema(ADS_record):
"""keyword system; must match one-to-one with _keyword and _keyword_norm"""
result = [i['type'] if i['type'] else u'-' for i in ADS_record['metadata']['general'].get('keywords', [])]
return {'keyword_schema': result}
@staticmethod
def _keyword_facet(ADS_record):
# keep only non-empty normalized keywords
result = filter(None, [i['normalized'] for i in ADS_record['metadata']['general'].get('keywords', [])])
return {'keyword_facet':result}
@staticmethod
def _orcid(ADS_record):
authors = ADS_record['metadata']['general'].get('authors', [])
authors = sorted(authors, key=lambda k: int(k['number']))
result = [i['orcid'] if i['orcid'] else u'-' for i in authors]
out = {'orcid_pub': result}
if 'orcid_claims' in ADS_record:
for indexname, claimname in [('orcid_user', 'verified'), ('orcid_other', 'unverified')]:
if claimname in ADS_record['orcid_claims']:
claims = ADS_record['orcid_claims'][claimname]
# basic check, the length should be equal
if len(claims) != len(authors):
logger.warn("Potential problem with orcid claims for: {0} (len(authors) != len(claims))"
.format(ADS_record['bibcode']))
            # TODO: in the grand scheme of things, we should trigger ADS orcid update (let the remote
            # pipeline processes know that something is out of sync); for now we'll just truncate the
# data
if len(claims) > len(authors):
claims = claims[0:len(authors)]
else:
claims = claims + [u'-'] * (len(authors) - len(claims))
out[indexname] = claims
return out
@staticmethod
def _title(ADS_record):
result = [i['text'] for i in ADS_record['metadata']['general'].get('titles', [])]
return {'title':result}
@staticmethod
def _volume(ADS_record):
return {'volume': ADS_record['metadata']['general'].get('publication', {}).get('volume')}
@staticmethod
def _vizier(ADS_record):
result = [i['content'] for i in ADS_record['metadata']['properties'].get('vizier_tables', [])]
return {'vizier': result}
@staticmethod
def _vizier_facet(ADS_record):
result = [i['content'] for i in ADS_record['metadata']['properties'].get('vizier_tables', [])]
return {'vizier_facet': result}
#------------------------------------------------
# Public Entrypoints
@classmethod
def adapt(cls, ADS_record):
assert isinstance(ADS_record, dict)
result = {}
for k in cls.SCHEMA:
try:
D = getattr(cls, '_%s' % k)(ADS_record)
v = D.values()
if not v or (len(v) == 1 and not isinstance(v[0], int) and not isinstance(v[0], float) and not v[0]):
D = {}
result.update(D)
except AttributeError, e:
logger.debug("NotImplementedWarning: %s" % e)
if "type object 'SolrAdapter'" not in e.message:
raise
# raise NotImplementedError
return result
@classmethod
def validate(cls, solr_record):
'''
Validates types and keys of `record` against self.schema.
Raises AssertionError if types or keys do not match
'''
r = solr_record
SCHEMA = cls.SCHEMA
assert isinstance(r, dict)
for k, v in r.iteritems():
assert k in SCHEMA, '{0}: not in schema'.format(k)
assert isinstance(v, type(SCHEMA[k])), '{0}: has an unexpected type ({1}!={2}): {3}'.format(k, type(v), SCHEMA[k], v)
if isinstance(v, list) and v: # No expectation of nested lists
assert len(set([type(i) for i in v])) == 1, "{0}: multiple data-types in list: {1}".format(k, v)
assert isinstance(v[0], type(SCHEMA[k][0])), "{0}: inner list element has unexpected type ({1}!={2}): {3}".format(k, type(v[0]), type(SCHEMA[k][0]), v)
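# Illustrative usage of SolrAdapter's two public entrypoints (hypothetical
# caller code; `ads_record` is assumed to be a merged record dict built
# upstream in the pipeline):
#   solr_doc = SolrAdapter.adapt(ads_record)
#   SolrAdapter.validate(solr_doc)  # raises AssertionError on a schema mismatch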
def unroll_unique_list(array):
"""
  Takes a list as input, unpacks nested elements, de-duplicates them,
and returns a list. Used to normalize some fields such as
isbns and issns for which different data structures may be
created by the json import due to XML element multiplicity
(or lack thereof). Yes, it's a hack that could be avoided if
we tightened the Enforcer code.
"""
result = []
for i in array:
if isinstance(i, list):
result += i
else:
result.append(i)
return filter(lambda x: x is not None, set(result))
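# Illustrative behaviour on a hypothetical mixed input:
#   unroll_unique_list(['0028-0836', ['1476-4687', '0028-0836'], None])
#   -> ['0028-0836', '1476-4687']  (order not guaranteed; values pass through a set)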
doctype_dict = {
'article': 'Journal Article',
'proceedings': 'Proceedings',
'inproceedings': 'Proceedings Article',
'book': 'Book',
'inbook': 'Book Chapter',
'techreport': 'Tech Report',
'intechreport': 'In Tech Report',
'eprint': 'e-print',
'abstract': 'Abstract',
'mastersthesis': 'Masters Thesis',
'phdthesis': 'PhD Thesis',
'talk': 'Talk',
'software': 'Software',
'proposal': 'Proposal',
'pressrelease': 'Press Release',
'circular': 'Circular',
'newsletter': 'Newsletter',
'catalog': 'Catalog',
'editorial': 'Editorial',
'misc': 'Other'
}
def doctype_mapper(doctype):
"""
  Maps a document type to a pair of hierarchical entries:
  the top-level type and the type used for facets.
"""
htype = 'Article' if doctype in ARTICLE_TYPES else 'Non-Article'
stype = doctype_dict.get(doctype, 'Other')
return (htype, stype)
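# Illustrative mappings (hypothetical), assuming 'article' is listed in
# ARTICLE_TYPES (defined elsewhere in this module) and 'software' is not:
#   doctype_mapper('article')  -> ('Article', 'Journal Article')
#   doctype_mapper('software') -> ('Non-Article', 'Software')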
def simbad_type_mapper(otype):
"""
Maps a native SIMBAD object type to a subset of basic classes
used for searching and faceting. Based on Thomas Boch's mappings
used in AladinLite
"""
if otype.startswith('G') or otype.endswith('G'):
return u'Galaxy'
elif otype == 'Star' or otype.find('*') >= 0:
return u'Star'
elif otype == 'Neb' or otype.startswith('PN') or otype.startswith('SNR'):
return u'Nebula'
elif otype == 'HII':
return u'HII Region'
elif otype == 'X':
return u'X-ray'
elif otype.startswith('Radio') or otype == 'Maser' or otype == 'HI':
return u'Radio'
elif otype == 'IR' or otype.startswith('Red'):
return u'Infrared'
elif otype == 'UV':
return u'UV'
else:
return u'Other'
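# Illustrative mappings for a few hypothetical SIMBAD otype strings:
#   simbad_type_mapper('GinCl') -> u'Galaxy'  (starts with 'G')
#   simbad_type_mapper('RGB*')  -> u'Star'    (contains '*')
#   simbad_type_mapper('LINER') -> u'Other'   (no rule matches)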
_o_types = {}
for x in ["G", "GClstr", "GGroup", "GPair", "GTrpl", "G_Lens", "PofG"]:
  _o_types[x] = u'Galaxy'
for x in ['Neb', 'PN', 'RfN']:
  _o_types[x] = u'Nebula'
for x in ['HII']:
  _o_types[x] = u'HII Region'
for x in ['X']:
  _o_types[x] = u'X-ray'
for x in ['Maser', 'HI']:
  _o_types[x] = u'Radio'
for x in ['IrS']:
  _o_types[x] = u'Infrared'
for x in ['Blue*', 'C*', 'exG*', 'Flare*', 'Nova', 'Psr', 'Red*', 'SN', 'SNR', 'V*', 'VisS', 'WD*', 'WR*']:
  _o_types[x] = u'Star'
def ned_type_mapper(otype):
"""
Maps a native NED object type to a subset of basic classes
used for searching and faceting.
"""
if otype.startswith('!'):
return u'Galactic Object'
elif otype.startswith('*'):
return u'Star'
elif otype.startswith('Uv'):
return u'UV'
elif otype.startswith('Radio'):
return u'Radio'
else:
return _o_types.get(otype, u'Other')
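# Illustrative mappings for a few hypothetical NED object types:
#   ned_type_mapper('!V*')   -> u'Galactic Object'  (leading '!')
#   ned_type_mapper('*Cl')   -> u'Star'
#   ned_type_mapper('GPair') -> u'Galaxy'           (falls through to _o_types)
#   ned_type_mapper('QSO')   -> u'Other'            (not in _o_types)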
arxiv_categories = set(["acc.phys.",
"adap.org.",
"alg.geom.",
"ao.sci...",
"astro.ph.",
"atom.ph..",
"bayes.an.",
"chao.dyn.",
"chem.ph..",
"cmp.lg...",
"comp.gas.",
"cond.mat.",
"cs.......",
"dg.ga....",
"funct.an.",
"gr.qc....",
"hep.ex...",
"hep.lat..",
"hep.ph...",
"hep.th...",
"math.....",
"math.ph..",
"mtrl.th..",
"nlin.....",
"nucl.ex..",
"nucl.th..",
"patt.sol.",
"physics..",
"plasm.ph.",
"q.alg....",
"q.bio....",
"quant.ph.",
"solv.int.",
"supr.con."])
# these are publications for which there may be a 5-digit volume
# which "spills left" i.e. has its most significant digit in the
# journal field
PUB_VOLUME_SPILLS_LEFT = (
'SPIE',
'ATel',
'GCN.',
)
def bibstem_mapper(bibcode):
short_stem = bibcode[4:9]
long_stem = bibcode[4:13]
vol_field = bibcode[9:13]
# first take care of special cases
# ApJL
if short_stem == 'ApJ..' and bibcode[13:14] == 'L':
short_stem = u'ApJL.'
long_stem = short_stem + vol_field
# MPECs have a letter in the journal field which should be ignored
elif short_stem == 'MPEC.' and re.match(r'^[\.\w]+$', vol_field):
vol_field = u'....'
long_stem = short_stem + vol_field
# map old arXiv bibcodes to arXiv only
elif long_stem in arxiv_categories:
short_stem = u'arXiv'
vol_field = u'....'
long_stem = short_stem + vol_field
  # 5th character could be a volume digit, in which case reset it
elif short_stem[0:4] in PUB_VOLUME_SPILLS_LEFT and short_stem[4].isdigit():
short_stem = short_stem[0:4] + u'.'
return (unicode(short_stem), unicode(long_stem))
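# Illustrative results for a couple of hypothetical bibcodes:
#   bibstem_mapper('2015ApJ...800L...1X') -> (u'ApJL.', u'ApJL..800')  # ApJ Letters special case
#   bibstem_mapper('2003astro.ph..1234X') -> (u'arXiv', u'arXiv....')  # old arXiv category collapsed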
| adsabs/ADSimportpipeline | aip/classic/solr_adapter.py | Python | gpl-3.0 | 25,646 | [
"Galaxy"
] | fed38c892335794f81b809163ff6dd9f761055a29bff6146badbf2f95c3ac425 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the Account Settings page.
"""
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.tests.helpers import AcceptanceTest, EventsTestMixin
class AccountSettingsTestMixin(EventsTestMixin, AcceptanceTest):
"""
Mixin with helper methods to test the account settings page.
"""
CHANGE_INITIATED_EVENT_NAME = u"edx.user.settings.change_initiated"
USER_SETTINGS_CHANGED_EVENT_NAME = 'edx.user.settings.changed'
ACCOUNT_SETTINGS_REFERER = u"/account/settings"
shard = 23
def visit_account_settings_page(self, gdpr=False):
"""
Visit the account settings page for the current user, and store the page instance
as self.account_settings_page.
"""
self.account_settings_page = AccountSettingsPage(self.browser)
self.account_settings_page.visit()
self.account_settings_page.wait_for_ajax()
# TODO: LEARNER-4422 - delete when we clean up flags
if gdpr:
self.account_settings_page.browser.get(self.browser.current_url + "?course_experience.gdpr=1")
self.account_settings_page.wait_for_page()
def log_in_as_unique_user(self, email=None, full_name=None, password=None):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(
self.browser,
username=username,
email=email,
full_name=full_name,
password=password
).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
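        # Illustrative (hypothetical) call from a test method on this mixin:
        #   username, user_id = self.log_in_as_unique_user(full_name='Test User')
        #   self.visit_account_settings_page()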
class AccountSettingsA11yTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Class to test account settings accessibility.
"""
a11y = True
def test_account_settings_a11y(self):
"""
Test the accessibility of the account settings page.
"""
self.log_in_as_unique_user()
self.visit_account_settings_page()
self.account_settings_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
self.account_settings_page.a11y_audit.check_for_accessibility_errors()
| stvstnfrd/edx-platform | common/test/acceptance/tests/lms/test_account_settings.py | Python | agpl-3.0 | 2,482 | [
"VisIt"
] | 2cb6c193b297d4e082fd061cadad8fcf5f2d33ee9aa21baca6780492547a6e90 |
#!/usr/bin/env python
__author__ = 'Mike McCann'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
Master loader for all October 2013 SIMZ activities.
Mike McCann
MBARI 24 October 2013
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that CANON is found
from CANON import CANONLoader
cl = CANONLoader('stoqs_simz_oct2014', 'Sampling and Identification of Marine Zooplankton - October 2014',
description = 'Rachel Carson and Dorado surveys in Northern Monterey Bay',
# Assign a GeoOrigin in the middle of the terrain that is an appropriate
# location for rotateYUp, making fly navigation work well. All other GCC
# positions are in the GeoOrigin with rotateYUp='true' coordinate system
x3dTerrains = {
'http://dods.mbari.org/terrain/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
'position': '-5334.11754 77527.85269 57495.84643',
'orientation': '-0.99840 0.05415 0.01651 0.88794',
'centerOfRotation': '1973.702 -553.761 -10885.8333',
'VerticalExaggeration': '10',
'geoOrigin': '36.75 -122 0',
'speed': '0.1',
}
},
grdTerrain = os.path.join(parentDir, 'Monterey25.grd')
)
startDatetime = datetime.datetime(2014, 10, 15)
endDatetime = datetime.datetime(2014, 10, 23)
# Aboard the Carson use zuma:
##cl.tdsBase = 'http://zuma.rc.mbari.org/thredds/'
# On shore, use the odss server:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'
# 2-second decimated dorado data
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2014/netcdf/' # Dorado archive
cl.dorado_files = [
'Dorado389_2014_289_04_289_04_decim.nc',
'Dorado389_2014_290_00_290_00_decim.nc',
'Dorado389_2014_293_00_293_00_decim.nc',
'Dorado389_2014_294_00_294_00_decim.nc',
'Dorado389_2014_295_00_295_00_decim.nc',
]
cl.dorado_parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
'fl700_uncorr', 'salinity', 'biolume',
'sepCountList', 'mepCountList' ]
# Rachel Carson Underway CTD
cl.rcuctd_base = cl.dodsBase + 'SIMZ/2014_Oct/carson/uctd/'
cl.rcuctd_files = [
'28914plm01.nc', '29014plm01.nc', '29314plm01.nc', '29414plm01.nc', '29514plm01.nc',
]
cl.rcuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
# Rachel Carson Profile CTD
cl.pctdDir = 'SIMZ/2014_Oct/carson/pctd/'
cl.rcpctd_base = cl.dodsBase + cl.pctdDir
cl.rcpctd_files = [
'SIMZ2014C40.nc', 'SIMZ2014C41.nc', 'SIMZ2014C42.nc', 'SIMZ2014C43.nc', 'SIMZ2014C44.nc',
'SIMZ2014C45.nc', 'SIMZ2014C46.nc', 'SIMZ2014C47.nc', 'SIMZ2014C48.nc', 'SIMZ2014C49.nc',
'SIMZ2014C50.nc', 'SIMZ2014C51.nc', 'SIMZ2014C52.nc', 'SIMZ2014C53.nc', 'SIMZ2014C54.nc',
'SIMZ2014C55.nc', 'SIMZ2014C56.nc', 'SIMZ2014C57.nc', 'SIMZ2014C58.nc', 'SIMZ2014C59.nc',
'SIMZ2014C60.nc', 'SIMZ2014C61.nc', 'SIMZ2014C62.nc', 'SIMZ2014C63.nc', 'SIMZ2014C64.nc',
'SIMZ2014C65.nc', 'SIMZ2014C66.nc', 'SIMZ2014C67.nc', 'SIMZ2014C68.nc', 'SIMZ2014C69.nc',
]
cl.rcpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl', 'oxygen' ]
# Mooring M1 Combined file produced by DPforSSDS processing - for just the duration of the campaign
cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/201407/'
cl.m1_files = ['OS_M1_20140716hourly_CMSTV.nc']
cl.m1_parms = [ 'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR',
'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR',
'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR'
]
cl.m1_startDatetime = startDatetime
cl.m1_endDatetime = endDatetime
# SubSample data files from /mbari/BOG_Archive/ReportsForSTOQS/GOC12/ copied to local GOC12 dir
cl.subsample_csv_base = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'SIMZOct2013')
cl.subsample_csv_files = [
#'2013_SIMZ_AUV_STOQS.csv',
#'2013_SIMZ_Niskins_STOQS.csv',
##'2013_SIMZ_TowNets_STOQS.csv',
]
# Execute the load
cl.process_command_line()
if cl.args.test:
##cl.loadL_662(stride=1)
cl.loadDorado(stride=100)
cl.loadRCuctd(stride=100)
cl.loadRCpctd(stride=1)
cl.loadM1(stride=10)
#cl.loadSubSamples()
elif cl.args.optimal_stride:
##cl.loadL_662(stride=1)
cl.loadDorado(stride=1)
cl.loadRCuctd(stride=1)
cl.loadRCpctd(stride=1)
cl.loadM1(stride=1)
#cl.loadSubSamples()
else:
cl.stride = cl.args.stride
##cl.loadL_662()
cl.loadDorado()
cl.loadRCuctd()
cl.loadRCpctd()
cl.loadM1()
#cl.loadSubSamples()
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()
print "All Done."
| josephmfaulkner/stoqs | stoqs/loaders/MolecularEcology/loadSIMZ_oct2014.py | Python | gpl-3.0 | 5,630 | [
"NetCDF"
] | a9907c788021dfeda7cb05a16764332e66360e1cf92900d6d52fd31ae25db6a7 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conversion to A-normal form."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.autograph.pyct import transformer
class DummyGensym(object):
"""A dumb gensym that suffixes a stem by sequential numbers from 1000."""
def __init__(self, entity_info):
del entity_info
# A proper implementation needs to account for:
# * entity_info.namespace
# * all the symbols defined in the AST
# * the symbols generated so far
self._idx = 0
def new_name(self, stem):
self._idx += 1
return stem + '_' + str(1000 + self._idx)
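  # Illustrative behaviour (sketch): successive calls on one instance return
  # sequentially suffixed names, e.g. new_name('tmp') -> 'tmp_1001', then
  # new_name('tmp') -> 'tmp_1002'.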
class AnfTransformer(transformer.Base):
"""Performs the actual conversion."""
# TODO(mdan): Link to a reference.
# TODO(mdan): Implement.
def __init__(self, entity_info):
"""Creates a transformer.
Args:
entity_info: transformer.EntityInfo
"""
super(AnfTransformer, self).__init__(entity_info)
self._gensym = DummyGensym(entity_info)
def transform(node, entity_info):
return AnfTransformer(entity_info).visit(node)
| drpngx/tensorflow | tensorflow/contrib/autograph/pyct/common_transformers/anf.py | Python | apache-2.0 | 1,791 | [
"VisIt"
] | 783395f0b79dd59ea5bd649c2c5707152e51e71b39d08169634acc6ec560cbc8 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import email.parser
import logging
import json
import math
import os
import pickle
import sys
import unittest
from contextlib import closing, contextmanager
from gzip import GzipFile
from shutil import rmtree
import gc
import time
from textwrap import dedent
from hashlib import md5
from pyeclib.ec_iface import ECDriverError
from tempfile import mkdtemp, NamedTemporaryFile
import weakref
import operator
import functools
from swift.obj import diskfile
import re
import random
from collections import defaultdict
import mock
from eventlet import sleep, spawn, wsgi, listen, Timeout, debug
from eventlet.green import httplib
from six import BytesIO
from six import StringIO
from six.moves import range
from six.moves.urllib.parse import quote
from swift.common.utils import hash_path, storage_directory, \
parse_content_type, parse_mime_headers, \
iter_multipart_mime_documents, public
from test.unit import (
connect_tcp, readuntil2crlfs, FakeLogger, fake_http_connect, FakeRing,
FakeMemcache, debug_logger, patch_policies, write_fake_ring,
mocked_http_conn, DEFAULT_TEST_EC_TYPE)
from swift.proxy import server as proxy_server
from swift.proxy.controllers.obj import ReplicatedObjectController
from swift.account import server as account_server
from swift.container import server as container_server
from swift.obj import server as object_server
from swift.common.middleware import proxy_logging, versioned_writes
from swift.common.middleware.acl import parse_acl, format_acl
from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \
APIVersionError
from swift.common import utils, constraints
from swift.common.ring import RingData
from swift.common.utils import mkdirs, normalize_timestamp, NullLogger
from swift.common.wsgi import monkey_patch_mimetools, loadapp
from swift.proxy.controllers import base as proxy_base
from swift.proxy.controllers.base import get_container_memcache_key, \
get_account_memcache_key, cors_validation
import swift.proxy.controllers
import swift.proxy.controllers.obj
from swift.common.swob import Request, Response, HTTPUnauthorized, \
HTTPException, HeaderKeyDict, HTTPBadRequest
from swift.common import storage_policy
from swift.common.storage_policy import StoragePolicy, ECStoragePolicy, \
StoragePolicyCollection, POLICIES
from swift.common.request_helpers import get_sys_meta_prefix
# mocks
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
STATIC_TIME = time.time()
_test_coros = _test_servers = _test_sockets = _orig_container_listing_limit = \
_testdir = _orig_SysLogHandler = _orig_POLICIES = _test_POLICIES = None
def do_setup(the_object_server):
utils.HASH_PATH_SUFFIX = 'endcap'
global _testdir, _test_servers, _test_sockets, \
_orig_container_listing_limit, _test_coros, _orig_SysLogHandler, \
_orig_POLICIES, _test_POLICIES
_orig_POLICIES = storage_policy._POLICIES
_orig_SysLogHandler = utils.SysLogHandler
utils.SysLogHandler = mock.MagicMock()
monkey_patch_mimetools()
# Since we're starting up a lot here, we're going to test more than
# just chunked puts; we're also going to test parts of
# proxy_server.Application we couldn't get to easily otherwise.
_testdir = \
os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked')
mkdirs(_testdir)
rmtree(_testdir)
for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1',
'sdf1', 'sdg1', 'sdh1', 'sdi1'):
mkdirs(os.path.join(_testdir, drive, 'tmp'))
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers':
'content-encoding, x-object-manifest, content-disposition, foo',
'allow_versions': 't'}
prolis = listen(('localhost', 0))
acc1lis = listen(('localhost', 0))
acc2lis = listen(('localhost', 0))
con1lis = listen(('localhost', 0))
con2lis = listen(('localhost', 0))
obj1lis = listen(('localhost', 0))
obj2lis = listen(('localhost', 0))
obj3lis = listen(('localhost', 0))
objsocks = [obj1lis, obj2lis, obj3lis]
_test_sockets = \
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
account_devs = [
{'port': acc1lis.getsockname()[1]},
{'port': acc2lis.getsockname()[1]},
]
write_fake_ring(account_ring_path, *account_devs)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
container_devs = [
{'port': con1lis.getsockname()[1]},
{'port': con2lis.getsockname()[1]},
]
write_fake_ring(container_ring_path, *container_devs)
storage_policy._POLICIES = StoragePolicyCollection([
StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False),
ECStoragePolicy(3, 'ec', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=2, ec_nparity=1, ec_segment_size=4096)])
obj_rings = {
0: ('sda1', 'sdb1'),
1: ('sdc1', 'sdd1'),
2: ('sde1', 'sdf1'),
# sdg1, sdh1, sdi1 taken by policy 3 (see below)
}
for policy_index, devices in obj_rings.items():
policy = POLICIES[policy_index]
obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz')
obj_devs = [
{'port': objsock.getsockname()[1], 'device': dev}
for objsock, dev in zip(objsocks, devices)]
write_fake_ring(obj_ring_path, *obj_devs)
# write_fake_ring can't handle a 3-element ring, and the EC policy needs
# at least 3 devs to work with, so we do it manually
devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1',
'port': obj1lis.getsockname()[1]},
{'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]},
{'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1',
'port': obj3lis.getsockname()[1]}]
pol3_replica2part2dev_id = [[0, 1, 2, 0],
[1, 2, 0, 1],
[2, 0, 1, 2]]
obj3_ring_path = os.path.join(_testdir, POLICIES[3].ring_name + '.ring.gz')
part_shift = 30
with closing(GzipFile(obj3_ring_path, 'wb')) as fh:
pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh)
prosrv = proxy_server.Application(conf, FakeMemcacheReturnsNone(),
logger=debug_logger('proxy'))
for policy in POLICIES:
# make sure all the rings are loaded
prosrv.get_object_ring(policy.idx)
# don't lose this one!
_test_POLICIES = storage_policy._POLICIES
acc1srv = account_server.AccountController(
conf, logger=debug_logger('acct1'))
acc2srv = account_server.AccountController(
conf, logger=debug_logger('acct2'))
con1srv = container_server.ContainerController(
conf, logger=debug_logger('cont1'))
con2srv = container_server.ContainerController(
conf, logger=debug_logger('cont2'))
obj1srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj2'))
obj3srv = the_object_server.ObjectController(
conf, logger=debug_logger('obj3'))
_test_servers = \
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv)
nl = NullLogger()
logging_prosv = proxy_logging.ProxyLoggingMiddleware(prosrv, conf,
logger=prosrv.logger)
prospa = spawn(wsgi.server, prolis, logging_prosv, nl)
acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl)
acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl)
con1spa = spawn(wsgi.server, con1lis, con1srv, nl)
con2spa = spawn(wsgi.server, con2lis, con2srv, nl)
obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl)
obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl)
obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl)
_test_coros = \
(prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa)
# Create account
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a')
for node in nodes:
conn = swift.proxy.controllers.obj.http_connect(node['ip'],
node['port'],
node['device'],
partition, 'PUT', '/a',
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
assert(resp.status == 201)
# Create another account
# used for account-to-account tests
ts = normalize_timestamp(time.time())
partition, nodes = prosrv.account_ring.get_nodes('a1')
for node in nodes:
conn = swift.proxy.controllers.obj.http_connect(node['ip'],
node['port'],
node['device'],
partition, 'PUT',
'/a1',
{'X-Timestamp': ts,
'x-trans-id': 'test'})
resp = conn.getresponse()
assert(resp.status == 201)
# Create containers, 1 per test policy
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
# Create container in other account
# used for account-to-account tests
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
exp, headers[:len(exp)])
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write(
'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write(
'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
def unpatch_policies(f):
"""
    This will unset a TestCase-level patch_policies so that the module-level
    policies set up for the _test_servers are used instead.
N.B. You should NEVER modify the _test_server policies or rings during a
test because they persist for the life of the entire module!
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
with patch_policies(_test_POLICIES):
return f(*args, **kwargs)
return wrapper
def setup():
do_setup(object_server)
def teardown():
for server in _test_coros:
server.kill()
rmtree(os.path.dirname(_testdir))
utils.SysLogHandler = _orig_SysLogHandler
storage_policy._POLICIES = _orig_POLICIES
def sortHeaderNames(headerNames):
"""
Return the given string of header names sorted.
headerName: a comma-delimited list of header names
"""
headers = [a.strip() for a in headerNames.split(',') if a.strip()]
headers.sort()
return ', '.join(headers)
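# Illustrative (hypothetical) input; the sort is plain ASCII ordering, so
# uppercase names come before lowercase ones:
#   sortHeaderNames('Content-Length,content-type, X-Timestamp')
#   -> 'Content-Length, X-Timestamp, content-type'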
def parse_headers_string(headers_str):
headers_dict = HeaderKeyDict()
for line in headers_str.split('\r\n'):
if ': ' in line:
header, value = line.split(': ', 1)
headers_dict[header] = value
return headers_dict
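# Illustrative (hypothetical) input; the status line has no ': ' and is
# skipped, the remaining lines become HeaderKeyDict entries:
#   parse_headers_string('HTTP/1.1 200 OK\r\nContent-Length: 0\r\nX-Trans-Id: tx1\r\n\r\n')
#   -> {'Content-Length': '0', 'X-Trans-Id': 'tx1'}  (as a HeaderKeyDict)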
def node_error_count(proxy_app, ring_node):
# Reach into the proxy's internals to get the error count for a
# particular node
node_key = proxy_app._error_limit_node_key(ring_node)
return proxy_app._error_limiting.get(node_key, {}).get('errors', 0)
def node_last_error(proxy_app, ring_node):
# Reach into the proxy's internals to get the last error for a
# particular node
node_key = proxy_app._error_limit_node_key(ring_node)
return proxy_app._error_limiting.get(node_key, {}).get('last_error')
def set_node_errors(proxy_app, ring_node, value, last_error):
# Set the node's error count to value
node_key = proxy_app._error_limit_node_key(ring_node)
stats = proxy_app._error_limiting.setdefault(node_key, {})
stats['errors'] = value
stats['last_error'] = last_error
class FakeMemcacheReturnsNone(FakeMemcache):
def get(self, key):
# Returns None as the timestamp of the container; assumes we're only
# using the FakeMemcache for container existence checks.
return None
@contextmanager
def save_globals():
orig_http_connect = getattr(swift.proxy.controllers.base, 'http_connect',
None)
orig_account_info = getattr(swift.proxy.controllers.Controller,
'account_info', None)
orig_container_info = getattr(swift.proxy.controllers.Controller,
'container_info', None)
try:
yield True
finally:
swift.proxy.controllers.Controller.account_info = orig_account_info
swift.proxy.controllers.base.http_connect = orig_http_connect
swift.proxy.controllers.obj.http_connect = orig_http_connect
swift.proxy.controllers.account.http_connect = orig_http_connect
swift.proxy.controllers.container.http_connect = orig_http_connect
swift.proxy.controllers.Controller.container_info = orig_container_info
def set_http_connect(*args, **kwargs):
new_connect = fake_http_connect(*args, **kwargs)
swift.proxy.controllers.base.http_connect = new_connect
swift.proxy.controllers.obj.http_connect = new_connect
swift.proxy.controllers.account.http_connect = new_connect
swift.proxy.controllers.container.http_connect = new_connect
return new_connect
def _make_callback_func(calls):
def callback(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
context = {}
context['method'] = method
context['path'] = path
context['headers'] = headers or {}
calls.append(context)
return callback
def _limit_max_file_size(f):
"""
This will limit constraints.MAX_FILE_SIZE for the duration of the
wrapped function, based on whether MAX_FILE_SIZE exceeds the
sys.maxsize limit on the system running the tests.
This allows successful testing on 32 bit systems.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_max_file_size = constraints.MAX_FILE_SIZE
if constraints.MAX_FILE_SIZE >= sys.maxsize:
test_max_file_size = (2 ** 30 + 2)
with mock.patch.object(constraints, 'MAX_FILE_SIZE',
test_max_file_size):
return f(*args, **kwargs)
return wrapper
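# Illustrative (hypothetical) use of the decorator on a TestCase method that
# exercises MAX_FILE_SIZE handling; the method name below is made up:
#   @_limit_max_file_size
#   def test_put_exceeds_max_file_size(self):
#       ...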
# tests
class TestController(unittest.TestCase):
def setUp(self):
self.account_ring = FakeRing()
self.container_ring = FakeRing()
self.memcache = FakeMemcache()
app = proxy_server.Application(None, self.memcache,
account_ring=self.account_ring,
container_ring=self.container_ring)
self.controller = swift.proxy.controllers.Controller(app)
class FakeReq(object):
def __init__(self):
self.url = "/foo/bar"
self.method = "METHOD"
def as_referer(self):
return self.method + ' ' + self.url
self.account = 'some_account'
self.container = 'some_container'
self.request = FakeReq()
self.read_acl = 'read_acl'
self.write_acl = 'write_acl'
def test_transfer_headers(self):
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
'new-owner': 'Kun'}
dst_headers = {'x-base-meta-owner': 'Gareth',
'x-base-meta-size': '150M'}
self.controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-base-meta-owner': '',
'x-base-meta-size': '151M'}
self.assertEqual(dst_headers, expected_headers)
def check_account_info_return(self, partition, nodes, is_none=False):
if is_none:
p, n = None, None
else:
p, n = self.account_ring.get_nodes(self.account)
self.assertEqual(p, partition)
self.assertEqual(n, nodes)
def test_account_info_container_count(self):
with save_globals():
set_http_connect(200, count=123)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 123)
with save_globals():
set_http_connect(200, count='123')
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 123)
with save_globals():
cache_key = get_account_memcache_key(self.account)
account_info = {'status': 200, 'container_count': 1234}
self.memcache.set(cache_key, account_info)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 1234)
with save_globals():
cache_key = get_account_memcache_key(self.account)
account_info = {'status': 200, 'container_count': '1234'}
self.memcache.set(cache_key, account_info)
partition, nodes, count = \
self.controller.account_info(self.account)
self.assertEqual(count, 1234)
def test_make_requests(self):
with save_globals():
set_http_connect(200)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
set_http_connect(201, raise_timeout_exc=True)
self.controller._make_request(
nodes, partition, 'POST', '/', '', '',
self.controller.app.logger.thread_locals)
# tests if 200 is cached and used
def test_account_info_200(self):
with save_globals():
set_http_connect(200)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes)
self.assertEqual(count, 12345)
# Test the internal representation in memcache
# 'container_count' changed from int to str
cache_key = get_account_memcache_key(self.account)
container_info = {'status': 200,
'container_count': '12345',
'total_object_count': None,
'bytes': None,
'meta': {},
'sysmeta': {}}
self.assertEqual(container_info,
self.memcache.get(cache_key))
set_http_connect()
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes)
self.assertEqual(count, 12345)
# tests if 404 is cached and used
def test_account_info_404(self):
with save_globals():
set_http_connect(404, 404, 404)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, True)
self.assertEqual(count, None)
# Test the internal representation in memcache
# 'container_count' changed from 0 to None
cache_key = get_account_memcache_key(self.account)
account_info = {'status': 404,
'container_count': None, # internally keep None
'total_object_count': None,
'bytes': None,
'meta': {},
'sysmeta': {}}
self.assertEqual(account_info,
self.memcache.get(cache_key))
set_http_connect()
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, True)
self.assertEqual(count, None)
# tests if some http status codes are not cached
def test_account_info_no_cache(self):
def test(*status_list):
set_http_connect(*status_list)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.assertEqual(len(self.memcache.keys()), 0)
self.check_account_info_return(partition, nodes, True)
self.assertEqual(count, None)
with save_globals():
# We cache if we have two 404 responses - fail if only one
test(503, 503, 404)
test(504, 404, 503)
test(404, 507, 503)
test(503, 503, 503)
def test_account_info_no_account(self):
with save_globals():
self.memcache.store = {}
set_http_connect(404, 404, 404)
partition, nodes, count = \
self.controller.account_info(self.account, self.request)
self.check_account_info_return(partition, nodes, is_none=True)
self.assertEqual(count, None)
def check_container_info_return(self, ret, is_none=False):
if is_none:
partition, nodes, read_acl, write_acl = None, None, None, None
else:
partition, nodes = self.container_ring.get_nodes(self.account,
self.container)
read_acl, write_acl = self.read_acl, self.write_acl
self.assertEqual(partition, ret['partition'])
self.assertEqual(nodes, ret['nodes'])
self.assertEqual(read_acl, ret['read_acl'])
self.assertEqual(write_acl, ret['write_acl'])
def test_container_info_invalid_account(self):
def account_info(self, account, request, autocreate=False):
return None, None
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
ret = self.controller.container_info(self.account,
self.container,
self.request)
self.check_container_info_return(ret, True)
# tests if 200 is cached and used
def test_container_info_200(self):
with save_globals():
headers = {'x-container-read': self.read_acl,
'x-container-write': self.write_acl}
set_http_connect(200, # account_info is found
200, headers=headers) # container_info is found
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret)
cache_key = get_container_memcache_key(self.account,
self.container)
cache_value = self.memcache.get(cache_key)
self.assertTrue(isinstance(cache_value, dict))
self.assertEqual(200, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret)
# tests if 404 is cached and used
def test_container_info_404(self):
def account_info(self, account, request):
return True, True, 0
with save_globals():
set_http_connect(503, 204, # account_info found
504, 404, 404) # container_info 'NotFound'
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
cache_key = get_container_memcache_key(self.account,
self.container)
cache_value = self.memcache.get(cache_key)
self.assertTrue(isinstance(cache_value, dict))
self.assertEqual(404, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
set_http_connect(503, 404, 404) # account_info 'NotFound'
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
cache_key = get_container_memcache_key(self.account,
self.container)
cache_value = self.memcache.get(cache_key)
self.assertTrue(isinstance(cache_value, dict))
self.assertEqual(404, cache_value.get('status'))
set_http_connect()
ret = self.controller.container_info(
self.account, self.container, self.request)
self.check_container_info_return(ret, True)
# tests if some http status codes are not cached
def test_container_info_no_cache(self):
def test(*status_list):
set_http_connect(*status_list)
ret = self.controller.container_info(
self.account, self.container, self.request)
self.assertEqual(len(self.memcache.keys()), 0)
self.check_container_info_return(ret, True)
with save_globals():
# We cache if we have two 404 responses - fail if only one
test(503, 503, 404)
test(504, 404, 503)
test(404, 507, 503)
test(503, 503, 503)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestProxyServer(unittest.TestCase):
def test_creation(self):
# later config should be extended to assert more config options
app = proxy_server.Application({'node_timeout': '3.5',
'recoverable_node_timeout': '1.5'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(app.node_timeout, 3.5)
self.assertEqual(app.recoverable_node_timeout, 1.5)
def test_get_object_ring(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
with patch_policies([
StoragePolicy(0, 'a', False, object_ring=123),
StoragePolicy(1, 'b', True, object_ring=456),
StoragePolicy(2, 'd', False, object_ring=789)
]):
# None means legacy so always use policy 0
ring = baseapp.get_object_ring(None)
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('')
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('0')
self.assertEqual(ring, 123)
ring = baseapp.get_object_ring('1')
self.assertEqual(ring, 456)
ring = baseapp.get_object_ring('2')
self.assertEqual(ring, 789)
# illegal values
self.assertRaises(ValueError, baseapp.get_object_ring, '99')
self.assertRaises(ValueError, baseapp.get_object_ring, 'asdf')
def test_unhandled_exception(self):
class MyApp(proxy_server.Application):
def get_controller(self, path):
raise Exception('this shouldn\'t be caught')
app = MyApp(None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
app.update_request(req)
resp = app.handle_request(req)
self.assertEqual(resp.status_int, 500)
def test_internal_method_request(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '__init__'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
def test_inexistent_method_request(self):
baseapp = proxy_server.Application({},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a', environ={'REQUEST_METHOD': '!invalid'}))
self.assertEqual(resp.status, '405 Method Not Allowed')
def test_calls_authorize_allow(self):
called = [False]
def authorize(req):
called[0] = True
with save_globals():
set_http_connect(200)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
app.handle_request(req)
self.assertTrue(called[0])
def test_calls_authorize_deny(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
req = Request.blank('/v1/a')
req.environ['swift.authorize'] = authorize
app.update_request(req)
app.handle_request(req)
self.assertTrue(called[0])
def test_negative_content_length(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
FakeRing(), FakeRing())
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-1'}))
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, 'Invalid Content-Length')
resp = baseapp.handle_request(
Request.blank('/', environ={'CONTENT_LENGTH': '-123'}))
self.assertEqual(resp.status, '400 Bad Request')
self.assertEqual(resp.body, 'Invalid Content-Length')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_adds_transaction_id(self):
swift_dir = mkdtemp()
try:
logger = FakeLogger()
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), logger,
container_ring=FakeLogger(),
account_ring=FakeRing())
baseapp.handle_request(
Request.blank('/info',
environ={'HTTP_X_TRANS_ID_EXTRA': 'sardine',
'REQUEST_METHOD': 'GET'}))
# This is kind of a hokey way to get the transaction ID; it'd be
# better to examine response headers, but the catch_errors
# middleware is what sets the X-Trans-Id header, and we don't have
# that available here.
self.assertTrue(logger.txn_id.endswith('-sardine'))
finally:
rmtree(swift_dir, ignore_errors=True)
def test_adds_transaction_id_length_limit(self):
swift_dir = mkdtemp()
try:
logger = FakeLogger()
baseapp = proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), logger,
container_ring=FakeLogger(),
account_ring=FakeRing())
baseapp.handle_request(
Request.blank('/info',
environ={'HTTP_X_TRANS_ID_EXTRA': 'a' * 1000,
'REQUEST_METHOD': 'GET'}))
self.assertTrue(logger.txn_id.endswith(
'-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
finally:
rmtree(swift_dir, ignore_errors=True)
def test_denied_host_header(self):
swift_dir = mkdtemp()
try:
baseapp = proxy_server.Application({'swift_dir': swift_dir,
'deny_host_headers':
'invalid_host.com'},
FakeMemcache(),
container_ring=FakeLogger(),
account_ring=FakeRing())
resp = baseapp.handle_request(
Request.blank('/v1/a/c/o',
environ={'HTTP_HOST': 'invalid_host.com'}))
self.assertEqual(resp.status, '403 Forbidden')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_node_timing(self):
baseapp = proxy_server.Application({'sorting_method': 'timing'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(baseapp.node_timings, {})
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
baseapp.update_request(req)
resp = baseapp.handle_request(req)
self.assertEqual(resp.status_int, 503) # couldn't connect to anything
exp_timings = {}
self.assertEqual(baseapp.node_timings, exp_timings)
times = [time.time()]
exp_timings = {'127.0.0.1': (0.1, times[0] + baseapp.timing_expiry)}
with mock.patch('swift.proxy.server.time', lambda: times.pop(0)):
baseapp.set_node_timing({'ip': '127.0.0.1'}, 0.1)
self.assertEqual(baseapp.node_timings, exp_timings)
nodes = [{'ip': '127.0.0.1'}, {'ip': '127.0.0.2'}, {'ip': '127.0.0.3'}]
with mock.patch('swift.proxy.server.shuffle', lambda l: l):
res = baseapp.sort_nodes(nodes)
exp_sorting = [{'ip': '127.0.0.2'}, {'ip': '127.0.0.3'},
{'ip': '127.0.0.1'}]
self.assertEqual(res, exp_sorting)
def test_node_affinity(self):
baseapp = proxy_server.Application({'sorting_method': 'affinity',
'read_affinity': 'r1=1'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
nodes = [{'region': 2, 'zone': 1, 'ip': '127.0.0.1'},
{'region': 1, 'zone': 2, 'ip': '127.0.0.2'}]
with mock.patch('swift.proxy.server.shuffle', lambda x: x):
app_sorted = baseapp.sort_nodes(nodes)
exp_sorted = [{'region': 1, 'zone': 2, 'ip': '127.0.0.2'},
{'region': 2, 'zone': 1, 'ip': '127.0.0.1'}]
self.assertEqual(exp_sorted, app_sorted)
def test_info_defaults(self):
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
self.assertTrue(app.expose_info)
self.assertTrue(isinstance(app.disallowed_sections, list))
self.assertEqual(1, len(app.disallowed_sections))
self.assertEqual(['swift.valid_api_versions'],
app.disallowed_sections)
self.assertTrue(app.admin_key is None)
def test_get_info_controller(self):
req = Request.blank('/info')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
controller, path_parts = app.get_controller(req)
self.assertTrue('version' in path_parts)
self.assertTrue(path_parts['version'] is None)
self.assertTrue('disallowed_sections' in path_parts)
self.assertTrue('expose_info' in path_parts)
self.assertTrue('admin_key' in path_parts)
self.assertEqual(controller.__name__, 'InfoController')
def test_error_limit_methods(self):
logger = debug_logger('test')
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(),
logger=logger)
node = app.container_ring.get_part_nodes(0)[0]
# error occurred
app.error_occurred(node, 'test msg')
self.assertTrue('test msg' in
logger.get_lines_for_level('error')[-1])
self.assertEqual(1, node_error_count(app, node))
# exception occurred
try:
raise Exception('kaboom1!')
except Exception as e1:
app.exception_occurred(node, 'test1', 'test1 msg')
line = logger.get_lines_for_level('error')[-1]
self.assertTrue('test1 server' in line)
self.assertTrue('test1 msg' in line)
log_args, log_kwargs = logger.log_dict['error'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e1)
self.assertEqual(2, node_error_count(app, node))
# warning exception occurred
try:
raise Exception('kaboom2!')
except Exception as e2:
app.exception_occurred(node, 'test2', 'test2 msg',
level=logging.WARNING)
line = logger.get_lines_for_level('warning')[-1]
self.assertTrue('test2 server' in line)
self.assertTrue('test2 msg' in line)
log_args, log_kwargs = logger.log_dict['warning'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e2)
self.assertEqual(3, node_error_count(app, node))
# custom exception occurred
try:
raise Exception('kaboom3!')
except Exception as e3:
e3_info = sys.exc_info()
try:
raise Exception('kaboom4!')
except Exception:
pass
app.exception_occurred(node, 'test3', 'test3 msg',
level=logging.WARNING, exc_info=e3_info)
line = logger.get_lines_for_level('warning')[-1]
self.assertTrue('test3 server' in line)
self.assertTrue('test3 msg' in line)
log_args, log_kwargs = logger.log_dict['warning'][-1]
self.assertTrue(log_kwargs['exc_info'])
self.assertEqual(log_kwargs['exc_info'][1], e3)
self.assertEqual(4, node_error_count(app, node))
def test_valid_api_version(self):
app = proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
# The version string is only checked for account, container and object
# requests; the raised APIVersionError returns a 404 to the client
for path in [
'/v2/a',
'/v2/a/c',
'/v2/a/c/o']:
req = Request.blank(path)
self.assertRaises(APIVersionError, app.get_controller, req)
# Default valid API versions are ok
for path in [
'/v1/a',
'/v1/a/c',
'/v1/a/c/o',
'/v1.0/a',
'/v1.0/a/c',
'/v1.0/a/c/o']:
req = Request.blank(path)
controller, path_parts = app.get_controller(req)
self.assertTrue(controller is not None)
# Ensure settings valid API version constraint works
for version in ["42", 42]:
try:
with NamedTemporaryFile() as f:
f.write('[swift-constraints]\n')
f.write('valid_api_versions = %s\n' % version)
f.flush()
with mock.patch.object(utils, 'SWIFT_CONF_FILE', f.name):
constraints.reload_constraints()
req = Request.blank('/%s/a' % version)
controller, _ = app.get_controller(req)
self.assertTrue(controller is not None)
# In this case v1 is invalid
req = Request.blank('/v1/a')
self.assertRaises(APIVersionError, app.get_controller, req)
finally:
constraints.reload_constraints()
# Check that the valid_api_versions is not exposed by default
req = Request.blank('/info')
controller, path_parts = app.get_controller(req)
self.assertTrue('swift.valid_api_versions' in
path_parts.get('disallowed_sections'))
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
])
class TestProxyServerLoading(unittest.TestCase):
def setUp(self):
self._orig_hash_suffix = utils.HASH_PATH_SUFFIX
utils.HASH_PATH_SUFFIX = 'endcap'
self.tempdir = mkdtemp()
def tearDown(self):
rmtree(self.tempdir)
utils.HASH_PATH_SUFFIX = self._orig_hash_suffix
for policy in POLICIES:
policy.object_ring = None
def test_load_policy_rings(self):
for policy in POLICIES:
self.assertFalse(policy.object_ring)
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % self.tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
account_ring_path = os.path.join(self.tempdir, 'account.ring.gz')
write_fake_ring(account_ring_path)
container_ring_path = os.path.join(self.tempdir, 'container.ring.gz')
write_fake_ring(container_ring_path)
for policy in POLICIES:
object_ring_path = os.path.join(self.tempdir,
policy.ring_name + '.ring.gz')
write_fake_ring(object_ring_path)
app = loadapp(conf_path)
# find the end of the pipeline
while hasattr(app, 'app'):
app = app.app
# validate loaded rings
self.assertEqual(app.account_ring.serialized_path,
account_ring_path)
self.assertEqual(app.container_ring.serialized_path,
container_ring_path)
for policy in POLICIES:
self.assertEqual(policy.object_ring,
app.get_object_ring(int(policy)))
def test_missing_rings(self):
conf_path = os.path.join(self.tempdir, 'proxy-server.conf')
conf_body = """
[DEFAULT]
swift_dir = %s
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
""" % self.tempdir
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
ring_paths = [
os.path.join(self.tempdir, 'account.ring.gz'),
os.path.join(self.tempdir, 'container.ring.gz'),
]
for policy in POLICIES:
self.assertFalse(policy.object_ring)
object_ring_path = os.path.join(self.tempdir,
policy.ring_name + '.ring.gz')
ring_paths.append(object_ring_path)
for policy in POLICIES:
self.assertFalse(policy.object_ring)
for ring_path in ring_paths:
self.assertFalse(os.path.exists(ring_path))
self.assertRaises(IOError, loadapp, conf_path)
write_fake_ring(ring_path)
# all rings exist, app should load
loadapp(conf_path)
for policy in POLICIES:
self.assertTrue(policy.object_ring)
@patch_policies([StoragePolicy(0, 'zero', True,
object_ring=FakeRing(base_port=3000))])
class TestObjectController(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(
None, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
# clear proxy logger result for each test
_test_servers[0].logger._clear()
def tearDown(self):
self.app.account_ring.set_replicas(3)
self.app.container_ring.set_replicas(3)
for policy in POLICIES:
policy.object_ring = FakeRing(base_port=3000)
def put_container(self, policy_name, container_name):
# Note: only works if called with unpatched policies
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: %s\r\n'
'\r\n' % (container_name, policy_name))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
def assert_status_map(self, method, statuses, expected, raise_exc=False):
with save_globals():
kwargs = {}
if raise_exc:
kwargs['raise_exc'] = raise_exc
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
try:
res = method(req)
except HTTPException as res:
pass
self.assertEqual(res.status_int, expected)
# repeat test
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
try:
res = method(req)
except HTTPException as res:
pass
self.assertEqual(res.status_int, expected)
@unpatch_policies
def test_policy_IO(self):
def check_file(policy, cont, devs, check_val):
partition, nodes = policy.object_ring.get_nodes('a', cont, 'o')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
for dev in devs:
file = df_mgr.get_diskfile(dev, partition, 'a',
cont, 'o',
policy=policy)
if check_val is True:
    file.open()
else:
    # the object should not exist on this device
    self.assertRaises(DiskFileNotExist, file.open)
prolis = _test_sockets[0]
prosrv = _test_servers[0]
# check policy 0: put file on c, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'test_object0'
path = '/v1/a/c/o'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[0], 'c', ['sda1', 'sdb1'], True)
check_file(POLICIES[0], 'c', ['sdc1', 'sdd1', 'sde1', 'sdf1'], False)
# check policy 1: put file on c1, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c1/o'
obj = 'test_object1'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[1], 'c1', ['sdc1', 'sdd1'], True)
check_file(POLICIES[1], 'c1', ['sda1', 'sdb1', 'sde1', 'sdf1'], False)
# check policy 2: put file on c2, read it back, check loc on disk
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c2/o'
obj = 'test_object2'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: text/plain\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
check_file(POLICIES[2], 'c2', ['sde1', 'sdf1'], True)
check_file(POLICIES[2], 'c2', ['sda1', 'sdb1', 'sdc1', 'sdd1'], False)
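# Each policy's data should live only on the devices in that policy's
# ring -- sda1/sdb1 for policy 0, sdc1/sdd1 for policy 1, sde1/sdf1 for
# policy 2 -- which is what the paired check_file() calls assert:
# present on the policy's own devices, absent everywhere else. The
# device-to-policy mapping is assumed to come from the in-process test
# rings set up elsewhere in this module.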
@unpatch_policies
def test_policy_IO_override(self):
if hasattr(_test_servers[-1], '_filesystem'):
# ironically, the _filesystem attribute on the object server means
# the in-memory diskfile is in use, so this test does not apply
return
prosrv = _test_servers[0]
# validate container policy is 1
req = Request.blank('/v1/a/c1', method='HEAD')
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 204) # sanity check
self.assertEqual(POLICIES[1].name, res.headers['x-storage-policy'])
# check overrides: put it in policy 2 (not where the container says)
req = Request.blank(
'/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': BytesIO(b"hello")},
headers={'Content-Type': 'text/plain',
'Content-Length': '5',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 201) # sanity check
# go to disk to make sure it's there
partition, nodes = prosrv.get_object_ring(2).get_nodes(
'a', 'c1', 'wrong-o')
node = nodes[0]
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileManager(conf, FakeLogger())
df = df_mgr.get_diskfile(node['device'], partition, 'a',
'c1', 'wrong-o', policy=POLICIES[2])
with df.open():
contents = ''.join(df.reader())
self.assertEqual(contents, "hello")
# can't get it from the normal place
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'text/plain'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 404) # sanity check
# but we can get it from policy 2
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, 'hello')
# and we can delete it the same way
req = Request.blank('/v1/a/c1/wrong-o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'text/plain',
'X-Backend-Storage-Policy-Index': '2'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 204)
df = df_mgr.get_diskfile(node['device'], partition, 'a',
'c1', 'wrong-o', policy=POLICIES[2])
try:
df.open()
except DiskFileNotExist as e:
self.assertTrue(float(e.timestamp) > 0)
else:
self.fail('did not raise DiskFileNotExist')
@unpatch_policies
def test_GET_newest_large_file(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'a' * (1024 * 1024)
path = '/v1/a/c/o.large'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
req = Request.blank(path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type':
'application/octet-stream',
'X-Newest': 'true'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, obj)
@unpatch_policies
def test_GET_ranges(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = (''.join(
('beans lots of beans lots of beans lots of beans yeah %04d ' % i)
for i in range(100)))
path = '/v1/a/c/o.beans'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# one byte range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=10-200'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 206)
self.assertEqual(res.body, obj[10:201])
# multiple byte ranges
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=10-200,1000-1099,4123-4523'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 206)
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges')
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None)
got_mime_docs = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(res.body),
boundary):
headers = parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_mime_docs.append((headers, body))
self.assertEqual(len(got_mime_docs), 3)
first_range_headers = got_mime_docs[0][0]
first_range_body = got_mime_docs[0][1]
self.assertEqual(first_range_headers['Content-Range'],
'bytes 10-200/5800')
self.assertEqual(first_range_body, obj[10:201])
second_range_headers = got_mime_docs[1][0]
second_range_body = got_mime_docs[1][1]
self.assertEqual(second_range_headers['Content-Range'],
'bytes 1000-1099/5800')
self.assertEqual(second_range_body, obj[1000:1100])
third_range_headers = got_mime_docs[2][0]
third_range_body = got_mime_docs[2][1]
self.assertEqual(third_range_headers['Content-Range'],
                 'bytes 4123-4523/5800')
self.assertEqual(third_range_body, obj[4123:4524])
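# Arithmetic behind the '/5800' totals above: each formatted line is 58
# bytes and there are 100 of them, so the object is 5800 bytes; the
# three requested ranges come back as the three MIME documents checked
# above.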
@unpatch_policies
def test_GET_bad_range_zero_byte(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
path = '/v1/a/c/o.zerobyte'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: 0\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (path,))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# bad byte-range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=spaghetti-carbonara'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, '')
# not a byte-range
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'Kotta'})
res = req.get_response(prosrv)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, '')
@unpatch_policies
def test_GET_ranges_resuming(self):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = (''.join(
('Smurf! The smurfing smurf is completely smurfed. %03d ' % i)
for i in range(1000)))
path = '/v1/a/c/o.smurfs'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/smurftet-stream\r\n'
'\r\n%s' % (path, str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
kaboomed = [0]
bytes_before_timeout = [None]
class FileLikeKaboom(object):
def __init__(self, inner_file_like):
self.inner_file_like = inner_file_like
# pass through close(), etc. to the wrapped file-like object
def __getattr__(self, attr):
return getattr(self.inner_file_like, attr)
def readline(self, *a, **kw):
if bytes_before_timeout[0] <= 0:
kaboomed[0] += 1
raise ChunkReadTimeout(None)
result = self.inner_file_like.readline(*a, **kw)
if len(result) > bytes_before_timeout[0]:
result = result[:bytes_before_timeout[0]]
bytes_before_timeout[0] -= len(result)
return result
def read(self, length=None):
result = self.inner_file_like.read(length)
if bytes_before_timeout[0] <= 0:
kaboomed[0] += 1
raise ChunkReadTimeout(None)
if len(result) > bytes_before_timeout[0]:
result = result[:bytes_before_timeout[0]]
bytes_before_timeout[0] -= len(result)
return result
orig_hrtdi = proxy_base.http_response_to_document_iters
# Use these to mock out http_response_to_document_iters:
# sabotaged_hrtdi always wraps the response so that it blows up with
# ChunkReadTimeout after some number of bytes are read, while
# single_sabotage_hrtdi (defined below) only sabotages the first call
# and passes subsequent calls through untouched.
def sabotaged_hrtdi(*a, **kw):
resp_parts = orig_hrtdi(*a, **kw)
for sb, eb, l, h, range_file in resp_parts:
if bytes_before_timeout[0] <= 0:
# simulate being unable to read MIME part of
# multipart/byteranges response
kaboomed[0] += 1
raise ChunkReadTimeout(None)
boomer = FileLikeKaboom(range_file)
yield sb, eb, l, h, boomer
sabotaged = [False]
def single_sabotage_hrtdi(*a, **kw):
if not sabotaged[0]:
sabotaged[0] = True
return sabotaged_hrtdi(*a, **kw)
else:
return orig_hrtdi(*a, **kw)
# We want sort of an end-to-end test of object resuming, so what we
# do is mock out stuff so the proxy thinks it only read a certain
# number of bytes before it got a timeout.
bytes_before_timeout[0] = 300
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Content-Type': 'application/octet-stream',
'Range': 'bytes=0-500'})
res = req.get_response(prosrv)
body = res.body # read the whole thing
self.assertEqual(kaboomed[0], 1) # sanity check
self.assertEqual(res.status_int, 206)
self.assertEqual(len(body), 501)
self.assertEqual(body, obj[:501])
# Sanity-check for multi-range resume: make sure we actually break
# in the middle of the second byterange. This test is partially
# about what happens when all the object servers break at once, and
# partially about validating all these mocks we do. After all, the
# point of resuming is that the client can't tell anything went
# wrong, so we need a test where we can't resume and something
# *does* go wrong so we can observe it.
bytes_before_timeout[0] = 700
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
sabotaged_hrtdi): # perma-broken
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''
try:
for chunk in res.app_iter:
body += chunk
except ChunkReadTimeout:
pass
self.assertEqual(res.status_int, 206)
self.assertTrue(kaboomed[0] > 0) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(len(got_byteranges[1]), 199) # partial
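# Why 199: bytes_before_timeout was 700 and the first byterange is 501
# bytes, so only 700 - 501 = 199 bytes of the second byterange arrive
# before every backend times out; with the perma-broken mock there is
# nothing to resume from, so the third byterange never arrives at all.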
# Multi-range resume, resuming in the middle of the first byterange
bytes_before_timeout[0] = 300
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertEqual(kaboomed[0], 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
# Multi-range resume, first GET dies in the middle of the second set
# of MIME headers
bytes_before_timeout[0] = 501
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertTrue(kaboomed[0] >= 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
# Multi-range resume, first GET dies in the middle of the second
# byterange
bytes_before_timeout[0] = 750
kaboomed[0] = 0
sabotaged[0] = False
prosrv._error_limiting = {} # clear out errors
with mock.patch.object(proxy_base, 'http_response_to_document_iters',
single_sabotage_hrtdi):
req = Request.blank(
path,
environ={'REQUEST_METHOD': 'GET'},
headers={'Range': 'bytes=0-500,1000-1500,2000-2500'})
res = req.get_response(prosrv)
body = ''.join(res.app_iter)
self.assertEqual(res.status_int, 206)
self.assertTrue(kaboomed[0] >= 1) # sanity check
ct, params = parse_content_type(res.headers['Content-Type'])
self.assertEqual(ct, 'multipart/byteranges') # sanity check
boundary = dict(params).get('boundary')
self.assertTrue(boundary is not None) # sanity check
got_byteranges = []
for mime_doc_fh in iter_multipart_mime_documents(StringIO(body),
boundary):
parse_mime_headers(mime_doc_fh)
body = mime_doc_fh.read()
got_byteranges.append(body)
self.assertEqual(len(got_byteranges), 3)
self.assertEqual(len(got_byteranges[0]), 501)
self.assertEqual(got_byteranges[0], obj[:501])
self.assertEqual(len(got_byteranges[1]), 501)
self.assertEqual(got_byteranges[1], obj[1000:1501])
self.assertEqual(len(got_byteranges[2]), 501)
self.assertEqual(got_byteranges[2], obj[2000:2501])
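# In the three resume cases above, single_sabotage_hrtdi breaks only
# the first backend GET; the proxy then resumes the ranged GET from
# another node, so the client still receives all three byteranges
# intact even though the failure was injected at different points
# (mid-byterange and mid-MIME-headers).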
@unpatch_policies
def test_PUT_ec(self):
policy = POLICIES[3]
self.put_container("ec", "ec-con")
obj = 'abCD' * 10 # small, so we don't get multiple EC stripes
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/o1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
ecd = policy.pyeclib_driver
expected_pieces = set(ecd.encode(obj))
# go to disk to make sure it's there and all erasure-coded
partition, nodes = policy.object_ring.get_nodes('a', 'ec-con', 'o1')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[policy]
got_pieces = set()
got_indices = set()
got_durable = []
for node_index, node in enumerate(nodes):
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'o1',
policy=policy)
with df.open():
meta = df.get_metadata()
contents = ''.join(df.reader())
got_pieces.add(contents)
# check for the presence of a .durable file for the timestamp
durable_file = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(policy),
partition, hash_path('a', 'ec-con', 'o1')),
utils.Timestamp(df.timestamp).internal + '.durable')
if os.path.isfile(durable_file):
got_durable.append(True)
lmeta = dict((k.lower(), v) for k, v in meta.items())
got_indices.add(
lmeta['x-object-sysmeta-ec-frag-index'])
self.assertEqual(
lmeta['x-object-sysmeta-ec-etag'],
md5(obj).hexdigest())
self.assertEqual(
lmeta['x-object-sysmeta-ec-content-length'],
str(len(obj)))
self.assertEqual(
lmeta['x-object-sysmeta-ec-segment-size'],
'4096')
self.assertEqual(
lmeta['x-object-sysmeta-ec-scheme'],
'%s 2+1' % DEFAULT_TEST_EC_TYPE)
self.assertEqual(
lmeta['etag'],
md5(contents).hexdigest())
self.assertEqual(expected_pieces, got_pieces)
self.assertEqual(set(('0', '1', '2')), got_indices)
# verify at least 2 puts made it all the way to the end of the 2nd
# phase, i.e. at least 2 .durable files were written
num_durable_puts = sum(d is True for d in got_durable)
self.assertTrue(num_durable_puts >= 2)
@unpatch_policies
def test_PUT_ec_multiple_segments(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
pyeclib_header_size = len(ec_policy.pyeclib_driver.encode("")[0])
segment_size = ec_policy.ec_segment_size
# Big enough to have multiple segments. Also a multiple of the
# segment size to get coverage of that path too.
obj = 'ABC' * segment_size
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/o2 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# it's a 2+1 erasure code, so each fragment archive should be half
# the length of the object, plus three inline pyeclib metadata
# things (one per segment)
expected_length = (len(obj) / 2 + pyeclib_header_size * 3)
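# Worked example, assuming the 4096-byte segment size asserted in
# test_PUT_ec above: obj is 3 * 4096 = 12288 bytes, i.e. exactly three
# segments, so each 2+1 fragment archive should be 12288 / 2 plus
# 3 * pyeclib_header_size bytes long -- which is what expected_length
# computes.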
partition, nodes = ec_policy.object_ring.get_nodes(
'a', 'ec-con', 'o2')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
got_durable = []
fragment_archives = []
for node in nodes:
df = df_mgr.get_diskfile(
node['device'], partition, 'a',
'ec-con', 'o2', policy=ec_policy)
with df.open():
contents = ''.join(df.reader())
fragment_archives.append(contents)
self.assertEqual(len(contents), expected_length)
# check for the presence of a .durable file for the timestamp
durable_file = os.path.join(
_testdir, node['device'], storage_directory(
diskfile.get_data_dir(ec_policy),
partition, hash_path('a', 'ec-con', 'o2')),
utils.Timestamp(df.timestamp).internal + '.durable')
if os.path.isfile(durable_file):
got_durable.append(True)
# Verify that we can decode each individual fragment and that they
# are all the correct size
fragment_size = ec_policy.fragment_size
nfragments = int(
math.ceil(float(len(fragment_archives[0])) / fragment_size))
for fragment_index in range(nfragments):
fragment_start = fragment_index * fragment_size
fragment_end = (fragment_index + 1) * fragment_size
try:
frags = [fa[fragment_start:fragment_end]
for fa in fragment_archives]
seg = ec_policy.pyeclib_driver.decode(frags)
except ECDriverError:
self.fail("Failed to decode fragments %d; this probably "
"means the fragments are not the sizes they "
"should be" % fragment_index)
segment_start = fragment_index * segment_size
segment_end = (fragment_index + 1) * segment_size
self.assertEqual(seg, obj[segment_start:segment_end])
# verify at least 2 puts made it all the way to the end of the 2nd
# phase, i.e. at least 2 .durable files were written
num_durable_puts = sum(d is True for d in got_durable)
self.assertTrue(num_durable_puts >= 2)
@unpatch_policies
def test_PUT_ec_object_etag_mismatch(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
obj = '90:6A:02:60:B1:08-96da3e706025537fc42464916427727e'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/o3 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5('something else').hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 422'
self.assertEqual(headers[:len(exp)], exp)
# nothing should have made it to disk on the object servers
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'o3')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'o3', policy=POLICIES[3])
self.assertRaises(DiskFileNotExist, df.open)
@unpatch_policies
def test_PUT_ec_fragment_archive_etag_mismatch(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
# Cause a hash mismatch by feeding one particular MD5 hasher some
# extra data. The goal here is to corrupt the hash computed by
# exactly one of the object servers.
countdown = [1]
def busted_md5_constructor(initial_str=""):
hasher = md5(initial_str)
if countdown[0] == 0:
hasher.update('wrong')
countdown[0] -= 1
return hasher
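# countdown starts at 1, so the first hasher constructed is untouched
# and exactly the second one gets 'wrong' mixed in: one object server
# computes a bad MD5 for its fragment archive while the other two are
# fine. That is below the quorum the proxy needs (hence the 503
# below), but two of the three object servers still end up with a
# .data file on disk.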
obj = 'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
with mock.patch('swift.obj.server.md5', busted_md5_constructor):
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/pimento HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 503' # no quorum
self.assertEqual(headers[:len(exp)], exp)
# 2/3 of the fragment archives should have landed on disk
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'pimento')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
found = 0
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'pimento',
policy=POLICIES[3])
try:
# diskfile open won't succeed because no durable was written,
# so look under the hood for data files.
files = os.listdir(df._datadir)
num_data_files = len([f for f in files if f.endswith('.data')])
self.assertEqual(1, num_data_files)
found += 1
except OSError:
pass
self.assertEqual(found, 2)
@unpatch_policies
def test_PUT_ec_fragment_quorum_archive_etag_mismatch(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
def busted_md5_constructor(initial_str=""):
hasher = md5(initial_str)
hasher.update('wrong')
return hasher
obj = 'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
call_count = [0]
def mock_committer(self):
call_count[0] += 1
commit_confirmation = \
'swift.proxy.controllers.obj.ECPutter.send_commit_confirmation'
with mock.patch('swift.obj.server.md5', busted_md5_constructor), \
mock.patch(commit_confirmation, mock_committer):
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/quorum HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 503' # no quorum
self.assertEqual(headers[:len(exp)], exp)
# Don't send commit to object-server if quorum responses consist of 4xx
self.assertEqual(0, call_count[0])
# no fragment archives should have landed on disk
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'quorum')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'quorum',
policy=POLICIES[3])
self.assertFalse(os.path.exists(df._datadir))
@unpatch_policies
def test_PUT_ec_fragment_quorum_bad_request(self):
ec_policy = POLICIES[3]
self.put_container("ec", "ec-con")
obj = 'uvarovite-esurience-cerated-symphysic'
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
call_count = [0]
def mock_committer(self):
call_count[0] += 1
read_footer = \
'swift.obj.server.ObjectController._read_metadata_footer'
commit_confirmation = \
'swift.proxy.controllers.obj.ECPutter.send_commit_confirmation'
with mock.patch(read_footer) as read_footer_call, \
mock.patch(commit_confirmation, mock_committer):
# Emulate missing footer MIME doc in all object-servers
read_footer_call.side_effect = HTTPBadRequest(
body="couldn't find footer MIME doc")
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/quorum HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: %s\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
# The client should not see the result of the bad conversation
# between proxy-server and object-server; it just gets a 503
exp = 'HTTP/1.1 503'
self.assertEqual(headers[:len(exp)], exp)
# Don't send commit to object-server if quorum responses consist of 4xx
self.assertEqual(0, call_count[0])
# no fragment archives should have landed on disk
partition, nodes = prosrv.get_object_ring(3).get_nodes(
'a', 'ec-con', 'quorum')
conf = {'devices': _testdir, 'mount_check': 'false'}
df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[ec_policy]
for node in nodes:
df = df_mgr.get_diskfile(node['device'], partition,
'a', 'ec-con', 'quorum',
policy=POLICIES[3])
self.assertFalse(os.path.exists(df._datadir))
@unpatch_policies
def test_PUT_ec_if_none_match(self):
self.put_container("ec", "ec-con")
obj = 'ananepionic-lepidophyllous-ropewalker-neglectful'
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/inm HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'If-None-Match: *\r\n'
'Etag: "%s"\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (md5(obj).hexdigest(), len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_GET_ec(self):
self.put_container("ec", "ec-con")
obj = '0123456' * 11 * 17
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'X-Object-Meta-Color: chartreuse\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/go-get-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
self.assertEqual(gotten_obj, obj)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
@unpatch_policies
def test_conditional_GET_ec(self):
self.put_container("ec", "ec-con")
obj = 'this object has an etag and is otherwise unimportant'
etag = md5(obj).hexdigest()
not_etag = md5(obj + "blahblah").hexdigest()
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/conditionals HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
for verb, body in (('GET', obj), ('HEAD', '')):
# If-Match
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-Match': etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-Match': not_etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 412)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-Match': "*"})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
# If-None-Match
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 304)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': not_etag})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, body)
req = Request.blank(
'/v1/a/ec-con/conditionals',
environ={'REQUEST_METHOD': verb},
headers={'If-None-Match': "*"})
resp = req.get_response(prosrv)
self.assertEqual(resp.status_int, 304)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
@unpatch_policies
def test_GET_ec_big(self):
self.put_container("ec", "ec-con")
# our EC segment size is 4 KiB, so this is multiple (3) segments;
# we'll verify that with a sanity check
obj = 'a moose once bit my sister' * 400
self.assertTrue(
len(obj) > POLICIES.get_by_name("ec").ec_segment_size * 2,
"object is too small for proper testing")
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/big-obj-get HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
# This may look like a redundant test, but when things fail, this
# has a useful failure message while the subsequent one spews piles
# of garbage and demolishes your terminal's scrollback buffer.
self.assertEqual(len(gotten_obj), len(obj))
self.assertEqual(gotten_obj, obj)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
@unpatch_policies
def test_GET_ec_failure_handling(self):
self.put_container("ec", "ec-con")
obj = 'look at this object; it is simply amazing ' * 500
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
def explodey_iter(inner_iter):
yield next(inner_iter)
raise Exception("doom ba doom")
def explodey_doc_parts_iter(inner_iter_iter):
for item in inner_iter_iter:
item = item.copy() # paranoia about mutable data
item['part_iter'] = explodey_iter(item['part_iter'])
yield item
real_ec_app_iter = swift.proxy.controllers.obj.ECAppIter
def explodey_ec_app_iter(path, policy, iterators, *a, **kw):
# Each thing in `iterators` here is a document-parts iterator,
# and we want to fail after getting a little into each part.
#
# That way, we ensure we've started streaming the response to
# the client when things go wrong.
return real_ec_app_iter(
path, policy,
[explodey_doc_parts_iter(i) for i in iterators],
*a, **kw)
with mock.patch("swift.proxy.controllers.obj.ECAppIter",
explodey_ec_app_iter):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/crash-test-dummy HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
gotten_obj = ''
try:
with Timeout(300): # don't hang the testrun when this fails
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
except Timeout:
self.fail("GET hung when connection failed")
# Ensure we failed partway through, otherwise the mocks could
# get out of date without anyone noticing
self.assertTrue(0 < len(gotten_obj) < len(obj))
@unpatch_policies
def test_HEAD_ec(self):
self.put_container("ec", "ec-con")
obj = '0123456' * 11 * 17
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'X-Object-Meta-Color: chartreuse\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/ec-con/go-head-it HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
headers = parse_headers_string(headers)
self.assertEqual(str(len(obj)), headers['Content-Length'])
self.assertEqual(md5(obj).hexdigest(), headers['Etag'])
self.assertEqual('chartreuse', headers['X-Object-Meta-Color'])
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
@unpatch_policies
def test_GET_ec_404(self):
self.put_container("ec", "ec-con")
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
@unpatch_policies
def test_HEAD_ec_404(self):
self.put_container("ec", "ec-con")
prolis = _test_sockets[0]
prosrv = _test_servers[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/ec-con/yes-we-have-no-bananas HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
error_lines = prosrv.logger.get_lines_for_level('error')
warn_lines = prosrv.logger.get_lines_for_level('warning')
self.assertEqual(len(error_lines), 0) # sanity
self.assertEqual(len(warn_lines), 0) # sanity
def test_PUT_expect_header_zero_content_length(self):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
if 'expect' in headers or 'Expect' in headers:
test_errors.append('Expect was in headers for object '
'server!')
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
# The (201, Exception('test')) tuples in there have the effect of
# changing the status of the initial expect response. The default
# expect response from FakeConn for 201 is 100.
# However, the object server won't send a "100 Continue" line if the
# client doesn't send an "Expect: 100-continue" header (as is the
# case with the zero-byte PUT validated by this test); nevertheless
# the object controller calls getexpect without prejudice. In this
# case the status from the response shows up early, in getexpect,
# instead of having to wait until getresponse. The Exception is in
# there to ensure that the object controller also *uses* the result
# of getexpect instead of calling getresponse, in which case our
# FakeConn would blow up.
success_codes = [(201, Exception('test'))] * 3
set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertEqual(test_errors, [])
self.assertTrue(res.status.startswith('201 '), res.status)
def test_PUT_expect_header_nonzero_content_length(self):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
if 'Expect' not in headers:
test_errors.append('Expect was not in headers for '
'non-zero byte PUT!')
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
# The (100, 201) tuples in there are just being extra explicit
# about FakeConn returning the "100 Continue" status when the
# object controller calls getexpect, which is FakeConn's default
# for 201 if no expect_status is specified.
success_codes = [(100, 201)] * 3
set_http_connect(200, 200, *success_codes,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertEqual(test_errors, [])
self.assertTrue(res.status.startswith('201 '))
def test_PUT_respects_write_affinity(self):
written_to = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
written_to.append((ipaddr, port, device))
with save_globals():
def is_r0(node):
return node['region'] == 0
object_ring = self.app.get_object_ring(None)
object_ring.max_more_nodes = 100
self.app.write_affinity_is_local_fn = is_r0
self.app.write_affinity_node_count = lambda r: 3
controller = \
ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
set_http_connect(200, 200, 201, 201, 201,
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertTrue(res.status.startswith('201 '))
self.assertEqual(3, len(written_to))
for ip, port, device in written_to:
# this is kind of a hokey test, but in FakeRing, the port is even
# when the region is 0, and odd when the region is 1, so this test
# asserts that we only wrote to nodes in region 0.
self.assertEqual(0, port % 2)
def test_PUT_respects_write_affinity_with_507s(self):
written_to = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c/o.jpg':
written_to.append((ipaddr, port, device))
with save_globals():
def is_r0(node):
return node['region'] == 0
object_ring = self.app.get_object_ring(None)
object_ring.max_more_nodes = 100
self.app.write_affinity_is_local_fn = is_r0
self.app.write_affinity_node_count = lambda r: 3
controller = \
ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
self.app.error_limit(
object_ring.get_part_nodes(1)[0], 'test')
set_http_connect(200, 200, # account, container
201, 201, 201, # 3 working backends
give_connect=test_connect)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 1
req.body = 'a'
self.app.memcache.store = {}
res = controller.PUT(req)
self.assertTrue(res.status.startswith('201 '))
self.assertEqual(3, len(written_to))
# this is kind of a hokey test, but in FakeRing, the port is even when
# the region is 0, and odd when the region is 1, so this test asserts
# that we wrote to 2 nodes in region 0, then went to 1 non-r0 node.
self.assertEqual(0, written_to[0][1] % 2) # it's (ip, port, device)
self.assertEqual(0, written_to[1][1] % 2)
self.assertNotEqual(0, written_to[2][1] % 2)
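# The error_limit() call above takes the first primary out of the
# running, so under write affinity the proxy presumably has to reach
# past the local (region 0) primaries and sends the third copy to a
# non-r0 node -- which is exactly what the port-parity assertions
# check.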
@unpatch_policies
def test_PUT_no_etag_fallocate(self):
with mock.patch('swift.obj.diskfile.fallocate') as mock_fallocate:
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'hemoleucocytic-surfactant'
fd.write('PUT /v1/a/c/o HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# one for each obj server; this test has 2
self.assertEqual(len(mock_fallocate.mock_calls), 2)
@unpatch_policies
def test_PUT_message_length_using_content_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
obj = 'j' * 20
fd.write('PUT /v1/a/c/o.content-length HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (str(len(obj)), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_using_transfer_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
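# The decoded chunked body above is 2 + 4 + 4 + 4 + 4 + 3 + 4 + 8 = 33
# bytes, which is why the following tests can reuse the same body with
# an explicit 'Content-Length: 33' header.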
@unpatch_policies
def test_PUT_message_length_using_both(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_bad_message_length(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: gzip\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 400'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_unsup_xfr_encoding(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n'
'Transfer-Encoding: gzip,chunked\r\n\r\n'
'2\r\n'
'oh\r\n'
'4\r\n'
' say\r\n'
'4\r\n'
' can\r\n'
'4\r\n'
' you\r\n'
'4\r\n'
' see\r\n'
'3\r\n'
' by\r\n'
'4\r\n'
' the\r\n'
'8\r\n'
' dawns\'\n\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 501'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_message_length_too_large(self):
with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: application/octet-stream\r\n'
'Content-Length: 33\r\n\r\n'
'oh say can you see by the dawns\'\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 413'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_PUT_last_modified(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
lm_hdr = 'Last-Modified: '
self.assertEqual(headers[:len(exp)], exp)
last_modified_put = [line for line in headers.split('\r\n')
if lm_hdr in line][0][len(lm_hdr):]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
last_modified_head = [line for line in headers.split('\r\n')
if lm_hdr in line][0][len(lm_hdr):]
self.assertEqual(last_modified_put, last_modified_head)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'If-Modified-Since: %s\r\n'
'X-Storage-Token: t\r\n\r\n' % last_modified_put)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 304'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o.last_modified HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'If-Unmodified-Since: %s\r\n'
'X-Storage-Token: t\r\n\r\n' % last_modified_put)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
def test_PUT_auto_content_type(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_content_type(filename, expected):
# The five responses here are for account_info() (HEAD to the
# account server), container_info() (HEAD to the container
# server) and three calls to _connect_put_node() (PUT to three
# object servers).
set_http_connect(201, 201, 201, 201, 201,
give_content_type=lambda content_type:
self.assertEqual(content_type,
next(expected)))
# We need to include a transfer-encoding to get past
# constraints.check_object_creation()
req = Request.blank('/v1/a/c/%s' % filename, {},
headers={'transfer-encoding': 'chunked'})
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
# If we don't check the response here we could miss problems
# in PUT()
self.assertEqual(res.status_int, 201)
test_content_type('test.jpg', iter(['', '', 'image/jpeg',
'image/jpeg', 'image/jpeg']))
test_content_type('test.html', iter(['', '', 'text/html',
'text/html', 'text/html']))
test_content_type('test.css', iter(['', '', 'text/css',
'text/css', 'text/css']))
def test_custom_mime_types_files(self):
swift_dir = mkdtemp()
try:
with open(os.path.join(swift_dir, 'mime.types'), 'w') as fp:
fp.write('foo/bar foo\n')
proxy_server.Application({'swift_dir': swift_dir},
FakeMemcache(), FakeLogger(),
FakeRing(), FakeRing())
self.assertEqual(proxy_server.mimetypes.guess_type('blah.foo')[0],
'foo/bar')
self.assertEqual(proxy_server.mimetypes.guess_type('blah.jpg')[0],
'image/jpeg')
finally:
rmtree(swift_dir, ignore_errors=True)
def test_PUT(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
self.app.memcache.store = {}
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
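# Convention for the status tuples below: the first two codes are the
# account and container HEADs, the remaining three are the object
# server PUTs (the same layout described in test_PUT_auto_content_type
# above); 'expected' is the status the proxy should return to the
# client.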
test_status_map((200, 200, 201, 201, 201), 201)
test_status_map((200, 200, 201, 201, 500), 201)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 200, 204, 500, 404), 503)
test_status_map((200, 200, 202, 202, 204), 204)
def test_PUT_connect_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, 201, -1), 201) # connect exc
# connect errors
test_status_map((200, 200, Timeout(), 201, 201, ), 201)
test_status_map((200, 200, 201, 201, Exception()), 201)
# expect errors
test_status_map((200, 200, (Timeout(), None), 201, 201), 201)
test_status_map((200, 200, (Exception(), None), 201, 201), 201)
# response errors
test_status_map((200, 200, (100, Timeout()), 201, 201), 201)
test_status_map((200, 200, (100, Exception()), 201, 201), 201)
test_status_map((200, 200, 507, 201, 201), 201) # error limited
test_status_map((200, 200, -1, 201, -1), 503)
test_status_map((200, 200, 503, -1, 503), 503)
def test_PUT_send_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg',
environ={'REQUEST_METHOD': 'PUT'},
body='some data')
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 201, -1, 201), 201)
test_status_map((200, 200, 201, -1, -1), 503)
test_status_map((200, 200, 503, 503, -1), 503)
def test_PUT_max_size(self):
with save_globals():
set_http_connect(201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 413)
def test_PUT_bad_content_type(self):
with save_globals():
set_http_connect(201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {}, headers={
'Content-Length': 0, 'Content-Type': 'foo/bar;swift_hey=45'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 400)
def test_PUT_getresponse_exceptions(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
def test_status_map(statuses, expected):
self.app.memcache.store = {}
set_http_connect(*statuses)
req = Request.blank('/v1/a/c/o.jpg', {})
req.content_length = 0
self.app.update_request(req)
try:
res = controller.PUT(req)
except HTTPException as res:
pass
expected = str(expected)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
test_status_map((200, 200, 201, 201, -1), 201)
test_status_map((200, 200, 201, -1, -1), 503)
test_status_map((200, 200, 503, 503, -1), 503)
def test_POST(self):
with save_globals():
self.app.object_post_as_copy = False
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = req.get_response(self.app)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 202, 202, 202), 202)
test_status_map((200, 200, 202, 202, 500), 202)
test_status_map((200, 200, 202, 500, 500), 503)
test_status_map((200, 200, 202, 404, 500), 503)
test_status_map((200, 200, 202, 404, 404), 404)
test_status_map((200, 200, 404, 500, 500), 503)
test_status_map((200, 200, 404, 404, 404), 404)
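# The next test patches in two storage policies and checks that object POSTs
# carry the container's X-Backend-Storage-Policy-Index on every backend
# request, and that an explicit index in the client request overrides it.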
@patch_policies([
StoragePolicy(0, 'zero', is_default=True, object_ring=FakeRing()),
StoragePolicy(1, 'one', object_ring=FakeRing()),
])
def test_POST_backend_headers(self):
# reset the router post patch_policies
self.app.obj_controller_router = proxy_server.ObjectControllerRouter()
self.app.object_post_as_copy = False
self.app.sort_nodes = lambda nodes: nodes
backend_requests = []
def capture_requests(ip, port, method, path, headers, *args,
**kwargs):
backend_requests.append((method, path, headers))
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue'})
# we want the container_info response to say a policy index of 1
resp_headers = {'X-Backend-Storage-Policy-Index': 1}
with mocked_http_conn(
200, 200, 202, 202, 202,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, next, fake_conn.code_iter)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 5)
def check_request(req, method, path, headers=None):
req_method, req_path, req_headers = req
self.assertEqual(method, req_method)
# caller can ignore leading path parts
self.assertTrue(req_path.endswith(path),
'expected path to end with %s, it was %s' % (
path, req_path))
headers = headers or {}
# caller can ignore some headers
for k, v in headers.items():
self.assertEqual(req_headers[k], v)
account_request = backend_requests.pop(0)
check_request(account_request, method='HEAD', path='/sda/0/a')
container_request = backend_requests.pop(0)
check_request(container_request, method='HEAD', path='/sda/0/a/c')
# make sure backend requests included expected container headers
container_headers = {}
for request in backend_requests:
req_headers = request[2]
device = req_headers['x-container-device']
host = req_headers['x-container-host']
container_headers[device] = host
expectations = {
'method': 'POST',
'path': '/0/a/c/o',
'headers': {
'X-Container-Partition': '0',
'Connection': 'close',
'User-Agent': 'proxy-server %s' % os.getpid(),
'Host': 'localhost:80',
'Referer': 'POST http://localhost/v1/a/c/o',
'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': '1'
},
}
check_request(request, **expectations)
expected = {}
for i, device in enumerate(['sda', 'sdb', 'sdc']):
expected[device] = '10.0.0.%d:100%d' % (i, i)
self.assertEqual(container_headers, expected)
# and again with policy override
self.app.memcache.store = {}
backend_requests = []
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': 0})
with mocked_http_conn(
200, 200, 202, 202, 202,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, next, fake_conn.code_iter)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 5)
for request in backend_requests[2:]:
expectations = {
'method': 'POST',
'path': '/0/a/c/o', # ignore device bit
'headers': {
'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': '0',
}
}
check_request(request, **expectations)
# and this time with post as copy
self.app.object_post_as_copy = True
self.app.memcache.store = {}
backend_requests = []
req = Request.blank('/v1/a/c/o', {}, method='POST',
headers={'X-Object-Meta-Color': 'Blue',
'X-Backend-Storage-Policy-Index': 0})
with mocked_http_conn(
200, 200, 200, 200, 200, 201, 201, 201,
headers=resp_headers, give_connect=capture_requests
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, next, fake_conn.code_iter)
self.assertEqual(resp.status_int, 202)
self.assertEqual(len(backend_requests), 8)
policy0 = {'X-Backend-Storage-Policy-Index': '0'}
policy1 = {'X-Backend-Storage-Policy-Index': '1'}
expected = [
# account info
{'method': 'HEAD', 'path': '/0/a'},
# container info
{'method': 'HEAD', 'path': '/0/a/c'},
# x-newest
{'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
{'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
{'method': 'GET', 'path': '/0/a/c/o', 'headers': policy1},
# new writes
{'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
{'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
{'method': 'PUT', 'path': '/0/a/c/o', 'headers': policy0},
]
for request, expectations in zip(backend_requests, expected):
check_request(request, **expectations)
def test_POST_as_copy(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar'})
self.app.update_request(req)
res = req.get_response(self.app)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 200, 200, 200, 200, 202, 202, 202), 202)
test_status_map((200, 200, 200, 200, 200, 202, 202, 500), 202)
test_status_map((200, 200, 200, 200, 200, 202, 500, 500), 503)
test_status_map((200, 200, 200, 200, 200, 202, 404, 500), 503)
test_status_map((200, 200, 200, 200, 200, 202, 404, 404), 404)
test_status_map((200, 200, 200, 200, 200, 404, 500, 500), 503)
test_status_map((200, 200, 200, 200, 200, 404, 404, 404), 404)
def test_DELETE(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'DELETE'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
test_status_map((200, 200, 204, 204, 204), 204)
test_status_map((200, 200, 204, 204, 500), 204)
test_status_map((200, 200, 204, 404, 404), 404)
test_status_map((200, 204, 500, 500, 404), 503)
test_status_map((200, 200, 404, 404, 404), 404)
test_status_map((200, 200, 400, 400, 400), 400)
def test_HEAD(self):
with save_globals():
def test_status_map(statuses, expected):
set_http_connect(*statuses)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
if expected < 400:
self.assertTrue('x-works' in res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
test_status_map((200, 200, 200, 404, 404), 200)
test_status_map((200, 200, 200, 500, 404), 200)
test_status_map((200, 200, 304, 500, 404), 304)
test_status_map((200, 200, 404, 404, 404), 404)
test_status_map((200, 200, 404, 404, 500), 404)
test_status_map((200, 200, 500, 500, 500), 503)
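# With 'x-newest: true' the proxy should HEAD all object replicas and report
# the newest last-modified timestamp it saw.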
def test_HEAD_newest(self):
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'},
headers={'x-newest': 'true'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
# acct cont obj obj obj
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
test_status_map((200, 200, 404, 404, 200), 200, ('0', '0', None,
None, '1'), '1')
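# Same idea for GET; the second block below repeats the checks without the
# x-newest header, where the first usable object response wins instead.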
def test_GET_newest(self):
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'GET'},
headers={'x-newest': 'true'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, None), None)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
None, '1'), '1')
with save_globals():
def test_status_map(statuses, expected, timestamps,
expected_timestamp):
set_http_connect(*statuses, timestamps=timestamps)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
self.assertEqual(res.headers.get('last-modified'),
expected_timestamp)
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'2', '3'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '2'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '1',
'3', '1'), '1')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', '3',
'3', '1'), '3')
test_status_map((200, 200, 200, 200, 200), 200, ('0', '0', None,
'1', '2'), None)
def test_POST_meta_val_len(self):
with save_globals():
limit = constraints.MAX_META_VALUE_LENGTH
self.app.object_post_as_copy = False
ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * limit})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * (limit + 1)})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_as_copy_meta_val_len(self):
with save_globals():
limit = constraints.MAX_META_VALUE_LENGTH
set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
# acct cont objc objc objc obj obj obj
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * limit})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
'X-Object-Meta-Foo': 'x' * (limit + 1)})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_key_len(self):
with save_globals():
limit = constraints.MAX_META_NAME_LENGTH
self.app.object_post_as_copy = False
set_http_connect(200, 200, 202, 202, 202)
# acct cont obj obj obj
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * limit): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * (limit + 1)): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_as_copy_meta_key_len(self):
with save_globals():
limit = constraints.MAX_META_NAME_LENGTH
set_http_connect(200, 200, 200, 200, 200, 202, 202, 202)
# acct cont objc objc objc obj obj obj
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * limit): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
set_http_connect(202, 202, 202)
req = Request.blank(
'/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'foo/bar',
('X-Object-Meta-' + 'x' * (limit + 1)): 'x'})
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_count(self):
with save_globals():
limit = constraints.MAX_META_COUNT
headers = dict(
(('X-Object-Meta-' + str(i), 'a') for i in range(limit + 1)))
headers.update({'Content-Type': 'foo/bar'})
set_http_connect(202, 202, 202)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers=headers)
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_POST_meta_size(self):
with save_globals():
limit = constraints.MAX_META_OVERALL_SIZE
count = limit // 256 # enough to cause the limit to be reached
headers = dict(
(('X-Object-Meta-' + str(i), 'a' * 256)
for i in range(count + 1)))
headers.update({'Content-Type': 'foo/bar'})
set_http_connect(202, 202, 202)
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
headers=headers)
self.app.update_request(req)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
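# The next two tests verify that the proxy keeps a client-supplied
# Content-Type as-is on PUT, and only guesses one from the object name when
# X-Detect-Content-Type is set.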
def test_PUT_not_autodetect_content_type(self):
with save_globals():
headers = {'Content-Type': 'something/right', 'Content-Length': 0}
it_worked = []
def verify_content_type(ipaddr, port, device, partition,
method, path, headers=None,
query_string=None):
if path == '/a/c/o.html':
it_worked.append(
headers['Content-Type'].startswith('something/right'))
set_http_connect(204, 204, 201, 201, 201,
give_connect=verify_content_type)
req = Request.blank('/v1/a/c/o.html', {'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
req.get_response(self.app)
self.assertNotEqual(it_worked, [])
self.assertTrue(all(it_worked))
def test_PUT_autodetect_content_type(self):
with save_globals():
headers = {'Content-Type': 'something/wrong', 'Content-Length': 0,
'X-Detect-Content-Type': 'True'}
it_worked = []
def verify_content_type(ipaddr, port, device, partition,
method, path, headers=None,
query_string=None):
if path == '/a/c/o.html':
it_worked.append(
headers['Content-Type'].startswith('text/html'))
set_http_connect(204, 204, 201, 201, 201,
give_connect=verify_content_type)
req = Request.blank('/v1/a/c/o.html', {'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
req.get_response(self.app)
self.assertNotEqual(it_worked, [])
self.assertTrue(all(it_worked))
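# SlowBody below trickles the request body out one byte at a time so that
# lowering app.client_timeout causes the upload to time out with a 408.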
def test_client_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
class SlowBody(object):
def __init__(self):
self.sent = 0
def read(self, size=-1):
if self.sent < 4:
sleep(0.1)
self.sent += 1
return ' '
return ''
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.app.client_timeout = 0.05
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': SlowBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(201, 201, 201)
# obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_client_disconnect(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
class DisconnectedBody(object):
def __init__(self):
self.sent = 0
def read(self, size=-1):
return ''
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': DisconnectedBody()},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_node_read_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
set_http_connect(200, 200, 200, slow=0.1)
req.sent_size = 0
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
self.app.recoverable_node_timeout = 0.1
set_http_connect(200, 200, 200, slow=1.0)
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
def test_node_read_timeout_retry(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
self.app.recoverable_node_timeout = 0.1
set_http_connect(200, 200, 200, slow=[1.0, 1.0, 1.0])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual('', resp.body)
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'a', 'a'])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'b', 'a'])
resp = req.get_response(self.app)
got_exc = False
try:
self.assertEqual(resp.body, 'lalala')
except ChunkReadTimeout:
got_exc = True
self.assertTrue(not got_exc)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
set_http_connect(200, 200, 200, body='lalala',
slow=[1.0, 1.0], etags=['a', 'b', 'b'])
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
def test_node_write_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
self.app.container_ring.get_nodes('account')
for dev in self.app.container_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
object_ring = self.app.get_object_ring(None)
object_ring.get_nodes('account')
for dev in object_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'},
body=' ')
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201, slow=0.1)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.app.node_timeout = 0.1
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '4',
'Content-Type': 'text/plain'},
body=' ')
self.app.update_request(req)
set_http_connect(201, 201, 201, slow=1.0)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_node_request_setting(self):
baseapp = proxy_server.Application({'request_node_count': '3'},
FakeMemcache(),
container_ring=FakeRing(),
account_ring=FakeRing())
self.assertEqual(baseapp.request_node_count(3), 3)
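# iter_nodes should yield the ring's primary nodes first and then only as
# many handoffs as request_node_count allows, logging a handoff warning (and
# incrementing handoff_count) for each handoff used because a primary was
# error-limited.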
def test_iter_nodes(self):
with save_globals():
try:
object_ring = self.app.get_object_ring(None)
object_ring.max_more_nodes = 2
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 5)
object_ring.max_more_nodes = 6
self.app.request_node_count = lambda r: 20
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring,
partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 9)
# zero error-limited primary nodes -> no handoff warnings
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
object_ring.max_more_nodes = 20
partition, nodes = object_ring.get_nodes('account',
'container',
'object')
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [])
self.assertEqual(self.app.logger.get_increments(), [])
# one error-limited primary node -> one handoff warning
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
self.app._error_limiting = {} # clear out errors
set_node_errors(self.app, object_ring._devs[0], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (5)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count'])
# two error-limited primary nodes -> two handoff warnings
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 7
self.app._error_limiting = {} # clear out errors
for i in range(2):
set_node_errors(self.app, object_ring._devs[i], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 7)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (5)',), {}),
(('Handoff requested (6)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count',
'handoff_count'])
# all error-limited primary nodes -> four handoff warnings,
# plus a handoff-all metric
self.app.log_handoffs = True
self.app.logger = FakeLogger()
self.app.request_node_count = lambda r: 10
object_ring.set_replicas(4) # otherwise we run out of handoffs
self.app._error_limiting = {} # clear out errors
for i in range(4):
set_node_errors(self.app, object_ring._devs[i], 999,
last_error=(2 ** 63 - 1))
collected_nodes = []
for node in self.app.iter_nodes(object_ring, partition):
collected_nodes.append(node)
self.assertEqual(len(collected_nodes), 10)
self.assertEqual(self.app.logger.log_dict['warning'], [
(('Handoff requested (7)',), {}),
(('Handoff requested (8)',), {}),
(('Handoff requested (9)',), {}),
(('Handoff requested (10)',), {})])
self.assertEqual(self.app.logger.get_increments(),
['handoff_count',
'handoff_count',
'handoff_count',
'handoff_count',
'handoff_all_count'])
finally:
object_ring.max_more_nodes = 0
def test_iter_nodes_calls_sort_nodes(self):
with mock.patch.object(self.app, 'sort_nodes') as sort_nodes:
object_ring = self.app.get_object_ring(None)
for node in self.app.iter_nodes(object_ring, 0):
pass
sort_nodes.assert_called_once_with(
object_ring.get_part_nodes(0))
def test_iter_nodes_skips_error_limited(self):
with mock.patch.object(self.app, 'sort_nodes', lambda n: n):
object_ring = self.app.get_object_ring(None)
first_nodes = list(self.app.iter_nodes(object_ring, 0))
second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertTrue(first_nodes[0] in second_nodes)
self.app.error_limit(first_nodes[0], 'test')
second_nodes = list(self.app.iter_nodes(object_ring, 0))
self.assertTrue(first_nodes[0] not in second_nodes)
def test_iter_nodes_gives_extra_if_error_limited_inline(self):
object_ring = self.app.get_object_ring(None)
with mock.patch.object(self.app, 'sort_nodes', lambda n: n), \
mock.patch.object(self.app, 'request_node_count',
lambda r: 6), \
mock.patch.object(object_ring, 'max_more_nodes', 99):
first_nodes = list(self.app.iter_nodes(object_ring, 0))
second_nodes = []
for node in self.app.iter_nodes(object_ring, 0):
if not second_nodes:
self.app.error_limit(node, 'test')
second_nodes.append(node)
self.assertEqual(len(first_nodes), 6)
self.assertEqual(len(second_nodes), 7)
def test_iter_nodes_with_custom_node_iter(self):
object_ring = self.app.get_object_ring(None)
node_list = [dict(id=n, ip='1.2.3.4', port=n, device='D')
for n in range(10)]
with mock.patch.object(self.app, 'sort_nodes', lambda n: n), \
mock.patch.object(self.app, 'request_node_count',
lambda r: 3):
got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list[:3], got_nodes)
with mock.patch.object(self.app, 'sort_nodes', lambda n: n), \
mock.patch.object(self.app, 'request_node_count',
lambda r: 1000000):
got_nodes = list(self.app.iter_nodes(object_ring, 0,
node_iter=iter(node_list)))
self.assertEqual(node_list, got_nodes)
def test_best_response_sets_headers(self):
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object', headers=[{'X-Test': '1'},
{'X-Test': '2'},
{'X-Test': '3'}])
self.assertEqual(resp.headers['X-Test'], '1')
def test_best_response_sets_etag(self):
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object')
self.assertEqual(resp.etag, None)
resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3,
'Object',
etag='68b329da9893e34099c7d8ad5cb9c940'
)
self.assertEqual(resp.etag, '68b329da9893e34099c7d8ad5cb9c940')
def test_proxy_passes_content_type(self):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
set_http_connect(200, 200, 200)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_type, 'x-application/test')
set_http_connect(200, 200, 200)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 0)
set_http_connect(200, 200, 200, slow=True)
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 4)
def test_proxy_passes_content_length_on_head(self):
with save_globals():
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 0)
set_http_connect(200, 200, 200, slow=True)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.content_length, 4)
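# Once a node accumulates more than error_suppression_limit errors it is
# skipped entirely, so requests 503 even when the remaining backends answer
# 200, until the suppression interval elapses.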
def test_error_limiting(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]),
self.app.error_suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
self.assert_status_map(controller.PUT, (200, 200, 200, 201, 201,
201), 503)
self.assert_status_map(controller.POST,
(200, 200, 200, 200, 200, 200, 202, 202,
202), 503)
self.assert_status_map(controller.DELETE,
(200, 200, 200, 204, 204, 204), 503)
self.app.error_suppression_interval = -300
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
200)
self.assertRaises(BaseException,
self.assert_status_map, controller.DELETE,
(200, 200, 200, 204, 204, 204), 503,
raise_exc=True)
def test_error_limiting_survives_ring_reload(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
self.assert_status_map(controller.HEAD, (200, 200, 503, 200, 200),
200)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, object_ring.devs[0])
is not None)
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD, (200, 200, 503, 503,
503), 503)
self.assertEqual(
node_error_count(controller.app, object_ring.devs[0]),
self.app.error_suppression_limit + 1)
# wipe out any state in the ring
for policy in POLICIES:
policy.object_ring = FakeRing(base_port=3000)
# and we still get an error, which proves that the
# error-limiting info survived a ring reload
self.assert_status_map(controller.HEAD, (200, 200, 200, 200, 200),
503)
def test_PUT_error_limiting(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
self.assert_status_map(controller.PUT, (200, 200, 503, 200, 200),
200)
# 2, not 1, because assert_status_map() calls the method twice
odevs = object_ring.devs
self.assertEqual(node_error_count(controller.app, odevs[0]), 2)
self.assertEqual(node_error_count(controller.app, odevs[1]), 0)
self.assertEqual(node_error_count(controller.app, odevs[2]), 0)
self.assertTrue(
node_last_error(controller.app, odevs[0]) is not None)
self.assertTrue(node_last_error(controller.app, odevs[1]) is None)
self.assertTrue(node_last_error(controller.app, odevs[2]) is None)
def test_PUT_error_limiting_last_node(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
controller.app.sort_nodes = lambda l: l
object_ring = controller.app.get_object_ring(None)
# acc con obj obj obj
self.assert_status_map(controller.PUT, (200, 200, 200, 200, 503),
200)
# 2, not 1, because assert_status_map() calls the method twice
odevs = object_ring.devs
self.assertEqual(node_error_count(controller.app, odevs[0]), 0)
self.assertEqual(node_error_count(controller.app, odevs[1]), 0)
self.assertEqual(node_error_count(controller.app, odevs[2]), 2)
self.assertTrue(node_last_error(controller.app, odevs[0]) is None)
self.assertTrue(node_last_error(controller.app, odevs[1]) is None)
self.assertTrue(
node_last_error(controller.app, odevs[2]) is not None)
def test_acc_or_con_missing_returns_404(self):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
self.app._error_limiting = {}
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
self.app.update_request(req)
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 200)
set_http_connect(404, 404, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 503, 404)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 503, 503)
# acct acct acct
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 200, 204, 204, 204)
# acct cont obj obj obj
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 204)
set_http_connect(200, 404, 404, 404)
# acct cont cont cont
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 503, 503, 503)
# acct cont cont cont
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(
self.app, dev, self.app.error_suppression_limit + 1,
time.time())
set_http_connect(200)
# acct [isn't actually called since everything
# is error limited]
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
for dev in self.app.account_ring.devs:
set_node_errors(self.app, dev, 0, last_error=None)
for dev in self.app.container_ring.devs:
set_node_errors(self.app, dev,
self.app.error_suppression_limit + 1,
time.time())
set_http_connect(200, 200)
# acct cont [isn't actually called since
# everything is error limited]
# make sure to use a fresh request without cached env
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'})
resp = getattr(controller, 'DELETE')(req)
self.assertEqual(resp.status_int, 404)
def test_PUT_POST_requires_container_exist(self):
with save_globals():
self.app.object_post_as_copy = False
self.app.memcache = FakeMemcacheReturnsNone()
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 404, 404, 404, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'text/plain'})
self.app.update_request(req)
resp = controller.POST(req)
self.assertEqual(resp.status_int, 404)
def test_PUT_POST_as_copy_requires_container_exist(self):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 404, 404, 404, 200, 200, 200)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(200, 404, 404, 404, 200, 200, 200, 200, 200, 200)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'text/plain'})
self.app.update_request(req)
resp = controller.POST(req)
self.assertEqual(resp.status_int, 404)
def test_bad_metadata(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-' + (
'a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={
'Content-Length': '0',
'X-Object-Meta-' + (
'a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Object-Meta-Too-Long': 'a' *
(constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
for x in range(constraints.MAX_META_COUNT):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Object-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {'Content-Length': '0'}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < constraints.MAX_META_OVERALL_SIZE - 4 - \
constraints.MAX_META_VALUE_LENGTH:
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Object-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Object-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Object-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
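# Helper for the copy tests below: builds a ReplicatedObjectController for
# the request, programs the fake backend with the given status codes, and
# fails the test if any programmed status is left unconsumed.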
@contextmanager
def controller_context(self, req, *args, **kwargs):
_v, account, container, obj = utils.split_path(req.path, 4, 4, True)
controller = ReplicatedObjectController(
self.app, account, container, obj)
self.app.update_request(req)
self.app.memcache.store = {}
with save_globals():
new_connect = set_http_connect(*args, **kwargs)
yield controller
unused_status_list = []
while True:
try:
unused_status_list.append(next(new_connect.code_iter))
except StopIteration:
break
if unused_status_list:
raise self.fail('UN-USED STATUS CODES: %r' %
unused_status_list)
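# In the X-Copy-From / COPY tests the comment under each status_list gives
# the backend request order; the 'objc' entries appear to be the reads of the
# copy source, followed by the writes of the new object ('obj').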
def test_basic_put_with_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_basic_put_with_x_copy_from_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_basic_put_with_x_copy_from_across_container(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c2/o'})
status_list = (200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont conc objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c2/o')
def test_basic_put_with_x_copy_from_across_container_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c2/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c2/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_non_zero_content_length(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5',
'X-Copy-From': 'c/o'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
def test_copy_non_zero_content_length_with_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5',
'X-Copy-From': 'c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 400)
def test_copy_with_slashes_in_x_copy_from(self):
# extra source path parsing
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o/o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_copy_with_slashes_in_x_copy_from_and_account(self):
# extra source path parsing
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o/o2',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_spaces_in_x_copy_from(self):
# space in source path
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o%20o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o%20o2')
def test_copy_with_spaces_in_x_copy_from_and_account(self):
# space in source path
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': 'c/o%20o2',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o%20o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_leading_slash_in_x_copy_from(self):
# repeat tests with leading /
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_copy_with_leading_slash_in_x_copy_from_and_account(self):
# repeat tests with leading /
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_leading_slash_and_slashes_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o/o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_copy_with_leading_slash_and_slashes_in_x_copy_from_acct(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o/o2',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acc1 con1 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_copy_with_no_object_in_x_copy_from(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
try:
controller.PUT(req)
except HTTPException as resp:
self.assertEqual(resp.status_int // 100, 4) # client error
else:
raise self.fail('Invalid X-Copy-From did not raise '
'client error')
def test_copy_with_no_object_in_x_copy_from_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c',
'X-Copy-From-Account': 'a'})
status_list = (200, 200)
# acct cont
with self.controller_context(req, *status_list) as controller:
try:
controller.PUT(req)
except HTTPException as resp:
self.assertEqual(resp.status_int // 100, 4) # client error
else:
raise self.fail('Invalid X-Copy-From did not raise '
'client error')
def test_copy_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status_list = (200, 200, 503, 503, 503)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 503)
def test_copy_server_error_reading_source_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 503, 503, 503)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 503)
def test_copy_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
# not found
status_list = (200, 200, 404, 404, 404)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
def test_copy_not_found_reading_source_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
# not found
status_list = (200, 200, 200, 200, 404, 404, 404)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 404)
def test_copy_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
status_list = (200, 200, 404, 404, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
def test_copy_with_some_missing_sources_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Copy-From-Account': 'a'})
status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
def test_copy_with_object_metadata(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Object-Meta-Ours': 'okay'})
# test object metadata
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'), 'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
def test_copy_with_object_metadata_and_account(self):
req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o',
'X-Object-Meta-Ours': 'okay',
'X-Copy-From-Account': 'a'})
# test object metadata
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.PUT(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'), 'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
@_limit_max_file_size
def test_copy_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0',
'X-Copy-From': '/c/o'})
# copy-from object is too large to fit in target object
class LargeResponseBody(object):
def __len__(self):
return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
copy_from_obj_body = LargeResponseBody()
status_list = (200, 200, 200, 200, 200)
# acct cont objc objc objc
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
self.app.update_request(req)
self.app.memcache.store = {}
try:
resp = controller.PUT(req)
except HTTPException as resp:
pass
self.assertEqual(resp.status_int, 413)
def test_basic_COPY(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o2'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_basic_COPY_account(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c1/o2',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_across_containers(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c2/o'})
status_list = (200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont c2 objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_COPY_source_with_slashes_in_name(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_COPY_account_source_with_slashes_in_name(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
def test_COPY_account_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_source_with_slashes_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
def test_COPY_account_source_with_slashes_destination_leading_slash(self):
req = Request.blank('/v1/a/c/o/o2',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from'], 'c/o/o2')
self.assertEqual(resp.headers['x-copied-from-account'], 'a')
def test_COPY_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o'})
status_list = [] # no requests needed
with self.controller_context(req, *status_list) as controller:
self.assertRaises(HTTPException, controller.COPY, req)
def test_COPY_account_no_object_in_destination(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c_o',
'Destination-Account': 'a1'})
status_list = [] # no requests needed
with self.controller_context(req, *status_list) as controller:
self.assertRaises(HTTPException, controller.COPY, req)
def test_COPY_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 503, 503, 503)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 503)
def test_COPY_account_server_error_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 503, 503, 503)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 503)
def test_COPY_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 404, 404, 404)
# acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 404)
def test_COPY_account_not_found_reading_source(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 404, 404, 404)
# acct cont acct cont objc objc objc
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 404)
def test_COPY_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
status_list = (200, 200, 404, 404, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
def test_COPY_account_with_some_missing_sources(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 404, 404, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
def test_COPY_with_metadata(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o',
'X-Object-Meta-Ours': 'okay'})
status_list = (200, 200, 200, 200, 200, 201, 201, 201)
# acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'),
'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
def test_COPY_account_with_metadata(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'X-Object-Meta-Ours': 'okay',
'Destination-Account': 'a1'})
status_list = (200, 200, 200, 200, 200, 200, 200, 201, 201, 201)
# acct cont acct cont objc objc objc obj obj obj
with self.controller_context(req, *status_list) as controller:
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers.get('x-object-meta-test'),
'testing')
self.assertEqual(resp.headers.get('x-object-meta-ours'), 'okay')
self.assertEqual(resp.headers.get('x-delete-at'), '9876543210')
@_limit_max_file_size
def test_COPY_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
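        # Fake source body whose reported length exceeds MAX_FILE_SIZE, so the
        # COPY is expected to be rejected with 413.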
class LargeResponseBody(object):
def __len__(self):
return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
copy_from_obj_body = LargeResponseBody()
status_list = (200, 200, 200, 200, 200)
# acct cont objc objc objc
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
try:
resp = controller.COPY(req)
except HTTPException as resp:
pass
self.assertEqual(resp.status_int, 413)
@_limit_max_file_size
def test_COPY_account_source_larger_than_max_file_size(self):
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
class LargeResponseBody(object):
def __len__(self):
return constraints.MAX_FILE_SIZE + 1
def __getitem__(self, key):
return ''
copy_from_obj_body = LargeResponseBody()
status_list = (200, 200, 200, 200, 200)
# acct cont objc objc objc
kwargs = dict(body=copy_from_obj_body)
with self.controller_context(req, *status_list,
**kwargs) as controller:
try:
resp = controller.COPY(req)
except HTTPException as resp:
pass
self.assertEqual(resp.status_int, 413)
def test_COPY_newest(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
req.account = 'a'
controller.object_name = 'o'
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
# act cont objc objc objc obj obj obj
timestamps=('1', '1', '1', '3', '2', '4', '4',
'4'))
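            # The object-server GET timestamps are '1', '3' and '2'; the proxy
            # should read from the newest ('3'), which is checked below via
            # X-Copied-From-Last-Modified.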
self.app.memcache.store = {}
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from-last-modified'],
'3')
def test_COPY_account_newest(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
req.account = 'a'
controller.object_name = 'o'
set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
# act cont acct cont objc objc objc obj obj obj
timestamps=('1', '1', '1', '1', '3', '2', '1',
'4', '4', '4'))
self.app.memcache.store = {}
resp = controller.COPY(req)
self.assertEqual(resp.status_int, 201)
self.assertEqual(resp.headers['x-copied-from-last-modified'],
'3')
def test_COPY_delete_at(self):
with save_globals():
backend_requests = []
def capture_requests(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
backend_requests.append((method, path, headers))
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c/o'})
self.app.update_request(req)
resp = controller.COPY(req)
self.assertEqual(201, resp.status_int) # sanity
for method, path, given_headers in backend_requests:
if method != 'PUT':
continue
self.assertEqual(given_headers.get('X-Delete-At'),
'9876543210')
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_COPY_account_delete_at(self):
with save_globals():
backend_requests = []
def capture_requests(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
backend_requests.append((method, path, headers))
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
set_http_connect(200, 200, 200, 200, 200, 200, 200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': '/c1/o',
'Destination-Account': 'a1'})
self.app.update_request(req)
resp = controller.COPY(req)
self.assertEqual(201, resp.status_int) # sanity
for method, path, given_headers in backend_requests:
if method != 'PUT':
continue
self.assertEqual(given_headers.get('X-Delete-At'),
'9876543210')
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_chunked_put(self):
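        # Minimal file-like body that hands out 'a' bytes on demand, standing
        # in for a chunked request body of a known size.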
class ChunkedFile(object):
def __init__(self, bytes):
self.bytes = bytes
self.read_bytes = 0
@property
def bytes_left(self):
return self.bytes - self.read_bytes
def read(self, amt=None):
if self.read_bytes >= self.bytes:
raise StopIteration()
if not amt:
amt = self.bytes_left
data = 'a' * min(amt, self.bytes_left)
self.read_bytes += len(data)
return data
with save_globals():
set_http_connect(201, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
                                environ={'REQUEST_METHOD': 'PUT'},
headers={'Transfer-Encoding': 'chunked',
'Content-Type': 'foo/bar'})
req.body_file = ChunkedFile(10)
self.app.memcache.store = {}
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int // 100, 2) # success
            # test 413 entity too large
set_http_connect(201, 201, 201, 201)
req = Request.blank('/v1/a/c/o',
                                environ={'REQUEST_METHOD': 'PUT'},
headers={'Transfer-Encoding': 'chunked',
'Content-Type': 'foo/bar'})
req.body_file = ChunkedFile(11)
self.app.memcache.store = {}
self.app.update_request(req)
with mock.patch('swift.common.constraints.MAX_FILE_SIZE', 10):
res = controller.PUT(req)
self.assertEqual(res.status_int, 413)
@unpatch_policies
def test_chunked_put_bad_version(self):
# Check bad version
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v0 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_path(self):
# Check bad path
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET invalid HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_utf8(self):
# Check invalid utf-8
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a%80 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_path_no_controller(self):
# Check bad path, no controller
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1 HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_bad_method(self):
# Check bad method
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('LICK /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 405'
self.assertEqual(headers[:len(exp)], exp)
@unpatch_policies
def test_chunked_put_unhandled_exception(self):
# Check unhandled exception
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv) = _test_servers
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
orig_update_request = prosrv.update_request
def broken_update_request(*args, **kwargs):
raise Exception('fake: this should be printed')
prosrv.update_request = broken_update_request
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 500'
self.assertEqual(headers[:len(exp)], exp)
prosrv.update_request = orig_update_request
@unpatch_policies
def test_chunked_put_head_account(self):
        # HEAD the account; mostly a double-check, but mainly here to test
        # the part of Application.log_request that 'enforces' a
        # content_length on the response.
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('HEAD /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('\r\nContent-Length: 0\r\n' in headers)
@unpatch_policies
def test_chunked_put_utf8_all_the_way_down(self):
# Test UTF-8 Unicode all the way through the system
ustr = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xba \xe1\xbc\xb0\xce' \
'\xbf\xe1\xbd\xbb\xce\x87 \xcf\x84\xe1\xbd\xb0 \xcf' \
'\x80\xe1\xbd\xb1\xce\xbd\xcf\x84\xca\xbc \xe1\xbc' \
'\x82\xce\xbd \xe1\xbc\x90\xce\xbe\xe1\xbd\xb5\xce' \
'\xba\xce\xbf\xce\xb9 \xcf\x83\xce\xb1\xcf\x86\xe1' \
'\xbf\x86.Test'
ustr_short = '\xe1\xbc\xb8\xce\xbf\xe1\xbd\xbatest'
# Create ustr container
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# List account with ustr container (test plain)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
containers = fd.read().split('\n')
self.assertTrue(ustr in containers)
# List account with ustr container (test json)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a?format=json HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
listing = json.loads(fd.read())
self.assertTrue(ustr.decode('utf8') in [l['name'] for l in listing])
# List account with ustr container (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a?format=xml HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('<name>%s</name>' % ustr in fd.read())
# Create ustr object with ustr metadata in ustr container
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'X-Object-Meta-%s: %s\r\nContent-Length: 0\r\n\r\n' %
(quote(ustr), quote(ustr), quote(ustr_short),
quote(ustr)))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# List ustr container with ustr object (test plain)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
objects = fd.read().split('\n')
self.assertTrue(ustr in objects)
# List ustr container with ustr object (test json)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?format=json HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' %
quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
listing = json.loads(fd.read())
self.assertEqual(listing[0]['name'], ustr.decode('utf8'))
# List ustr container with ustr object (test xml)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?format=xml HTTP/1.1\r\n'
'Host: localhost\r\nConnection: close\r\n'
'X-Storage-Token: t\r\nContent-Length: 0\r\n\r\n' %
quote(ustr))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('<name>%s</name>' % ustr in fd.read())
# Retrieve ustr object with ustr metadata
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' %
(quote(ustr), quote(ustr)))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertTrue('\r\nX-Object-Meta-%s: %s\r\n' %
(quote(ustr_short).lower(), quote(ustr)) in headers)
@unpatch_policies
def test_chunked_put_chunked_put(self):
# Do chunked object put
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# Also happens to assert that x-storage-token is taken as a
# replacement for x-auth-token.
fd.write('PUT /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'2\r\noh\r\n4\r\n hai\r\nf\r\n123456789abcdef\r\n'
'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Ensure we get what we put
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/o/chunky HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Auth-Token: t\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
self.assertEqual(body, 'oh hai123456789abcdef')
@unpatch_policies
def test_conditional_range_get(self):
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis,
obj3lis) = _test_sockets
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# make a container
fd = sock.makefile()
fd.write('PUT /v1/a/con HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n')
fd.flush()
exp = 'HTTP/1.1 201'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
# put an object in it
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/con/o HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: 10\r\n'
'Content-Type: text/plain\r\n'
'\r\n'
'abcdefghij\r\n')
fd.flush()
exp = 'HTTP/1.1 201'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
# request with both If-None-Match and Range
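        # the ETag match should take precedence over the Range, so we expect
        # 304 Not Modified rather than 206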
etag = md5("abcdefghij").hexdigest()
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/con/o HTTP/1.1\r\n' +
'Host: localhost\r\n' +
'Connection: close\r\n' +
'X-Storage-Token: t\r\n' +
'If-None-Match: "' + etag + '"\r\n' +
'Range: bytes=3-8\r\n' +
'\r\n')
fd.flush()
exp = 'HTTP/1.1 304'
headers = readuntil2crlfs(fd)
self.assertEqual(headers[:len(exp)], exp)
def test_mismatched_etags(self):
with save_globals():
# no etag supplied, object servers return success w/ diff values
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '0'})
self.app.update_request(req)
set_http_connect(200, 201, 201, 201,
etags=[None,
'68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c941'])
resp = controller.PUT(req)
self.assertEqual(resp.status_int // 100, 5) # server error
# req supplies etag, object servers return 422 - mismatch
headers = {'Content-Length': '0',
'ETag': '68b329da9893e34099c7d8ad5cb9c940'}
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers=headers)
self.app.update_request(req)
set_http_connect(200, 422, 422, 503,
etags=['68b329da9893e34099c7d8ad5cb9c940',
'68b329da9893e34099c7d8ad5cb9c941',
None,
None])
resp = controller.PUT(req)
self.assertEqual(resp.status_int // 100, 4) # client error
def test_response_get_accept_ranges_header(self):
with save_globals():
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.GET(req)
self.assertTrue('accept-ranges' in resp.headers)
self.assertEqual(resp.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 200)
resp = controller.HEAD(req)
self.assertTrue('accept-ranges' in resp.headers)
self.assertEqual(resp.headers['accept-ranges'], 'bytes')
def test_GET_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.GET(req)
self.assertTrue(called[0])
def test_HEAD_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.HEAD(req)
self.assertTrue(called[0])
def test_POST_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
self.app.object_post_as_copy = False
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_POST_as_copy_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_PUT_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
def test_COPY_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'COPY'},
headers={'Destination': 'c/o'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.COPY(req)
self.assertTrue(called[0])
def test_POST_converts_delete_after_to_delete_at(self):
with save_globals():
self.app.object_post_as_copy = False
controller = ReplicatedObjectController(
self.app, 'account', 'container', 'object')
set_http_connect(200, 200, 202, 202, 202)
self.app.memcache.store = {}
orig_time = time.time
try:
t = time.time()
time.time = lambda: t
req = Request.blank('/v1/a/c/o', {},
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '60'})
self.app.update_request(req)
res = controller.POST(req)
self.assertEqual(res.status, '202 Fake')
self.assertEqual(req.headers.get('x-delete-at'),
str(int(t + 60)))
finally:
time.time = orig_time
@unpatch_policies
def test_ec_client_disconnect(self):
prolis = _test_sockets[0]
# create connection
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# create container
fd.write('PUT /v1/a/ec-discon HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: ec\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
# create object
obj = 'a' * 4 * 64 * 2 ** 10
fd.write('PUT /v1/a/ec-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n%s' % (len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# get object
fd.write('GET /v1/a/ec-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
        # read a small part of the object, then disconnect
fd.read(10)
sock.fd._sock.close()
sleep(0.1)
# check for disconnect message!
expected = ['Client disconnected on read'] * 2
self.assertEqual(
_test_servers[0].logger.get_lines_for_level('warning'),
expected)
@unpatch_policies
def test_ec_client_put_disconnect(self):
prolis = _test_sockets[0]
# create connection
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
# create container
fd.write('PUT /v1/a/ec-discon HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: ec\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
self.assertEqual(headers[:len(exp)], exp)
# create object
obj = 'a' * 4 * 64 * 2 ** 10
fd.write('PUT /v1/a/ec-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n%s' % (len(obj), obj[:-10]))
fd.flush()
fd.close()
sock.close()
# sleep to trampoline enough
sleep(0.1)
expected = ['Client disconnected without sending enough data']
warns = _test_servers[0].logger.get_lines_for_level('warning')
self.assertEqual(expected, warns)
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
@unpatch_policies
def test_leak_1(self):
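        # Track every Request instance in a weak-key dict so we can check that
        # none of them are kept alive after the client disconnects early.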
_request_instances = weakref.WeakKeyDictionary()
_orig_init = Request.__init__
def request_init(self, *args, **kwargs):
_orig_init(self, *args, **kwargs)
_request_instances[self] = None
with mock.patch.object(Request, "__init__", request_init):
prolis = _test_sockets[0]
prosrv = _test_servers[0]
obj_len = prosrv.client_chunk_size * 2
# PUT test file
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/c/test_leak_1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Auth-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n%s' % (obj_len, 'a' * obj_len))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Remember Request instance count, make sure the GC is run for
# pythons without reference counting.
for i in range(4):
sleep(0) # let eventlet do its thing
gc.collect()
else:
sleep(0)
before_request_instances = len(_request_instances)
# GET test file, but disconnect early
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/c/test_leak_1 HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Auth-Token: t\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
fd.read(1)
sock.fd._sock.close()
# Make sure the GC is run again for pythons without reference
# counting
for i in range(4):
sleep(0) # let eventlet do its thing
gc.collect()
else:
sleep(0)
self.assertEqual(
before_request_instances, len(_request_instances))
def test_OPTIONS(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o.jpg')
def my_empty_container_info(*args):
return {}
controller.container_info = my_empty_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_empty_origin_container_info(*args):
return {'cors': {'allow_origin': None}}
controller.container_info = my_empty_origin_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_container_info(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar:8080 https://foo.bar',
'max_age': '999',
}
}
controller.container_info = my_container_info
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
'https://foo.bar',
resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
7)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank('/v1/a/c/o.jpg', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 7)
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
controller.app.cors_allow_origin = ['http://foo.bar', ]
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
def my_container_info_wildcard(*args):
return {
'cors': {
'allow_origin': '*',
'max_age': '999',
}
}
controller.container_info = my_container_info_wildcard
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('*', resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS COPY GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
7)
self.assertEqual('999', resp.headers['access-control-max-age'])
def test_CORS_valid(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
'cors': {
'allow_origin': 'http://not.foo.bar'
}
}
controller.container_info = stubContainerInfo
controller.app.strict_cors_mode = False
def objectGET(controller, req):
return Response(headers={
'X-Object-Meta-Color': 'red',
'X-Super-Secret': 'hush',
})
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(objectGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('red', resp.headers['x-object-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertTrue('access-control-expose-headers' in resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set(['cache-control', 'content-language',
'content-type', 'expires', 'last-modified',
'pragma', 'etag', 'x-timestamp',
'x-trans-id', 'x-object-meta-color'])
self.assertEqual(expected_exposed, exposed)
controller.app.strict_cors_mode = True
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(objectGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertTrue('access-control-allow-origin' not in resp.headers)
def test_CORS_valid_with_obj_headers(self):
with save_globals():
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
def stubContainerInfo(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar'
}
}
controller.container_info = stubContainerInfo
def objectGET(controller, req):
return Response(headers={
'X-Object-Meta-Color': 'red',
'X-Super-Secret': 'hush',
'Access-Control-Allow-Origin': 'http://obj.origin',
'Access-Control-Expose-Headers': 'x-trans-id'
})
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(objectGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://obj.origin',
resp.headers['access-control-allow-origin'])
self.assertEqual('x-trans-id',
resp.headers['access-control-expose-headers'])
def _gather_x_container_headers(self, controller_call, req, *connect_args,
**kwargs):
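        # Run the given controller call with the supplied backend statuses,
        # capture the container-related headers each backend connection is
        # given, and return them (minus the initial account/container HEADs)
        # sorted by the first header in header_list.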
header_list = kwargs.pop('header_list', ['X-Container-Device',
'X-Container-Host',
'X-Container-Partition'])
seen_headers = []
def capture_headers(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
captured = {}
for header in header_list:
captured[header] = headers.get(header)
seen_headers.append(captured)
with save_globals():
self.app.allow_account_management = True
set_http_connect(*connect_args, give_connect=capture_headers,
**kwargs)
resp = controller_call(req)
self.assertEqual(2, resp.status_int // 100) # sanity check
# don't care about the account/container HEADs, so chuck
# the first two requests
return sorted(seen_headers[2:],
key=lambda d: d.get(header_list[0]) or 'z')
def test_PUT_x_container_headers_with_equal_replicas(self):
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_PUT_x_container_headers_with_fewer_container_replicas(self):
self.app.container_ring.set_replicas(2)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.0:1000',
'X-Container-Partition': '0',
'X-Container-Device': 'sda'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'}])
def test_PUT_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Length': '5'}, body='12345')
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201) # HEAD HEAD PUT PUT PUT
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_POST_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
self.app.object_post_as_copy = False
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST'},
headers={'Content-Type': 'application/stuff'})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.POST, req,
200, 200, 200, 200, 200) # HEAD HEAD POST POST POST
self.assertEqual(
seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}])
def test_DELETE_x_container_headers_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Content-Type': 'application/stuff'})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.DELETE, req,
200, 200, 200, 200, 200) # HEAD HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Container-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Container-Partition': '0',
'X-Container-Device': 'sda,sdd'},
{'X-Container-Host': '10.0.0.1:1001',
'X-Container-Partition': '0',
'X-Container-Device': 'sdb'},
{'X-Container-Host': '10.0.0.2:1002',
'X-Container-Partition': '0',
'X-Container-Device': 'sdc'}
])
@mock.patch('time.time', new=lambda: STATIC_TIME)
def test_PUT_x_delete_at_with_fewer_container_replicas(self):
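        # time.time() is pinned to STATIC_TIME above so that the expiring
        # objects container computed from X-Delete-At is deterministic.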
self.app.container_ring.set_replicas(2)
delete_at_timestamp = int(time.time()) + 100000
delete_at_container = utils.get_expirer_container(
delete_at_timestamp, self.app.expiring_objects_container_divisor,
'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': '0',
'X-Delete-At': str(delete_at_timestamp)})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
header_list=('X-Delete-At-Host', 'X-Delete-At-Device',
'X-Delete-At-Partition', 'X-Delete-At-Container'))
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': None,
'X-Delete-At-Container': None,
'X-Delete-At-Partition': None,
'X-Delete-At-Device': None}
])
@mock.patch('time.time', new=lambda: STATIC_TIME)
def test_PUT_x_delete_at_with_more_container_replicas(self):
self.app.container_ring.set_replicas(4)
self.app.expiring_objects_account = 'expires'
self.app.expiring_objects_container_divisor = 60
delete_at_timestamp = int(time.time()) + 100000
delete_at_container = utils.get_expirer_container(
delete_at_timestamp, self.app.expiring_objects_container_divisor,
'a', 'c', 'o')
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'Content-Type': 'application/stuff',
'Content-Length': 0,
'X-Delete-At': str(delete_at_timestamp)})
controller = ReplicatedObjectController(
self.app, 'a', 'c', 'o')
seen_headers = self._gather_x_container_headers(
controller.PUT, req,
200, 200, 201, 201, 201, # HEAD HEAD PUT PUT PUT
header_list=('X-Delete-At-Host', 'X-Delete-At-Device',
'X-Delete-At-Partition', 'X-Delete-At-Container'))
self.assertEqual(seen_headers, [
{'X-Delete-At-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sda,sdd'},
{'X-Delete-At-Host': '10.0.0.1:1001',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdb'},
{'X-Delete-At-Host': '10.0.0.2:1002',
'X-Delete-At-Container': delete_at_container,
'X-Delete-At-Partition': '0',
'X-Delete-At-Device': 'sdc'}
])
class TestECMismatchedFA(unittest.TestCase):
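    # The proxy should refuse to serve an EC object when the only fragment
    # archives it can reach come from different versions of that object.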
def tearDown(self):
prosrv = _test_servers[0]
# don't leak error limits and poison other tests
prosrv._error_limiting = {}
def test_mixing_different_objects_fragment_archives(self):
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv,
obj2srv, obj3srv) = _test_servers
ec_policy = POLICIES[3]
@public
def bad_disk(req):
return Response(status=507, body="borken")
ensure_container = Request.blank(
"/v1/a/ec-crazytown",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Storage-Policy": "ec", "X-Auth-Token": "t"})
resp = ensure_container.get_response(prosrv)
self.assertTrue(resp.status_int in (201, 202))
obj1 = "first version..."
put_req1 = Request.blank(
"/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req1.body = obj1
obj2 = u"versión segundo".encode("utf-8")
put_req2 = Request.blank(
"/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "PUT"},
headers={"X-Auth-Token": "t"})
put_req2.body = obj2
# pyeclib has checks for unequal-length; we don't want to trip those
self.assertEqual(len(obj1), len(obj2))
# Server obj1 will have the first version of the object (obj2 also
# gets it, but that gets stepped on later)
prosrv._error_limiting = {}
with mock.patch.object(obj3srv, 'PUT', bad_disk), \
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum'):
type(ec_policy).quorum = mock.PropertyMock(return_value=2)
resp = put_req1.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# Servers obj2 and obj3 will have the second version of the object.
prosrv._error_limiting = {}
with mock.patch.object(obj1srv, 'PUT', bad_disk), \
mock.patch(
'swift.common.storage_policy.ECStoragePolicy.quorum'):
type(ec_policy).quorum = mock.PropertyMock(return_value=2)
resp = put_req2.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
# A GET that only sees 1 fragment archive should fail
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj1srv, 'GET', bad_disk), \
mock.patch.object(obj2srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
# A GET that sees 2 matching FAs will work
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj1srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, obj2)
# A GET that sees 2 mismatching FAs will fail
get_req = Request.blank("/v1/a/ec-crazytown/obj",
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
with mock.patch.object(obj2srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 503)
class TestObjectDisconnectCleanup(unittest.TestCase):
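    # A client that disconnects partway through an upload should not leave
    # partial .data (or .durable) files behind on the object servers.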
    # update this pattern if do_setup creates differently named devices
device_pattern = re.compile('sd[a-z][0-9]')
def _cleanup_devices(self):
# make sure all the object data is cleaned up
for dev in os.listdir(_testdir):
if not self.device_pattern.match(dev):
continue
device_path = os.path.join(_testdir, dev)
for datadir in os.listdir(device_path):
if 'object' not in datadir:
continue
data_path = os.path.join(device_path, datadir)
rmtree(data_path, ignore_errors=True)
mkdirs(data_path)
def setUp(self):
debug.hub_exceptions(False)
self._cleanup_devices()
def tearDown(self):
debug.hub_exceptions(True)
self._cleanup_devices()
def _check_disconnect_cleans_up(self, policy_name, is_chunked=False):
proxy_port = _test_sockets[0].getsockname()[1]
def put(path, headers=None, body=None):
conn = httplib.HTTPConnection('localhost', proxy_port)
try:
conn.connect()
conn.putrequest('PUT', path)
for k, v in (headers or {}).items():
conn.putheader(k, v)
conn.endheaders()
body = body or ['']
for chunk in body:
if is_chunked:
chunk = '%x\r\n%s\r\n' % (len(chunk), chunk)
conn.send(chunk)
resp = conn.getresponse()
body = resp.read()
finally:
# seriously - shut this mother down
if conn.sock:
conn.sock.fd._sock.close()
return resp, body
# ensure container
container_path = '/v1/a/%s-disconnect-test' % policy_name
resp, _body = put(container_path, headers={
'Connection': 'close',
'X-Storage-Policy': policy_name,
'Content-Length': '0',
})
self.assertIn(resp.status, (201, 202))
def exploding_body():
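            # yield a few chunks, then blow up mid-upload to simulate the
            # client going away before the PUT completes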
for i in range(3):
yield '\x00' * (64 * 2 ** 10)
raise Exception('kaboom!')
headers = {}
if is_chunked:
headers['Transfer-Encoding'] = 'chunked'
else:
headers['Content-Length'] = 64 * 2 ** 20
obj_path = container_path + '/disconnect-data'
try:
resp, _body = put(obj_path, headers=headers,
body=exploding_body())
except Exception as e:
if str(e) != 'kaboom!':
raise
else:
self.fail('obj put connection did not ka-splod')
sleep(0.1)
def find_files(self):
found_files = defaultdict(list)
for root, dirs, files in os.walk(_testdir):
for fname in files:
filename, ext = os.path.splitext(fname)
found_files[ext].append(os.path.join(root, fname))
return found_files
def test_repl_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('zero')
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
def test_ec_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('ec')
found_files = self.find_files()
self.assertEqual(found_files['.durable'], [])
self.assertEqual(found_files['.data'], [])
def test_repl_chunked_transfer_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('zero', is_chunked=True)
found_files = self.find_files()
self.assertEqual(found_files['.data'], [])
def test_ec_chunked_transfer_disconnect_cleans_up(self):
self._check_disconnect_cleans_up('ec', is_chunked=True)
found_files = self.find_files()
self.assertEqual(found_files['.durable'], [])
self.assertEqual(found_files['.data'], [])
class TestObjectECRangedGET(unittest.TestCase):
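    # Ranged GETs against an EC object stored with a 4 KiB segment size; the
    # cases of interest are ranges that do or don't line up with segment
    # boundaries, suffix ranges, and ranges running off the end of the object.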
def setUp(self):
_test_servers[0].logger._clear()
self.app = proxy_server.Application(
None, FakeMemcache(),
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(),
container_ring=FakeRing())
def tearDown(self):
prosrv = _test_servers[0]
self.assertFalse(prosrv.logger.get_lines_for_level('error'))
self.assertFalse(prosrv.logger.get_lines_for_level('warning'))
@classmethod
def setUpClass(cls):
cls.obj_name = 'range-get-test'
cls.tiny_obj_name = 'range-get-test-tiny'
cls.aligned_obj_name = 'range-get-test-aligned'
# Note: only works if called with unpatched policies
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: 0\r\n'
'X-Storage-Token: t\r\n'
'X-Storage-Policy: ec\r\n'
'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2'
assert headers[:len(exp)] == exp, "container PUT failed"
seg_size = POLICIES.get_by_name("ec").ec_segment_size
cls.seg_size = seg_size
# EC segment size is 4 KiB, hence this gives 4 segments, which we
# then verify with a quick sanity check
cls.obj = ' my hovercraft is full of eels '.join(
str(s) for s in range(431))
assert seg_size * 4 > len(cls.obj) > seg_size * 3, \
"object is wrong number of segments"
cls.tiny_obj = 'tiny, tiny object'
assert len(cls.tiny_obj) < seg_size, "tiny_obj too large"
cls.aligned_obj = "".join(
"abcdEFGHijkl%04d" % x for x in range(512))
assert len(cls.aligned_obj) % seg_size == 0, "aligned obj not aligned"
for obj_name, obj in ((cls.obj_name, cls.obj),
(cls.tiny_obj_name, cls.tiny_obj),
(cls.aligned_obj_name, cls.aligned_obj)):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/ec-con/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'Content-Length: %d\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n%s' % (obj_name, len(obj), obj))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
assert headers[:len(exp)] == exp, \
"object PUT failed %s" % obj_name
def _get_obj(self, range_value, obj_name=None):
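        # Issue a raw ranged GET against the ec-con container and return
        # (status, headers, body), sanity-checking Content-Length and the
        # multipart/byteranges framing along the way.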
if obj_name is None:
obj_name = self.obj_name
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/ec-con/%s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Range: %s\r\n'
'\r\n' % (obj_name, range_value))
fd.flush()
headers = readuntil2crlfs(fd)
# e.g. "HTTP/1.1 206 Partial Content\r\n..."
status_code = int(headers[9:12])
headers = parse_headers_string(headers)
gotten_obj = ''
while True:
buf = fd.read(64)
if not buf:
break
gotten_obj += buf
# if we get this wrong, clients will either get truncated data or
# they'll hang waiting for bytes that aren't coming, so it warrants
# being asserted for every test case
if 'Content-Length' in headers:
self.assertEqual(int(headers['Content-Length']), len(gotten_obj))
# likewise, if we say MIME and don't send MIME or vice versa,
# clients will be horribly confused
if headers.get('Content-Type', '').startswith('multipart/byteranges'):
self.assertEqual(gotten_obj[:2], "--")
else:
# In general, this isn't true, as you can start an object with
# "--". However, in this test, we don't start any objects with
# "--", or even include "--" in their contents anywhere.
self.assertNotEqual(gotten_obj[:2], "--")
return (status_code, headers, gotten_obj)
def _parse_multipart(self, content_type, body):
parser = email.parser.FeedParser()
parser.feed("Content-Type: %s\r\n\r\n" % content_type)
parser.feed(body)
root_message = parser.close()
self.assertTrue(root_message.is_multipart())
byteranges = root_message.get_payload()
self.assertFalse(root_message.defects)
for i, message in enumerate(byteranges):
self.assertFalse(message.defects, "Part %d had defects" % i)
self.assertFalse(message.is_multipart(),
"Nested multipart at %d" % i)
return byteranges
def test_bogus(self):
status, headers, gotten_obj = self._get_obj("tacos=3-5")
self.assertEqual(status, 200)
self.assertEqual(len(gotten_obj), len(self.obj))
self.assertEqual(gotten_obj, self.obj)
def test_unaligned(self):
# One segment's worth of data, but straddling two segment boundaries
# (so it has data from three segments)
status, headers, gotten_obj = self._get_obj("bytes=3783-7878")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 3783-7878/14513")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.obj[3783:7879])
def test_aligned_left(self):
# First byte is aligned to a segment boundary, last byte is not
status, headers, gotten_obj = self._get_obj("bytes=0-5500")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "5501")
self.assertEqual(headers['Content-Range'], "bytes 0-5500/14513")
self.assertEqual(len(gotten_obj), 5501)
self.assertEqual(gotten_obj, self.obj[:5501])
def test_aligned_range(self):
# Ranged GET that wants exactly one segment
status, headers, gotten_obj = self._get_obj("bytes=4096-8191")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 4096-8191/14513")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.obj[4096:8192])
def test_aligned_range_end(self):
# Ranged GET that wants exactly the last segment
status, headers, gotten_obj = self._get_obj("bytes=12288-14512")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "2225")
self.assertEqual(headers['Content-Range'], "bytes 12288-14512/14513")
self.assertEqual(len(gotten_obj), 2225)
self.assertEqual(gotten_obj, self.obj[12288:])
def test_aligned_range_aligned_obj(self):
# Ranged GET that wants exactly the last segment, which is full-size
status, headers, gotten_obj = self._get_obj("bytes=4096-8191",
self.aligned_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "4096")
self.assertEqual(headers['Content-Range'], "bytes 4096-8191/8192")
self.assertEqual(len(gotten_obj), 4096)
self.assertEqual(gotten_obj, self.aligned_obj[4096:8192])
def test_byte_0(self):
# Just the first byte, but it's index 0, so that's easy to get wrong
status, headers, gotten_obj = self._get_obj("bytes=0-0")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], "1")
self.assertEqual(headers['Content-Range'], "bytes 0-0/14513")
self.assertEqual(gotten_obj, self.obj[0])
def test_unsatisfiable(self):
# Goes just one byte too far off the end of the object, so it's
# unsatisfiable
status, _junk, _junk = self._get_obj(
"bytes=%d-%d" % (len(self.obj), len(self.obj) + 100))
self.assertEqual(status, 416)
def test_off_end(self):
# Ranged GET that's mostly off the end of the object, but overlaps
# it in just the last byte
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) + 100))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '1')
self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
self.assertEqual(gotten_obj, self.obj[-1])
def test_aligned_off_end(self):
# Ranged GET that starts on a segment boundary but asks for a whole lot
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (8192, len(self.obj) + 100))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '6321')
self.assertEqual(headers['Content-Range'], 'bytes 8192-14512/14513')
self.assertEqual(gotten_obj, self.obj[8192:])
def test_way_off_end(self):
# Ranged GET that's mostly off the end of the object, but overlaps
# it in just the last byte, and wants multiple segments' worth off
# the end
status, headers, gotten_obj = self._get_obj(
"bytes=%d-%d" % (len(self.obj) - 1, len(self.obj) * 1000))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '1')
self.assertEqual(headers['Content-Range'], 'bytes 14512-14512/14513')
self.assertEqual(gotten_obj, self.obj[-1])
def test_boundaries(self):
# Wants the last byte of segment 1 + the first byte of segment 2
status, headers, gotten_obj = self._get_obj("bytes=4095-4096")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '2')
self.assertEqual(headers['Content-Range'], 'bytes 4095-4096/14513')
self.assertEqual(gotten_obj, self.obj[4095:4097])
def test_until_end(self):
# Wants the last byte of segment 1 + the rest
status, headers, gotten_obj = self._get_obj("bytes=4095-")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '10418')
self.assertEqual(headers['Content-Range'], 'bytes 4095-14512/14513')
self.assertEqual(gotten_obj, self.obj[4095:])
def test_small_suffix(self):
# Small range-suffix GET: the last 100 bytes (less than one segment)
status, headers, gotten_obj = self._get_obj("bytes=-100")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '100')
self.assertEqual(headers['Content-Range'], 'bytes 14413-14512/14513')
self.assertEqual(len(gotten_obj), 100)
self.assertEqual(gotten_obj, self.obj[-100:])
def test_small_suffix_aligned(self):
# Small range-suffix GET: the last 100 bytes, last segment is
# full-size
status, headers, gotten_obj = self._get_obj("bytes=-100",
self.aligned_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '100')
self.assertEqual(headers['Content-Range'], 'bytes 8092-8191/8192')
self.assertEqual(len(gotten_obj), 100)
def test_suffix_two_segs(self):
# Ask for enough data that we need the last two segments. The last
# segment is short, though, so this ensures we compensate for that.
#
# Note that the total range size is less than one full-size segment.
suffix_len = len(self.obj) % self.seg_size + 1
status, headers, gotten_obj = self._get_obj("bytes=-%d" % suffix_len)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], str(suffix_len))
self.assertEqual(headers['Content-Range'],
'bytes %d-%d/%d' % (len(self.obj) - suffix_len,
len(self.obj) - 1,
len(self.obj)))
self.assertEqual(len(gotten_obj), suffix_len)
def test_large_suffix(self):
# Large range-suffix GET: the last 5000 bytes (more than one segment)
status, headers, gotten_obj = self._get_obj("bytes=-5000")
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '5000')
self.assertEqual(headers['Content-Range'], 'bytes 9513-14512/14513')
self.assertEqual(len(gotten_obj), 5000)
self.assertEqual(gotten_obj, self.obj[-5000:])
def test_overlarge_suffix(self):
# The last N+1 bytes of an N-byte object
status, headers, gotten_obj = self._get_obj(
"bytes=-%d" % (len(self.obj) + 1))
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '14513')
self.assertEqual(headers['Content-Range'], 'bytes 0-14512/14513')
self.assertEqual(len(gotten_obj), len(self.obj))
self.assertEqual(gotten_obj, self.obj)
def test_small_suffix_tiny_object(self):
status, headers, gotten_obj = self._get_obj(
"bytes=-5", self.tiny_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '5')
self.assertEqual(headers['Content-Range'], 'bytes 12-16/17')
self.assertEqual(gotten_obj, self.tiny_obj[12:])
def test_overlarge_suffix_tiny_object(self):
status, headers, gotten_obj = self._get_obj(
"bytes=-1234567890", self.tiny_obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers['Content-Length'], '17')
self.assertEqual(headers['Content-Range'], 'bytes 0-16/17')
self.assertEqual(len(gotten_obj), len(self.tiny_obj))
self.assertEqual(gotten_obj, self.tiny_obj)
def test_multiple_ranges(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,4490-5010", self.obj_name)
self.assertEqual(status, 206)
self.assertEqual(headers["Content-Length"], str(len(gotten_obj)))
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertTrue(boundary is not None)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4490-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4490:5011])
def test_multiple_ranges_overlapping_in_segment(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-9,20-29,40-49,60-69,80-89")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 5)
def test_multiple_ranges_off_end(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,14500-14513") # there is no byte 14513, only 0-14512
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 14500-14512/14513")
def test_multiple_ranges_suffix_off_end(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,-13")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 14500-14512/14513")
def test_multiple_ranges_one_barely_unsatisfiable(self):
# The thing about 14515-14520 is that it comes from the last segment
# in the object. When we turn this range into a fragment range,
# it'll be for the last fragment, so the object servers see
# something satisfiable.
#
# Basically, we'll get 3 byteranges from the object server, but we
# have to filter out the unsatisfiable one on our own.
status, headers, gotten_obj = self._get_obj(
"bytes=0-10,14515-14520,40-50")
self.assertEqual(status, 206)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
self.assertEqual(got_byteranges[0]['Content-Range'],
"bytes 0-10/14513")
self.assertEqual(got_byteranges[0].get_payload(), self.obj[0:11])
self.assertEqual(got_byteranges[1]['Content-Range'],
"bytes 40-50/14513")
self.assertEqual(got_byteranges[1].get_payload(), self.obj[40:51])
def test_multiple_ranges_some_unsatisfiable(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,4090-5010,999999-9999999", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertTrue(boundary is not None)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4090-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4090:5011])
def test_two_ranges_one_unsatisfiable(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,999999-9999999", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
# According to RFC 7233, this could be either a multipart/byteranges
# response with one part or it could be a single-part response (just
# the bytes, no MIME). We're locking it down here: single-part
# response. That's what replicated objects do, and we don't want any
# client-visible differences between EC objects and replicated ones.
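        # ('donuts' is presumably the Content-Type the test object was
        # uploaded with; a single-part range response carries the object's
        # own Content-Type rather than multipart/byteranges.)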
self.assertEqual(content_type, 'donuts')
self.assertEqual(gotten_obj, self.obj[:101])
def test_two_ranges_one_unsatisfiable_same_segment(self):
# Like test_two_ranges_one_unsatisfiable(), but where both ranges
# fall within the same EC segment.
status, headers, gotten_obj = self._get_obj(
"bytes=14500-14510,14520-14530")
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
self.assertEqual(content_type, 'donuts')
self.assertEqual(gotten_obj, self.obj[14500:14511])
def test_multiple_ranges_some_unsatisfiable_out_of_order(self):
status, headers, gotten_obj = self._get_obj(
"bytes=0-100,99999998-99999999,4090-5010", self.obj_name)
self.assertEqual(status, 206)
content_type, content_type_params = parse_content_type(
headers['Content-Type'])
content_type_params = dict(content_type_params)
self.assertEqual(content_type, 'multipart/byteranges')
boundary = content_type_params.get('boundary')
self.assertTrue(boundary is not None)
got_byteranges = self._parse_multipart(headers['Content-Type'],
gotten_obj)
self.assertEqual(len(got_byteranges), 2)
first_byterange, second_byterange = got_byteranges
self.assertEqual(first_byterange['Content-Range'],
'bytes 0-100/14513')
self.assertEqual(first_byterange.get_payload(), self.obj[:101])
self.assertEqual(second_byterange['Content-Range'],
'bytes 4090-5010/14513')
self.assertEqual(second_byterange.get_payload(), self.obj[4090:5011])
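# The container controller tests below run under three patched storage
# policies: policy 0 ('zero') is the default, and policy 2 ('two') is
# marked deprecated (see test_convert_policy_to_index).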
@patch_policies([
StoragePolicy(0, 'zero', True, object_ring=FakeRing(base_port=3000)),
StoragePolicy(1, 'one', False, object_ring=FakeRing(base_port=3000)),
StoragePolicy(2, 'two', False, True, object_ring=FakeRing(base_port=3000))
])
class TestContainerController(unittest.TestCase):
"Test swift.proxy_server.ContainerController"
def setUp(self):
self.app = proxy_server.Application(
None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(base_port=2000),
logger=debug_logger())
def test_convert_policy_to_index(self):
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
expected = {
'zero': 0,
'ZeRo': 0,
'one': 1,
'OnE': 1,
}
for name, index in expected.items():
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': name})
self.assertEqual(controller._convert_policy_to_index(req), index)
# default test
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.assertEqual(controller._convert_policy_to_index(req), None)
# negative test
req = Request.blank('/a/c',
headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': 'nada'})
self.assertRaises(HTTPException, controller._convert_policy_to_index,
req)
# storage policy two is deprecated
req = Request.blank('/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain',
'X-Storage-Policy': 'two'})
self.assertRaises(HTTPException, controller._convert_policy_to_index,
req)
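    # The next three tests check that the backend's
    # X-Backend-Storage-Policy-Index header is mapped to the client-facing
    # X-Storage-Policy header: translated on success, omitted when the
    # container is not found, and logged as an error when the index cannot
    # be resolved.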
def test_convert_index_to_name(self):
policy = random.choice(list(POLICIES))
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 200,
headers={'X-Backend-Storage-Policy-Index': int(policy)},
) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Storage-Policy'], policy.name)
def test_no_convert_index_to_name_when_container_not_found(self):
policy = random.choice(list(POLICIES))
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 404, 404, 404,
headers={'X-Backend-Storage-Policy-Index':
int(policy)}) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Storage-Policy'], None)
def test_error_convert_index_to_name(self):
req = Request.blank('/v1/a/c')
with mocked_http_conn(
200, 200,
headers={'X-Backend-Storage-Policy-Index': '-1'}) as fake_conn:
resp = req.get_response(self.app)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['X-Storage-Policy'], None)
error_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(2, len(error_lines))
for msg in error_lines:
expected = "Could not translate " \
"X-Backend-Storage-Policy-Index ('-1')"
self.assertTrue(expected in msg)
def test_transfer_headers(self):
src_headers = {'x-remove-versions-location': 'x',
'x-container-read': '*:user',
'x-remove-container-sync-key': 'x'}
dst_headers = {'x-versions-location': 'backup'}
controller = swift.proxy.controllers.ContainerController(self.app,
'a', 'c')
controller.transfer_headers(src_headers, dst_headers)
expected_headers = {'x-versions-location': '',
'x-container-read': '*:user',
'x-container-sync-key': ''}
self.assertEqual(dst_headers, expected_headers)
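    # Helper used throughout this class: wires the given fake backend
    # status codes up via set_http_connect() and asserts the resulting
    # proxy status for both '/v1/a/c' and '/v1/a/c/'.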
def assert_status_map(self, method, statuses, expected,
raise_exc=False, missing_container=False):
with save_globals():
kwargs = {}
if raise_exc:
kwargs['raise_exc'] = raise_exc
kwargs['missing_container'] = missing_container
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c/', headers={'Content-Length': '0',
'Content-Type': 'text/plain'})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
def test_HEAD_GET(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def test_status_map(statuses, expected,
c_expected=None, a_expected=None, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
self.app.update_request(req)
res = controller.HEAD(req)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
if expected < 400:
self.assertTrue('x-works' in res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
if c_expected:
self.assertTrue('swift.container/a/c' in res.environ)
self.assertEqual(
res.environ['swift.container/a/c']['status'],
c_expected)
else:
self.assertTrue('swift.container/a/c' not in res.environ)
if a_expected:
self.assertTrue('swift.account/a' in res.environ)
self.assertEqual(res.environ['swift.account/a']['status'],
a_expected)
else:
self.assertTrue('swift.account/a' not in res.environ)
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status[:len(str(expected))],
str(expected))
if expected < 400:
self.assertTrue('x-works' in res.headers)
self.assertEqual(res.headers['x-works'], 'yes')
if c_expected:
self.assertTrue('swift.container/a/c' in res.environ)
self.assertEqual(
res.environ['swift.container/a/c']['status'],
c_expected)
else:
self.assertTrue('swift.container/a/c' not in res.environ)
if a_expected:
self.assertTrue('swift.account/a' in res.environ)
self.assertEqual(res.environ['swift.account/a']['status'],
a_expected)
else:
self.assertTrue('swift.account/a' not in res.environ)
            # In all the following tests the account check returns 200 and
            # is cached; what gets cached for the container varies per test.
            # return 200 and cache 200 for the container
test_status_map((200, 200, 404, 404), 200, 200, 200)
test_status_map((200, 200, 500, 404), 200, 200, 200)
# return 304 don't cache container
test_status_map((200, 304, 500, 404), 304, None, 200)
# return 404 and cache 404 for container
test_status_map((200, 404, 404, 404), 404, 404, 200)
test_status_map((200, 404, 404, 500), 404, 404, 200)
# return 503, don't cache container
test_status_map((200, 500, 500, 500), 503, None, 200)
self.assertFalse(self.app.account_autocreate)
# In all the following tests cache 404 for account
# return 404 (as account is not found) and don't cache container
test_status_map((404, 404, 404), 404, None, 404)
# This should make no difference
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, None, 404)
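    # In the status tuples used by the PUT/POST tests below, the leading
    # statuses answer the account check (and, with autocreate, the account
    # PUT) while the trailing statuses answer the container servers, as the
    # inline comments on the individual calls spell out.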
def test_PUT_policy_headers(self):
backend_requests = []
def capture_requests(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
if method == 'PUT':
backend_requests.append(headers)
def test_policy(requested_policy):
with save_globals():
mock_conn = set_http_connect(200, 201, 201, 201,
give_connect=capture_requests)
self.app.memcache.store = {}
req = Request.blank('/v1/a/test', method='PUT',
headers={'Content-Length': 0})
if requested_policy:
expected_policy = requested_policy
req.headers['X-Storage-Policy'] = policy.name
else:
expected_policy = POLICIES.default
res = req.get_response(self.app)
if expected_policy.is_deprecated:
self.assertEqual(res.status_int, 400)
self.assertEqual(0, len(backend_requests))
expected = 'is deprecated'
self.assertTrue(expected in res.body,
'%r did not include %r' % (
res.body, expected))
return
self.assertEqual(res.status_int, 201)
self.assertEqual(
expected_policy.object_ring.replicas,
len(backend_requests))
for headers in backend_requests:
if not requested_policy:
self.assertFalse('X-Backend-Storage-Policy-Index' in
headers)
self.assertTrue(
'X-Backend-Storage-Policy-Default' in headers)
self.assertEqual(
int(expected_policy),
int(headers['X-Backend-Storage-Policy-Default']))
else:
self.assertTrue('X-Backend-Storage-Policy-Index' in
headers)
self.assertEqual(int(headers
['X-Backend-Storage-Policy-Index']),
int(policy))
# make sure all mocked responses are consumed
self.assertRaises(StopIteration, mock_conn.code_iter.next)
test_policy(None) # no policy header
for policy in POLICIES:
backend_requests = [] # reset backend requests
test_policy(policy)
def test_PUT(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 201, 201, 201), 201, missing_container=True)
test_status_map((200, 201, 201, 500), 201, missing_container=True)
test_status_map((200, 204, 404, 404), 404, missing_container=True)
test_status_map((200, 204, 500, 404), 503, missing_container=True)
self.assertFalse(self.app.account_autocreate)
test_status_map((404, 404, 404), 404, missing_container=True)
self.app.account_autocreate = True
# fail to retrieve account info
test_status_map(
(503, 503, 503), # account_info fails on 503
404, missing_container=True)
# account fail after creation
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
404, 404, 404), # account_info fail
404, missing_container=True)
test_status_map(
(503, 503, 404, # account_info fails on 404
503, 503, 503, # PUT account
503, 503, 404), # account_info fail
404, missing_container=True)
# put fails
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
503, 503, 201), # put container fail
503, missing_container=True)
# all goes according to plan
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
201, 201, 201), # put container success
201, missing_container=True)
test_status_map(
(503, 404, 404, # account_info fails on 404
503, 201, 201, # PUT account
503, 200, # account_info success
503, 201, 201), # put container success
201, missing_container=True)
def test_PUT_autocreate_account_with_sysmeta(self):
# x-account-sysmeta headers in a container PUT request should be
# transferred to the account autocreate PUT request
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, headers=None, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {}, headers=headers)
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
self.app.account_autocreate = True
calls = []
callback = _make_callback_func(calls)
key, value = 'X-Account-Sysmeta-Blah', 'something'
headers = {key: value}
# all goes according to plan
test_status_map(
(404, 404, 404, # account_info fails on 404
201, 201, 201, # PUT account
200, # account_info success
201, 201, 201), # put container success
201, missing_container=True,
headers=headers,
give_connect=callback)
self.assertEqual(10, len(calls))
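            # Per the status map above: calls[0:3] are the account_info
            # requests, calls[3:6] the autocreate account PUTs (checked
            # below), calls[6] the account_info retry, and calls[7:10] the
            # container PUTs.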
for call in calls[3:6]:
self.assertEqual('/account', call['path'])
self.assertTrue(key in call['headers'],
'%s call, key %s missing in headers %s' %
(call['method'], key, call['headers']))
self.assertEqual(value, call['headers'][key])
def test_POST(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c', {})
req.content_length = 0
self.app.update_request(req)
res = controller.POST(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((200, 201, 201, 201), 201, missing_container=True)
test_status_map((200, 201, 201, 500), 201, missing_container=True)
test_status_map((200, 204, 404, 404), 404, missing_container=True)
test_status_map((200, 204, 500, 404), 503, missing_container=True)
self.assertFalse(self.app.account_autocreate)
test_status_map((404, 404, 404), 404, missing_container=True)
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, missing_container=True)
def test_PUT_max_containers_per_account(self):
with save_globals():
self.app.max_containers_per_account = 12346
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
self.app.max_containers_per_account = 12345
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, 'account',
'container_new')
self.assert_status_map(controller.PUT, (200, 404, 404, 404), 403,
missing_container=True)
self.app.max_containers_per_account = 12345
self.app.max_containers_whitelist = ['account']
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
def test_PUT_max_container_name_length(self):
with save_globals():
limit = constraints.MAX_CONTAINER_NAME_LENGTH
controller = proxy_server.ContainerController(self.app, 'account',
'1' * limit)
self.assert_status_map(controller.PUT,
(200, 201, 201, 201), 201,
missing_container=True)
controller = proxy_server.ContainerController(self.app, 'account',
'2' * (limit + 1))
self.assert_status_map(controller.PUT, (201, 201, 201), 400,
missing_container=True)
def test_PUT_connect_exceptions(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.PUT, (200, 201, 201, -1), 201,
missing_container=True)
self.assert_status_map(controller.PUT, (200, 201, -1, -1), 503,
missing_container=True)
self.assert_status_map(controller.PUT, (200, 503, 503, -1), 503,
missing_container=True)
def test_acc_missing_returns_404(self):
for meth in ('DELETE', 'PUT'):
with save_globals():
self.app.memcache = FakeMemcacheReturnsNone()
self.app._error_limiting = {}
controller = proxy_server.ContainerController(self.app,
'account',
'container')
if meth == 'PUT':
set_http_connect(200, 200, 200, 200, 200, 200,
missing_container=True)
else:
set_http_connect(200, 200, 200, 200)
self.app.memcache.store = {}
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
self.app.update_request(req)
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 200)
set_http_connect(404, 404, 404, 200, 200, 200)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, 404)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
set_http_connect(503, 404, raise_exc=True)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
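                # Finally, push every account node past the error
                # suppression limit; even though the backends would now
                # answer 200, the proxy treats the account as unavailable
                # and still returns 404.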
for dev in self.app.account_ring.devs:
set_node_errors(self.app, dev,
self.app.error_suppression_limit + 1,
time.time())
set_http_connect(200, 200, 200, 200, 200, 200)
                # Make sure it is a blank request without env caching
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': meth})
resp = getattr(controller, meth)(req)
self.assertEqual(resp.status_int, 404)
def test_put_locking(self):
class MockMemcache(FakeMemcache):
def __init__(self, allow_lock=None):
self.allow_lock = allow_lock
super(MockMemcache, self).__init__()
@contextmanager
def soft_lock(self, key, timeout=0, retries=5):
if self.allow_lock:
yield True
else:
raise NotImplementedError
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.app.memcache = MockMemcache(allow_lock=True)
set_http_connect(200, 201, 201, 201,
missing_container=True)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'})
self.app.update_request(req)
res = controller.PUT(req)
self.assertEqual(res.status_int, 201)
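    # Error limiting: once a container node accumulates more than
    # error_suppression_limit errors it is skipped, so even all-2xx backend
    # responses yield 503 until error_suppression_interval elapses (set to
    # a negative value below to re-enable the node immediately).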
def test_error_limiting(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
container_ring = controller.app.container_ring
controller.app.sort_nodes = lambda l: l
self.assert_status_map(controller.HEAD, (200, 503, 200, 200), 200,
missing_container=False)
self.assertEqual(
node_error_count(controller.app, container_ring.devs[0]), 2)
self.assertTrue(
node_last_error(controller.app, container_ring.devs[0])
is not None)
for _junk in range(self.app.error_suppression_limit):
self.assert_status_map(controller.HEAD,
(200, 503, 503, 503), 503)
self.assertEqual(
node_error_count(controller.app, container_ring.devs[0]),
self.app.error_suppression_limit + 1)
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 503)
self.assertTrue(
node_last_error(controller.app, container_ring.devs[0])
is not None)
self.assert_status_map(controller.PUT, (200, 201, 201, 201), 503,
missing_container=True)
self.assert_status_map(controller.DELETE,
(200, 204, 204, 204), 503)
self.app.error_suppression_interval = -300
self.assert_status_map(controller.HEAD, (200, 200, 200, 200), 200)
self.assert_status_map(controller.DELETE, (200, 204, 204, 204),
404, raise_exc=True)
def test_DELETE(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'account',
'container')
self.assert_status_map(controller.DELETE,
(200, 204, 204, 204), 204)
self.assert_status_map(controller.DELETE,
(200, 204, 204, 503), 204)
self.assert_status_map(controller.DELETE,
(200, 204, 503, 503), 503)
self.assert_status_map(controller.DELETE,
(200, 204, 404, 404), 404)
self.assert_status_map(controller.DELETE,
(200, 404, 404, 404), 404)
self.assert_status_map(controller.DELETE,
(200, 204, 503, 404), 503)
self.app.memcache = FakeMemcacheReturnsNone()
# 200: Account check, 404x3: Container check
self.assert_status_map(controller.DELETE,
(200, 404, 404, 404), 404)
def test_response_get_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c?format=json')
self.app.update_request(req)
res = controller.GET(req)
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c?format=json')
self.app.update_request(req)
res = controller.HEAD(req)
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_PUT_metadata(self):
self.metadata_helper('PUT')
def test_POST_metadata(self):
self.metadata_helper('POST')
def metadata_helper(self, method):
for test_header, test_value in (
('X-Container-Meta-TestHeader', 'TestValue'),
('X-Container-Meta-TestHeader', ''),
('X-Remove-Container-Meta-TestHeader', 'anything'),
('X-Container-Read', '.r:*'),
('X-Remove-Container-Read', 'anything'),
('X-Container-Write', 'anyone'),
('X-Remove-Container-Write', 'anything')):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a/c':
find_header = test_header
find_value = test_value
if find_header.lower().startswith('x-remove-'):
find_header = \
find_header.lower().replace('-remove', '', 1)
find_value = ''
for k, v in headers.items():
if k.lower() == find_header.lower() and \
v == find_value:
break
else:
test_errors.append('%s: %s not in %s' %
(find_header, find_value, headers))
with save_globals():
controller = \
proxy_server.ContainerController(self.app, 'a', 'c')
set_http_connect(200, 201, 201, 201, give_connect=test_connect)
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': method, 'swift_owner': True},
headers={test_header: test_value})
self.app.update_request(req)
getattr(controller, method)(req)
self.assertEqual(test_errors, [])
def test_PUT_bad_metadata(self):
self.bad_metadata_helper('PUT')
def test_POST_bad_metadata(self):
self.bad_metadata_helper('POST')
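    # bad_metadata_helper walks each container metadata constraint in turn
    # (header name length, value length, header count, overall size):
    # requests at the limit are accepted with 201, requests one past the
    # limit are rejected with 400.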
def bad_metadata_helper(self, method):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
set_http_connect(200, 201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-' +
('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Container-Meta-Too-Long':
'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Container-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
while size < (constraints.MAX_META_OVERALL_SIZE - 4
- constraints.MAX_META_VALUE_LENGTH):
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Container-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Container-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Container-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
def test_POST_calls_clean_acl(self):
called = [False]
def clean_acl(header, value):
called[0] = True
raise ValueError('fake error')
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Container-Read': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
called[0] = False
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Container-Write': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.POST(req)
self.assertTrue(called[0])
def test_PUT_calls_clean_acl(self):
called = [False]
def clean_acl(header, value):
called[0] = True
raise ValueError('fake error')
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Container-Read': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
called[0] = False
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Container-Write': '.r:*'})
req.environ['swift.clean_acl'] = clean_acl
self.app.update_request(req)
controller.PUT(req)
self.assertTrue(called[0])
def test_GET_no_content(self):
with save_globals():
set_http_connect(200, 204, 204, 204)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c')
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.status_int, 204)
self.assertEqual(
res.environ['swift.container/a/c']['status'], 204)
self.assertEqual(res.content_length, 0)
self.assertTrue('transfer-encoding' not in res.headers)
def test_GET_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c')
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = controller.GET(req)
self.assertEqual(res.environ['swift.container/a/c']['status'], 201)
self.assertTrue(called[0])
def test_HEAD_calls_authorize(self):
called = [False]
def authorize(req):
called[0] = True
return HTTPUnauthorized(request=req)
with save_globals():
set_http_connect(200, 201, 201, 201)
controller = proxy_server.ContainerController(self.app, 'account',
'container')
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'HEAD'})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
controller.HEAD(req)
self.assertTrue(called[0])
def test_unauthorized_requests_when_account_not_found(self):
# verify unauthorized container requests always return response
# from swift.authorize
called = [0, 0]
def authorize(req):
called[0] += 1
return HTTPUnauthorized(request=req)
def account_info(*args):
called[1] += 1
return None, None, None
def _do_test(method):
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = app.handle_request(req)
return res
for method in ('PUT', 'POST', 'DELETE'):
# no delay_denial on method, expect one call to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(401, res.status_int)
self.assertEqual([1, 0], called)
for method in ('HEAD', 'GET'):
# delay_denial on method, expect two calls to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(401, res.status_int)
self.assertEqual([2, 1], called)
def test_authorized_requests_when_account_not_found(self):
# verify authorized container requests always return 404 when
# account not found
called = [0, 0]
def authorize(req):
called[0] += 1
def account_info(*args):
called[1] += 1
return None, None, None
def _do_test(method):
with save_globals():
swift.proxy.controllers.Controller.account_info = account_info
app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': method})
req.environ['swift.authorize'] = authorize
self.app.update_request(req)
res = app.handle_request(req)
return res
for method in ('PUT', 'POST', 'DELETE', 'HEAD', 'GET'):
# expect one call to authorize
called = [0, 0]
res = _do_test(method)
self.assertEqual(404, res.status_int)
self.assertEqual([1, 1], called)
def test_OPTIONS_get_info_drops_origin(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
count = [0]
def my_get_info(app, env, account, container=None,
ret_not_found=False, swift_source=None):
if count[0] > 11:
return {}
count[0] += 1
if not container:
return {'some': 'stuff'}
return proxy_base.was_get_info(
app, env, account, container, ret_not_found, swift_source)
proxy_base.was_get_info = proxy_base.get_info
with mock.patch.object(proxy_base, 'get_info', my_get_info):
proxy_base.get_info = my_get_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
controller.OPTIONS(req)
self.assertTrue(count[0] < 11)
def test_OPTIONS(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def my_empty_container_info(*args):
return {}
controller.container_info = my_empty_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_empty_origin_container_info(*args):
return {'cors': {'allow_origin': None}}
controller.container_info = my_empty_origin_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
def my_container_info(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar:8080 https://foo.bar',
'max_age': '999',
}
}
controller.container_info = my_container_info
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
'https://foo.bar',
resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
6)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://foo.bar'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank('/v1/a/c', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 6)
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
resp = controller.OPTIONS(req)
self.assertEqual(401, resp.status_int)
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.bar',
'Access-Control-Request-Method': 'GET'})
controller.app.cors_allow_origin = ['http://foo.bar', ]
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
def my_container_info_wildcard(*args):
return {
'cors': {
'allow_origin': '*',
'max_age': '999',
}
}
controller.container_info = my_container_info_wildcard
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual('*', resp.headers['access-control-allow-origin'])
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['access-control-allow-methods'])
self.assertEqual(
len(resp.headers['access-control-allow-methods'].split(', ')),
6)
self.assertEqual('999', resp.headers['access-control-max-age'])
req = Request.blank(
'/v1/a/c/o.jpg',
{'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'https://bar.baz',
'Access-Control-Request-Headers':
'x-foo, x-bar, x-auth-token',
'Access-Control-Request-Method': 'GET'}
)
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
self.assertEqual(
sortHeaderNames('x-foo, x-bar, x-auth-token'),
sortHeaderNames(resp.headers['access-control-allow-headers']))
def test_CORS_valid(self):
with save_globals():
controller = proxy_server.ContainerController(self.app, 'a', 'c')
def stubContainerInfo(*args):
return {
'cors': {
'allow_origin': 'http://foo.bar'
}
}
controller.container_info = stubContainerInfo
def containerGET(controller, req):
return Response(headers={
'X-Container-Meta-Color': 'red',
'X-Super-Secret': 'hush',
})
req = Request.blank(
'/v1/a/c',
{'REQUEST_METHOD': 'GET'},
headers={'Origin': 'http://foo.bar'})
resp = cors_validation(containerGET)(controller, req)
self.assertEqual(200, resp.status_int)
self.assertEqual('http://foo.bar',
resp.headers['access-control-allow-origin'])
self.assertEqual('red', resp.headers['x-container-meta-color'])
# X-Super-Secret is in the response, but not "exposed"
self.assertEqual('hush', resp.headers['x-super-secret'])
self.assertTrue('access-control-expose-headers' in resp.headers)
exposed = set(
h.strip() for h in
resp.headers['access-control-expose-headers'].split(','))
expected_exposed = set(['cache-control', 'content-language',
'content-type', 'expires', 'last-modified',
'pragma', 'etag', 'x-timestamp',
'x-trans-id', 'x-container-meta-color'])
self.assertEqual(expected_exposed, exposed)
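    # _gather_x_account_headers captures the X-Account-Host, -Partition and
    # -Device headers the proxy attaches to each container backend request,
    # so the tests below can check how account replicas are spread across
    # the three container PUT/DELETE requests when the account ring has
    # fewer or more replicas than the container ring.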
def _gather_x_account_headers(self, controller_call, req, *connect_args,
**kwargs):
seen_headers = []
to_capture = ('X-Account-Partition', 'X-Account-Host',
'X-Account-Device')
def capture_headers(ipaddr, port, device, partition, method,
path, headers=None, query_string=None):
captured = {}
for header in to_capture:
captured[header] = headers.get(header)
seen_headers.append(captured)
with save_globals():
self.app.allow_account_management = True
set_http_connect(*connect_args, give_connect=capture_headers,
**kwargs)
resp = controller_call(req)
self.assertEqual(2, resp.status_int // 100) # sanity check
# don't care about the account HEAD, so throw away the
# first element
return sorted(seen_headers[1:],
key=lambda d: d['X-Account-Host'] or 'Z')
def test_PUT_x_account_headers_with_fewer_account_replicas(self):
self.app.account_ring.set_replicas(2)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.PUT, req,
200, 201, 201, 201) # HEAD PUT PUT PUT
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
'X-Account-Device': None}
])
def test_PUT_x_account_headers_with_more_account_replicas(self):
self.app.account_ring.set_replicas(4)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.PUT, req,
200, 201, 201, 201) # HEAD PUT PUT PUT
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
def test_DELETE_x_account_headers_with_fewer_account_replicas(self):
self.app.account_ring.set_replicas(2)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.DELETE, req,
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000',
'X-Account-Partition': '0',
'X-Account-Device': 'sda'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': None,
'X-Account-Partition': None,
'X-Account-Device': None}
])
def test_DELETE_x_account_headers_with_more_account_replicas(self):
self.app.account_ring.set_replicas(4)
req = Request.blank('/v1/a/c', headers={'': ''})
controller = proxy_server.ContainerController(self.app, 'a', 'c')
seen_headers = self._gather_x_account_headers(
controller.DELETE, req,
200, 204, 204, 204) # HEAD DELETE DELETE DELETE
self.assertEqual(seen_headers, [
{'X-Account-Host': '10.0.0.0:1000,10.0.0.3:1003',
'X-Account-Partition': '0',
'X-Account-Device': 'sda,sdd'},
{'X-Account-Host': '10.0.0.1:1001',
'X-Account-Partition': '0',
'X-Account-Device': 'sdb'},
{'X-Account-Host': '10.0.0.2:1002',
'X-Account-Partition': '0',
'X-Account-Device': 'sdc'}
])
def test_PUT_backed_x_timestamp_header(self):
timestamps = []
def capture_timestamps(*args, **kwargs):
headers = kwargs['headers']
timestamps.append(headers.get('X-Timestamp'))
req = Request.blank('/v1/a/c', method='PUT', headers={'': ''})
with save_globals():
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
# sanity
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
            self.assertTrue(re.match(r'[0-9]{10}\.[0-9]{5}', timestamp))
def test_DELETE_backed_x_timestamp_header(self):
timestamps = []
def capture_timestamps(*args, **kwargs):
headers = kwargs['headers']
timestamps.append(headers.get('X-Timestamp'))
req = Request.blank('/v1/a/c', method='DELETE', headers={'': ''})
self.app.update_request(req)
with save_globals():
new_connect = set_http_connect(200, # account existence check
201, 201, 201,
give_connect=capture_timestamps)
resp = self.app.handle_request(req)
# sanity
self.assertRaises(StopIteration, new_connect.code_iter.next)
self.assertEqual(2, resp.status_int // 100)
timestamps.pop(0) # account existence check
self.assertEqual(3, len(timestamps))
for timestamp in timestamps:
self.assertEqual(timestamp, timestamps[0])
            self.assertTrue(re.match(r'[0-9]{10}\.[0-9]{5}', timestamp))
def test_node_read_timeout_retry_to_container(self):
with save_globals():
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'GET'})
self.app.node_timeout = 0.1
set_http_connect(200, 200, 200, body='abcdef', slow=[1.0, 1.0])
resp = req.get_response(self.app)
got_exc = False
try:
resp.body
except ChunkReadTimeout:
got_exc = True
self.assertTrue(got_exc)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
def assert_status_map(self, method, statuses, expected, env_expected=None,
headers=None, **kwargs):
headers = headers or {}
with save_globals():
set_http_connect(*statuses, **kwargs)
req = Request.blank('/v1/a', {}, headers=headers)
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
if env_expected:
self.assertEqual(res.environ['swift.account/a']['status'],
env_expected)
set_http_connect(*statuses)
req = Request.blank('/v1/a/', {})
self.app.update_request(req)
res = method(req)
self.assertEqual(res.status_int, expected)
if env_expected:
self.assertEqual(res.environ['swift.account/a']['status'],
env_expected)
def test_OPTIONS(self):
with save_globals():
self.app.allow_account_management = False
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 4)
# Test a CORS OPTIONS request (i.e. including Origin and
# Access-Control-Request-Method headers)
self.app.allow_account_management = False
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank(
'/v1/account', {'REQUEST_METHOD': 'OPTIONS'},
headers={'Origin': 'http://foo.com',
'Access-Control-Request-Method': 'GET'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 4)
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = controller.OPTIONS(req)
self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split():
self.assertTrue(
verb in resp.headers['Allow'])
self.assertEqual(len(resp.headers['Allow'].split(', ')), 6)
def test_GET(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
# GET returns after the first successful call to an Account Server
self.assert_status_map(controller.GET, (200,), 200, 200)
self.assert_status_map(controller.GET, (503, 200), 200, 200)
self.assert_status_map(controller.GET, (503, 503, 200), 200, 200)
self.assert_status_map(controller.GET, (204,), 204, 204)
self.assert_status_map(controller.GET, (503, 204), 204, 204)
self.assert_status_map(controller.GET, (503, 503, 204), 204, 204)
self.assert_status_map(controller.GET, (404, 200), 200, 200)
self.assert_status_map(controller.GET, (404, 404, 200), 200, 200)
self.assert_status_map(controller.GET, (404, 503, 204), 204, 204)
            # If the account servers fail and autocreate is False, return
            # the majority response
self.assert_status_map(controller.GET, (404, 404, 404), 404, 404)
self.assert_status_map(controller.GET, (404, 404, 503), 404, 404)
self.assert_status_map(controller.GET, (404, 503, 503), 503)
self.app.memcache = FakeMemcacheReturnsNone()
self.assert_status_map(controller.GET, (404, 404, 404), 404, 404)
def test_GET_autocreate(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
self.assertFalse(self.app.account_autocreate)
            # Repeat the test with autocreate = False and 404 from all nodes
self.assert_status_map(controller.GET,
(404, 404, 404), 404)
self.assert_status_map(controller.GET,
(404, 503, 404), 404)
# When autocreate is True, if none of the nodes respond 2xx
            # and a quorum of the nodes responded 404,
            # ALL nodes are asked to create the account.
# If successful, the GET request is repeated.
controller.app.account_autocreate = True
self.assert_status_map(controller.GET,
(404, 404, 404), 204)
self.assert_status_map(controller.GET,
(404, 503, 404), 204)
# We always return 503 if no majority between 4xx, 3xx or 2xx found
self.assert_status_map(controller.GET,
(500, 500, 400), 503)
def test_HEAD(self):
# Same behaviour as GET
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.HEAD, (200,), 200, 200)
self.assert_status_map(controller.HEAD, (503, 200), 200, 200)
self.assert_status_map(controller.HEAD, (503, 503, 200), 200, 200)
self.assert_status_map(controller.HEAD, (204,), 204, 204)
self.assert_status_map(controller.HEAD, (503, 204), 204, 204)
self.assert_status_map(controller.HEAD, (204, 503, 503), 204, 204)
self.assert_status_map(controller.HEAD, (204,), 204, 204)
self.assert_status_map(controller.HEAD, (404, 404, 404), 404, 404)
self.assert_status_map(controller.HEAD, (404, 404, 200), 200, 200)
self.assert_status_map(controller.HEAD, (404, 200), 200, 200)
self.assert_status_map(controller.HEAD, (404, 404, 503), 404, 404)
self.assert_status_map(controller.HEAD, (404, 503, 503), 503)
self.assert_status_map(controller.HEAD, (404, 503, 204), 204, 204)
def test_HEAD_autocreate(self):
# Same behaviour as GET
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.HEAD,
(404, 404, 404), 404)
controller.app.account_autocreate = True
self.assert_status_map(controller.HEAD,
(404, 404, 404), 204)
self.assert_status_map(controller.HEAD,
(500, 404, 404), 204)
# We always return 503 if no majority between 4xx, 3xx or 2xx found
self.assert_status_map(controller.HEAD,
(500, 500, 400), 503)
def test_POST_autocreate(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
# first test with autocreate being False
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.POST,
(404, 404, 404), 404)
# next turn it on and test the account being created, then updated
controller.app.account_autocreate = True
self.assert_status_map(
controller.POST,
(404, 404, 404, 202, 202, 202, 201, 201, 201), 201)
# statuses consumed in order: account_info, account PUT (autocreate), account POST
self.assert_status_map(
controller.POST,
(404, 404, 503, 201, 201, 503, 204, 204, 504), 204)
# what if create fails
self.assert_status_map(
controller.POST,
(404, 404, 404, 403, 403, 403, 400, 400, 400), 400)
def test_POST_autocreate_with_sysmeta(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
self.app.memcache = FakeMemcacheReturnsNone()
# first test with autocreate being False
self.assertFalse(self.app.account_autocreate)
self.assert_status_map(controller.POST,
(404, 404, 404), 404)
# next turn it on and test the account being created, then updated
controller.app.account_autocreate = True
calls = []
callback = _make_callback_func(calls)
key, value = 'X-Account-Sysmeta-Blah', 'something'
headers = {key: value}
self.assert_status_map(
controller.POST,
(404, 404, 404, 202, 202, 202, 201, 201, 201), 201,
# POST, autocreate PUT, POST again
headers=headers,
give_connect=callback)
self.assertEqual(9, len(calls))
for call in calls:
self.assertTrue(key in call['headers'],
'%s call, key %s missing in headers %s' %
(call['method'], key, call['headers']))
self.assertEqual(value, call['headers'][key])
def test_connection_refused(self):
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = 1 # can't connect on this port
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 503)
def test_other_socket_error(self):
self.app.account_ring.get_nodes('account')
for dev in self.app.account_ring.devs:
dev['ip'] = '127.0.0.1'
dev['port'] = -1 # invalid port number
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/account', environ={'REQUEST_METHOD': 'HEAD'})
self.app.update_request(req)
resp = controller.HEAD(req)
self.assertEqual(resp.status_int, 503)
def test_response_get_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/a?format=json')
self.app.update_request(req)
res = controller.GET(req)
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_response_head_accept_ranges_header(self):
with save_globals():
set_http_connect(200, 200, body='{}')
controller = proxy_server.AccountController(self.app, 'account')
req = Request.blank('/v1/a?format=json')
self.app.update_request(req)
res = controller.HEAD(req)
res.body
self.assertTrue('accept-ranges' in res.headers)
self.assertEqual(res.headers['accept-ranges'], 'bytes')
def test_PUT(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a', {})
req.content_length = 0
self.app.update_request(req)
res = controller.PUT(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 405)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 201)
test_status_map((201, 201, 500), 201)
test_status_map((201, 500, 500), 503)
test_status_map((204, 500, 404), 503)
def test_PUT_max_account_name_length(self):
with save_globals():
self.app.allow_account_management = True
limit = constraints.MAX_ACCOUNT_NAME_LENGTH
controller = proxy_server.AccountController(self.app, '1' * limit)
self.assert_status_map(controller.PUT, (201, 201, 201), 201)
controller = proxy_server.AccountController(
self.app, '2' * (limit + 1))
self.assert_status_map(controller.PUT, (201, 201, 201), 400)
def test_PUT_connect_exceptions(self):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.PUT, (201, 201, -1), 201)
self.assert_status_map(controller.PUT, (201, -1, -1), 503)
self.assert_status_map(controller.PUT, (503, 503, -1), 503)
def test_PUT_status(self):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'account')
self.assert_status_map(controller.PUT, (201, 201, 202), 202)
def test_PUT_metadata(self):
self.metadata_helper('PUT')
def test_POST_metadata(self):
self.metadata_helper('POST')
def metadata_helper(self, method):
for test_header, test_value in (
('X-Account-Meta-TestHeader', 'TestValue'),
('X-Account-Meta-TestHeader', ''),
('X-Remove-Account-Meta-TestHeader', 'anything')):
test_errors = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
if path == '/a':
find_header = test_header
find_value = test_value
if find_header.lower().startswith('x-remove-'):
find_header = \
find_header.lower().replace('-remove', '', 1)
find_value = ''
for k, v in headers.items():
if k.lower() == find_header.lower() and \
v == find_value:
break
else:
test_errors.append('%s: %s not in %s' %
(find_header, find_value, headers))
with save_globals():
self.app.allow_account_management = True
controller = \
proxy_server.AccountController(self.app, 'a')
set_http_connect(201, 201, 201, give_connect=test_connect)
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': method},
headers={test_header: test_value})
self.app.update_request(req)
getattr(controller, method)(req)
self.assertEqual(test_errors, [])
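# (Added note) The helper above relies on the convention, mirrored in
# test_connect, that a client-supplied X-Remove-Account-Meta-<key> header is
# forwarded to the account server as X-Account-Meta-<key> with an empty
# value, which is what deletes that metadata item.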
def test_PUT_bad_metadata(self):
self.bad_metadata_helper('PUT')
def test_POST_bad_metadata(self):
self.bad_metadata_helper('POST')
def bad_metadata_helper(self, method):
with save_globals():
self.app.allow_account_management = True
controller = proxy_server.AccountController(self.app, 'a')
set_http_connect(200, 201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
('a' * constraints.MAX_META_NAME_LENGTH): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank(
'/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-' +
('a' * (constraints.MAX_META_NAME_LENGTH + 1)): 'v'})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
'a' * constraints.MAX_META_VALUE_LENGTH})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers={'X-Account-Meta-Too-Long':
'a' * (constraints.MAX_META_VALUE_LENGTH + 1)})
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers = {}
for x in range(constraints.MAX_META_COUNT + 1):
headers['X-Account-Meta-%d' % x] = 'v'
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
set_http_connect(201, 201, 201)
headers = {}
header_value = 'a' * constraints.MAX_META_VALUE_LENGTH
size = 0
x = 0
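# (Added note) The 4 below is assumed to cover the four-digit '%04d' suffix
# of each header name, on the expectation that only the part after the
# X-Account-Meta- prefix counts toward MAX_META_OVERALL_SIZE.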
while size < (constraints.MAX_META_OVERALL_SIZE - 4
- constraints.MAX_META_VALUE_LENGTH):
size += 4 + constraints.MAX_META_VALUE_LENGTH
headers['X-Account-Meta-%04d' % x] = header_value
x += 1
if constraints.MAX_META_OVERALL_SIZE - size > 1:
headers['X-Account-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size - 1)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 201)
set_http_connect(201, 201, 201)
headers['X-Account-Meta-a'] = \
'a' * (constraints.MAX_META_OVERALL_SIZE - size)
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method},
headers=headers)
self.app.update_request(req)
resp = getattr(controller, method)(req)
self.assertEqual(resp.status_int, 400)
def test_DELETE(self):
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a', {'REQUEST_METHOD': 'DELETE'})
req.content_length = 0
self.app.update_request(req)
res = controller.DELETE(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 405)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 201)
test_status_map((201, 201, 500), 201)
test_status_map((201, 500, 500), 503)
test_status_map((204, 500, 404), 503)
def test_DELETE_with_query_string(self):
# Extra safety in case someone typos a query string for an
# account-level DELETE request that was really meant to be caught by
# some middleware.
with save_globals():
controller = proxy_server.AccountController(self.app, 'account')
def test_status_map(statuses, expected, **kwargs):
set_http_connect(*statuses, **kwargs)
self.app.memcache.store = {}
req = Request.blank('/v1/a?whoops',
environ={'REQUEST_METHOD': 'DELETE'})
req.content_length = 0
self.app.update_request(req)
res = controller.DELETE(req)
expected = str(expected)
self.assertEqual(res.status[:len(expected)], expected)
test_status_map((201, 201, 201), 400)
self.app.allow_account_management = True
test_status_map((201, 201, 201), 400)
test_status_map((201, 201, 500), 400)
test_status_map((201, 500, 500), 400)
test_status_map((204, 500, 404), 400)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountControllerFakeGetResponse(unittest.TestCase):
"""
Test all the faked-out GET responses for accounts that don't exist. They
have to match the responses for empty accounts that really exist.
"""
def setUp(self):
conf = {'account_autocreate': 'yes'}
self.app = proxy_server.Application(conf, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
self.app.memcache = FakeMemcacheReturnsNone()
def test_GET_autocreate_accept_json(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank(
'/v1/a', headers={'Accept': 'application/json'},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/json; charset=utf-8',
resp.headers['Content-Type'])
self.assertEqual("[]", resp.body)
def test_GET_autocreate_format_json(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=json',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=json'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/json; charset=utf-8',
resp.headers['Content-Type'])
self.assertEqual("[]", resp.body)
def test_GET_autocreate_accept_xml(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "text/xml"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('text/xml; charset=utf-8',
resp.headers['Content-Type'])
empty_xml_listing = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<account name="a">\n</account>')
self.assertEqual(empty_xml_listing, resp.body)
def test_GET_autocreate_format_xml(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=xml',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=xml'})
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
self.assertEqual('application/xml; charset=utf-8',
resp.headers['Content-Type'])
empty_xml_listing = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<account name="a">\n</account>')
self.assertEqual(empty_xml_listing, resp.body)
def test_GET_autocreate_accept_unknown(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a', headers={"Accept": "mystery/meat"},
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a'})
resp = req.get_response(self.app)
self.assertEqual(406, resp.status_int)
def test_GET_autocreate_format_invalid_utf8(self):
with save_globals():
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
req = Request.blank('/v1/a?format=\xff\xfe',
environ={'REQUEST_METHOD': 'GET',
'PATH_INFO': '/v1/a',
'QUERY_STRING': 'format=\xff\xfe'})
resp = req.get_response(self.app)
self.assertEqual(400, resp.status_int)
def test_account_acl_header_access(self):
acl = {
'admin': ['AUTH_alice'],
'read-write': ['AUTH_bob'],
'read-only': ['AUTH_carol'],
}
prefix = get_sys_meta_prefix('account')
privileged_headers = {(prefix + 'core-access-control'): format_acl(
version=2, acl_dict=acl)}
app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
with save_globals():
# Mock account server will provide privileged information (ACLs)
set_http_connect(200, 200, 200, headers=privileged_headers)
req = Request.blank('/v1/a', environ={'REQUEST_METHOD': 'GET'})
resp = app.handle_request(req)
# Not a swift_owner -- ACLs should NOT be in response
header = 'X-Account-Access-Control'
self.assertTrue(header not in resp.headers, '%r was in %r' % (
header, resp.headers))
# Same setup -- mock acct server will provide ACLs
set_http_connect(200, 200, 200, headers=privileged_headers)
req = Request.blank('/v1/a', environ={'REQUEST_METHOD': 'GET',
'swift_owner': True})
resp = app.handle_request(req)
# For a swift_owner, the ACLs *should* be in response
self.assertTrue(header in resp.headers, '%r not in %r' % (
header, resp.headers))
def test_account_acls_through_delegation(self):
# Define a way to grab the requests sent out from the AccountController
# to the Account Server, and a way to inject responses we'd like the
# Account Server to return.
resps_to_send = []
@contextmanager
def patch_account_controller_method(verb):
old_method = getattr(proxy_server.AccountController, verb)
new_method = lambda self, req, *_, **__: resps_to_send.pop(0)
try:
setattr(proxy_server.AccountController, verb, new_method)
yield
finally:
setattr(proxy_server.AccountController, verb, old_method)
def make_test_request(http_method, swift_owner=True):
env = {
'REQUEST_METHOD': http_method,
'swift_owner': swift_owner,
}
acl = {
'admin': ['foo'],
'read-write': ['bar'],
'read-only': ['bas'],
}
headers = {} if http_method in ('GET', 'HEAD') else {
'x-account-access-control': format_acl(version=2, acl_dict=acl)
}
return Request.blank('/v1/a', environ=env, headers=headers)
# Our AccountController will invoke methods to communicate with the
# Account Server, and they will return responses like these:
def make_canned_response(http_method):
acl = {
'admin': ['foo'],
'read-write': ['bar'],
'read-only': ['bas'],
}
headers = {'x-account-sysmeta-core-access-control': format_acl(
version=2, acl_dict=acl)}
canned_resp = Response(headers=headers)
canned_resp.environ = {
'PATH_INFO': '/acct',
'REQUEST_METHOD': http_method,
}
resps_to_send.append(canned_resp)
app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing())
app.allow_account_management = True
ext_header = 'x-account-access-control'
with patch_account_controller_method('GETorHEAD_base'):
# GET/HEAD requests should remap sysmeta headers from acct server
for verb in ('GET', 'HEAD'):
make_canned_response(verb)
req = make_test_request(verb)
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
# swift_owner = False: GET/HEAD shouldn't return sensitive info
make_canned_response(verb)
req = make_test_request(verb, swift_owner=False)
resp = app.handle_request(req)
h = resp.headers
self.assertEqual(None, h.get(ext_header))
# swift_owner unset: GET/HEAD shouldn't return sensitive info
make_canned_response(verb)
req = make_test_request(verb, swift_owner=False)
del req.environ['swift_owner']
resp = app.handle_request(req)
h = resp.headers
self.assertEqual(None, h.get(ext_header))
# Verify that PUT/POST requests remap sysmeta headers from acct server
with patch_account_controller_method('make_requests'):
make_canned_response('PUT')
req = make_test_request('PUT')
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
make_canned_response('POST')
req = make_test_request('POST')
resp = app.handle_request(req)
h = parse_acl(version=2, data=resp.headers.get(ext_header))
self.assertEqual(h['admin'], ['foo'])
self.assertEqual(h['read-write'], ['bar'])
self.assertEqual(h['read-only'], ['bas'])
class FakeObjectController(object):
def __init__(self):
self.app = self
self.logger = self
self.account_name = 'a'
self.container_name = 'c'
self.object_name = 'o'
self.trans_id = 'tx1'
self.object_ring = FakeRing()
self.node_timeout = 1
self.rate_limit_after_segment = 3
self.rate_limit_segments_per_sec = 2
self.GETorHEAD_base_args = []
def exception(self, *args):
self.exception_args = args
self.exception_info = sys.exc_info()
def GETorHEAD_base(self, *args):
self.GETorHEAD_base_args.append(args)
req = args[0]
path = args[4]
body = data = path[-1] * int(path[-1])
if req.range:
r = req.range.ranges_for_length(len(data))
if r:
(start, stop) = r[0]
body = data[start:stop]
resp = Response(app_iter=iter(body))
return resp
def iter_nodes(self, ring, partition):
for node in ring.get_part_nodes(partition):
yield node
for node in ring.get_more_nodes(partition):
yield node
def sort_nodes(self, nodes):
return nodes
def set_node_timing(self, node, timing):
return
class TestProxyObjectPerformance(unittest.TestCase):
def setUp(self):
# This is just a simple test that can be used to verify and debug the
# various data paths between the proxy server and the object
# server. Used as a playground to debug socket buffer sizes.
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# Client is transmitting in 2 MB chunks
fd = sock.makefile('wb', 2 * 1024 * 1024)
# Small, fast for testing
obj_len = 2 * 64 * 1024
# Use 1 GB or more for measurements
# obj_len = 2 * 512 * 1024 * 1024
self.path = '/v1/a/c/o.large'
fd.write('PUT %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'Content-Length: %s\r\n'
'Content-Type: application/octet-stream\r\n'
'\r\n' % (self.path, str(obj_len)))
fd.write('a' * obj_len)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
self.obj_len = obj_len
def test_GET_debug_large_file(self):
for i in range(10):
start = time.time()
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
# Client is reading in 2 MB chunks
fd = sock.makefile('wb', 2 * 1024 * 1024)
fd.write('GET %s HTTP/1.1\r\n'
'Host: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n'
'\r\n' % self.path)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
total = 0
while True:
buf = fd.read(100000)
if not buf:
break
total += len(buf)
self.assertEqual(total, self.obj_len)
end = time.time()
print("Run %02d took %07.03f" % (i, end - start))
@patch_policies([StoragePolicy(0, 'migrated', object_ring=FakeRing()),
StoragePolicy(1, 'ernie', True, object_ring=FakeRing()),
StoragePolicy(2, 'deprecated', is_deprecated=True,
object_ring=FakeRing()),
StoragePolicy(3, 'bert', object_ring=FakeRing())])
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_registered_defaults(self):
proxy_server.Application({}, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing())
si = utils.get_swift_info()['swift']
self.assertTrue('version' in si)
self.assertEqual(si['max_file_size'], constraints.MAX_FILE_SIZE)
self.assertEqual(si['max_meta_name_length'],
constraints.MAX_META_NAME_LENGTH)
self.assertEqual(si['max_meta_value_length'],
constraints.MAX_META_VALUE_LENGTH)
self.assertEqual(si['max_meta_count'], constraints.MAX_META_COUNT)
self.assertEqual(si['max_header_size'], constraints.MAX_HEADER_SIZE)
self.assertEqual(si['max_meta_overall_size'],
constraints.MAX_META_OVERALL_SIZE)
self.assertEqual(si['account_listing_limit'],
constraints.ACCOUNT_LISTING_LIMIT)
self.assertEqual(si['container_listing_limit'],
constraints.CONTAINER_LISTING_LIMIT)
self.assertEqual(si['max_account_name_length'],
constraints.MAX_ACCOUNT_NAME_LENGTH)
self.assertEqual(si['max_container_name_length'],
constraints.MAX_CONTAINER_NAME_LENGTH)
self.assertEqual(si['max_object_name_length'],
constraints.MAX_OBJECT_NAME_LENGTH)
self.assertTrue('strict_cors_mode' in si)
self.assertEqual(si['allow_account_management'], False)
self.assertEqual(si['account_autocreate'], False)
# This setting is by default excluded by disallowed_sections
self.assertEqual(si['valid_api_versions'],
constraints.VALID_API_VERSIONS)
# this next test is deliberately brittle in order to alert if
# other items are added to swift info
self.assertEqual(len(si), 18)
self.assertTrue('policies' in si)
sorted_pols = sorted(si['policies'], key=operator.itemgetter('name'))
self.assertEqual(len(sorted_pols), 3)
for policy in sorted_pols:
self.assertNotEqual(policy['name'], 'deprecated')
self.assertEqual(sorted_pols[0]['name'], 'bert')
self.assertEqual(sorted_pols[1]['name'], 'ernie')
self.assertEqual(sorted_pols[2]['name'], 'migrated')
class TestSocketObjectVersions(unittest.TestCase):
def setUp(self):
global _test_sockets
self.prolis = prolis = listen(('localhost', 0))
self._orig_prolis = _test_sockets[0]
allowed_headers = ', '.join([
'content-encoding',
'x-object-manifest',
'content-disposition',
'foo'
])
conf = {'devices': _testdir, 'swift_dir': _testdir,
'mount_check': 'false', 'allowed_headers': allowed_headers}
prosrv = versioned_writes.VersionedWritesMiddleware(
proxy_logging.ProxyLoggingMiddleware(
_test_servers[0], conf,
logger=_test_servers[0].logger),
{})
self.coro = spawn(wsgi.server, prolis, prosrv, NullLogger())
# replace global prosrv with one that's filtered with version
# middleware
self.sockets = list(_test_sockets)
self.sockets[0] = prolis
_test_sockets = tuple(self.sockets)
def tearDown(self):
self.coro.kill()
# put the global state back
global _test_sockets
self.sockets[0] = self._orig_prolis
_test_sockets = tuple(self.sockets)
def test_version_manifest(self, oc='versions', vc='vers', o='name'):
versions_to_create = 3
# Create a container for our versioned object testing
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis,
obj2lis, obj3lis) = _test_sockets
pre = quote('%03x' % len(o))
osub = '%s/sub' % o
presub = quote('%03x' % len(osub))
osub = quote(osub)
presub = quote(presub)
oc = quote(oc)
vc = quote(vc)
def put_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\nX-Versions-Location: %s\r\n\r\n'
% (oc, vc))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
headers = put_container()
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
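# (Added note) Roughly, the X-Versions-Location header set above tells the
# versioned_writes middleware to archive the existing revision of an object
# in this container into the container named by vc whenever the object is
# overwritten, and to restore the newest archived revision when the object
# is DELETEd; that is the behaviour the rest of this test exercises.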
def get_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# check that the header was set
headers, body = get_container()
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('X-Versions-Location: %s' % vc, headers)
def put_version_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\n\r\n' % vc)
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# make the container for the object versions
headers = put_version_container()
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
def put(version):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish%s'
'\r\n\r\n%05d\r\n' % (oc, o, version, version))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
def get(container=oc, obj=o):
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n'
'\r\n' % (container, obj))
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# Create the versioned file
headers = put(0)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create the object versions
for version in range(1, versions_to_create):
sleep(.01) # guarantee that the timestamp changes
headers = put(version)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Ensure retrieving the manifest file gets the latest version
headers, body = get()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('Content-Type: text/jibberish%s' % version, headers)
self.assertNotIn('X-Object-Meta-Foo: barbaz', headers)
self.assertEqual(body, '%05d' % version)
def get_version_container():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\n'
'X-Storage-Token: t\r\n\r\n' % vc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
# Ensure we have the right number of versions saved
headers, body = get_version_container()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), versions_to_create - 1)
def delete():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r'
'\nConnection: close\r\nX-Storage-Token: t\r\n\r\n'
% (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
def copy():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('COPY /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: '
't\r\nDestination: %s/copied_name\r\n'
'Content-Length: 0\r\n\r\n' % (oc, o, oc))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# copy a version and make sure the version info is stripped
headers = copy()
exp = 'HTTP/1.1 2' # 2xx series response to the COPY
self.assertEqual(headers[:len(exp)], exp)
def get_copy():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/copied_name HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\n'
'X-Auth-Token: t\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
body = fd.read()
return headers, body
headers, body = get_copy()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertEqual(body, '%05d' % version)
def post():
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('POST /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: '
't\r\nContent-Type: foo/bar\r\nContent-Length: 0\r\n'
'X-Object-Meta-Bar: foo\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
fd.read()
return headers
# post and make sure it's updated
headers = post()
exp = 'HTTP/1.1 2' # 2xx series response to the POST
self.assertEqual(headers[:len(exp)], exp)
headers, body = get()
self.assertIn('Content-Type: foo/bar', headers)
self.assertIn('X-Object-Meta-Bar: foo', headers)
self.assertEqual(body, '%05d' % version)
# check container listing
headers, body = get_container()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
# Delete the object versions
for segment in range(versions_to_create - 1, 0, -1):
headers = delete()
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
# Ensure retrieving the manifest file gets the latest version
headers, body = get()
exp = 'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn('Content-Type: text/jibberish%s' % (segment - 1),
headers)
self.assertEqual(body, '%05d' % (segment - 1))
# Ensure we have the right number of versions saved
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r'
'\n' % (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), segment - 1)
# there is now one version left (in the manifest)
# Ensure we have no saved versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204 No Content'
self.assertEqual(headers[:len(exp)], exp)
# delete the last version
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
# Ensure it's all gone
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 404'
self.assertEqual(headers[:len(exp)], exp)
# make sure manifest files will be ignored
for _junk in range(1, versions_to_create):
sleep(.01) # guarantee that the timestamp changes
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 0\r\n'
'Content-Type: text/jibberish0\r\n'
'Foo: barbaz\r\nX-Object-Manifest: %s/%s/\r\n\r\n'
% (oc, o, oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nhost: '
'localhost\r\nconnection: close\r\nx-auth-token: t\r\n\r\n'
% (vc, pre, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 204 No Content'
self.assertEqual(headers[:len(exp)], exp)
# DELETE v1/a/c/obj shouldn't delete v1/a/c/obj/sub versions
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\n00000\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\n00001\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\nsub1\r\n' % (oc, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%s/%s HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 4\r\nContent-Type: text/jibberish0\r\n'
'Foo: barbaz\r\n\r\nsub2\r\n' % (oc, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % (oc, o))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('GET /v1/a/%s?prefix=%s%s/ HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Auth-Token: t\r\n\r\n'
% (vc, presub, osub))
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx series response
self.assertEqual(headers[:len(exp)], exp)
body = fd.read()
versions = [x for x in body.split('\n') if x]
self.assertEqual(len(versions), 1)
# Check for when the versions target container doesn't exist
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n'
'Content-Length: 0\r\nX-Versions-Location: none\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create the versioned file
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\n\r\n00000\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
# Create another version
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('PUT /v1/a/%swhoops/foo HTTP/1.1\r\nHost: '
'localhost\r\nConnection: close\r\nX-Storage-Token: '
't\r\nContent-Length: 5\r\n\r\n00001\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 412'
self.assertEqual(headers[:len(exp)], exp)
# Delete the object
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile()
fd.write('DELETE /v1/a/%swhoops/foo HTTP/1.1\r\nHost: localhost\r\n'
'Connection: close\r\nX-Storage-Token: t\r\n\r\n' % oc)
fd.flush()
headers = readuntil2crlfs(fd)
exp = 'HTTP/1.1 2' # 2xx response
self.assertEqual(headers[:len(exp)], exp)
def test_version_manifest_utf8(self):
oc = '0_oc_non_ascii\xc2\xa3'
vc = '0_vc_non_ascii\xc2\xa3'
o = '0_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_container(self):
oc = '1_oc_non_ascii\xc2\xa3'
vc = '1_vc_ascii'
o = '1_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_version_container(self):
oc = '2_oc_ascii'
vc = '2_vc_non_ascii\xc2\xa3'
o = '2_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_containers(self):
oc = '3_oc_non_ascii\xc2\xa3'
vc = '3_vc_non_ascii\xc2\xa3'
o = '3_o_ascii'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_object(self):
oc = '4_oc_ascii'
vc = '4_vc_ascii'
o = '4_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_version_container_utf_object(self):
oc = '5_oc_ascii'
vc = '5_vc_non_ascii\xc2\xa3'
o = '5_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
def test_version_manifest_utf8_container_utf_object(self):
oc = '6_oc_non_ascii\xc2\xa3'
vc = '6_vc_ascii'
o = '6_o_non_ascii\xc2\xa3'
self.test_version_manifest(oc, vc, o)
if __name__ == '__main__':
setup()
try:
unittest.main()
finally:
teardown()
| thiagodasilva/swift | test/unit/proxy/test_server.py | Python | apache-2.0 | 417,412 | [
"MOOSE"
] | df3e150f8c114cfe59d9b21103de0aa3d2337a0bfa64cb03a6b8a06abd197a74 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Tools/Database Processing/Extract Information from Names"""
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# gnome/gtk
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import URL_MANUAL_PAGE
from gramps.gui.utils import ProgressMeter
from gramps.gui.plug import tool
from gramps.gui.dialog import OkDialog
from gramps.gui.managedwindow import ManagedWindow
from gramps.gui.display import display_help
from gramps.gui.glade import Glade
from gramps.gen.lib import NameOriginType, Surname
from gramps.gen.db import DbTxn
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Extract_Information_from_Names')
#-------------------------------------------------------------------------
#
# constants
#
#-------------------------------------------------------------------------
# List of possible surname prefixes. Notice that you must run the tool
# multiple times for prefixes such as "van der".
PREFIX_LIST = [
"de", "van", "von", "di", "le", "du", "dela", "della",
"des", "vande", "ten", "da", "af", "den", "das", "dello",
"del", "en", "ein", "el" "et", "les", "lo", "los", "un",
"um", "una", "uno", "der", "ter", "te", "die"]
CONNECTOR_LIST = ['e', 'y', ]
CONNECTOR_LIST_NONSPLIT = ['de', 'van']
_title_re = re.compile(r"^ ([A-Za-z][A-Za-z]+\.) \s+ (.+) $", re.VERBOSE)
_nick_re = re.compile(r"(.+) \s* [(\"] (.+) [)\"]", re.VERBOSE)
#-------------------------------------------------------------------------
#
# Search each name in the database, and compare the firstname against the
# form of "Name (Nickname)". If it matches, change the first name entry
# to "Name" and add "Nickname" into the nickname field. Also, search for
# surname prefixes. If found, change the name entry and put the prefix in
# the name prefix field.
#
#-------------------------------------------------------------------------
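# Illustrative sketch (added; not part of the original tool). Assuming the
# sample strings below, the two regular expressions defined above behave as
# follows:
#
# >>> _title_re.match("Dr. John").groups()
# ('Dr.', 'John')
# >>> _nick_re.match('John "Jack"').groups()
# ('John ', 'Jack')
#
# The trailing space in the first nickname group is stripped later, when the
# tool writes the cleaned given name back onto the person's primary name.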
class PatchNames(tool.BatchTool, ManagedWindow):
titleid = 1
nickid = 2
pref1id = 3
compid = 4
def __init__(self, dbstate, user, options_class, name, callback=None):
uistate = user.uistate
self.label = _('Name and title extraction tool')
ManagedWindow.__init__(self, uistate, [], self.__class__)
self.set_window(Gtk.Window(), Gtk.Label(), '')
tool.BatchTool.__init__(self, dbstate, user, options_class, name)
if self.fail:
self.close()
return
winprefix = Gtk.Dialog(
title=_("Default prefix and connector settings"),
transient_for=self.uistate.window, modal=True,
destroy_with_parent=True)
winprefix.add_button(_('_OK'), Gtk.ResponseType.ACCEPT)
winprefix.vbox.set_spacing(5)
hboxpref = Gtk.Box()
label = Gtk.Label(label=_('Prefixes to search for:'))
hboxpref.pack_start(label, False, False, 5)
self.prefixbox = Gtk.Entry()
self.prefixbox.set_text(', '.join(PREFIX_LIST))
hboxpref.pack_start(self.prefixbox, True, True, 0)
winprefix.vbox.pack_start(hboxpref, True, True, 0)
hboxcon = Gtk.Box()
label = Gtk.Label(label=_('Connectors splitting surnames:'))
hboxcon.pack_start(label, False, False, 5)
self.conbox = Gtk.Entry()
self.conbox.set_text(', '.join(CONNECTOR_LIST))
hboxcon.pack_start(self.conbox, True, True, 0)
winprefix.vbox.pack_start(hboxcon, True, True, 0)
hboxconns = Gtk.Box()
label = Gtk.Label(label=_('Connectors not splitting surnames:'))
hboxconns.pack_start(label, False, False, 5)
self.connsbox = Gtk.Entry()
self.connsbox.set_text(', '.join(CONNECTOR_LIST_NONSPLIT))
hboxconns.pack_start(self.connsbox, True, True, 0)
winprefix.vbox.pack_start(hboxconns, True, True, 0)
winprefix.resize(700, 100)
winprefix.show_all()
response = winprefix.run()
self.prefix_list = self.prefixbox.get_text().split(',')
self.prefix_list = list(map(strip, self.prefix_list))
self.prefixbox = None
self.connector_list = self.conbox.get_text().split(',')
self.connector_list = list(map(strip, self.connector_list))
self.conbox = None
self.connector_list_nonsplit = self.connsbox.get_text().split(',')
self.connector_list_nonsplit = list(
map(strip, self.connector_list_nonsplit))
self.connsbox = None
# Find a prefix in the first_name
self._fn_prefix_re = re.compile(
r"(\S+)\s+(%s)\s*$" % '|'.join(self.prefix_list), re.IGNORECASE)
# Find a prefix in the surname
self._sn_prefix_re = re.compile(
r"^\s*(%s)\s+(.+)" % '|'.join(self.prefix_list), re.IGNORECASE)
# Find a connector in the surname
self._sn_con_re = re.compile(
r"^\s*(.+)\s+(%s)\s+(.+)" % '|'.join(self.connector_list),
re.IGNORECASE)
winprefix.destroy()
self.cb = callback
self.handle_to_action = {}
self.progress = ProgressMeter(
_('Extracting Information from Names'), '',
parent=self.uistate.window)
self.progress.set_pass(_('Analyzing names'),
self.db.get_number_of_people())
for person in self.db.iter_people():
key = person.handle
name = person.get_primary_name()
first = name.get_first_name()
sname = name.get_surname()
old_prefix = []
old_surn = []
old_con = []
old_prim = []
old_orig = []
for surn in name.get_surname_list():
old_prefix.append(surn.get_prefix())
old_surn.append(surn.get_surname())
old_con.append(surn.get_connector())
old_prim.append(surn.get_primary())
old_orig.append(surn.get_origintype())
if name.get_title():
old_title = [name.get_title()]
else:
old_title = []
new_title = []
match = _title_re.match(first)
while match:
groups = match.groups()
first = groups[1]
new_title.append(groups[0])
match = _title_re.match(first)
matchnick = _nick_re.match(first)
if new_title:
titleval = (" ".join(old_title + new_title), first)
if key in self.handle_to_action:
self.handle_to_action[key][self.titleid] = titleval
else:
self.handle_to_action[key] = {self.titleid: titleval}
elif matchnick:
# We check for a nickname which, like a title, changes the given name
groups = matchnick.groups()
nickval = (groups[0], groups[1])
if key in self.handle_to_action:
self.handle_to_action[key][self.nickid] = nickval
else:
self.handle_to_action[key] = {self.nickid: nickval}
else:
# Try to find a surname prefix in the given name; this also
# changes the given name
match = self._fn_prefix_re.match(first)
if match:
groups = match.groups()
if old_prefix[0]:
# Put the found prefix before the old prefix
new_prefix = " ".join([groups[1], old_prefix[0]])
else:
new_prefix = groups[1]
pref1val = (groups[0], new_prefix, groups[1])
if key in self.handle_to_action:
self.handle_to_action[key][self.pref1id] = pref1val
else:
self.handle_to_action[key] = {self.pref1id: pref1val}
# Check for GEDCOM import of compound surnames
if len(old_surn) == 1 and old_con[0] == '':
prefixes = old_prefix[0].split(',')
surnames = old_surn[0].split(',')
if len(prefixes) > 1 and len(prefixes) == len(surnames):
# Assume a list of prefixes and a list of surnames
prefixes = list(map(strip, prefixes))
surnames = list(map(strip, surnames))
primaries = [False] * len(prefixes)
primaries[0] = True
origs = []
for ind in range(len(prefixes)):
origs.append(NameOriginType())
origs[0] = old_orig[0]
compoundval = (surnames, prefixes, [''] * len(prefixes),
primaries, origs)
if key in self.handle_to_action:
self.handle_to_action[key][self.compid] = compoundval
else:
self.handle_to_action[key] = {self.compid: compoundval}
#we cannot check compound surnames, so continue the loop
continue
# Next, try to split the surname into compounds: prefix, surname, connector
found = False
new_prefix_list = []
new_surname_list = []
new_connector_list = []
new_prim_list = []
new_orig_list = []
ind = 0
cont = True
for pref, surn, con, prim, orig in zip(
old_prefix, old_surn, old_con, old_prim, old_orig):
surnval = surn.split()
if surnval == []:
new_prefix_list.append(pref)
new_surname_list.append('')
new_connector_list.append(con)
new_prim_list.append(prim)
new_orig_list.append(orig)
cont = False
continue
val = surnval.pop(0)
while cont:
new_prefix_list.append(pref)
new_surname_list.append('')
new_connector_list.append(con)
new_prim_list.append(prim)
new_orig_list.append(orig)
while cont and (val.lower() in self.prefix_list):
found = True
if new_prefix_list[-1]:
new_prefix_list[-1] += ' ' + val
else:
new_prefix_list[-1] = val
try:
val = surnval.pop(0)
except IndexError:
val = ''
cont = False
#after prefix we have a surname
if cont:
new_surname_list[-1] = val
try:
val = surnval.pop(0)
except IndexError:
val = ''
cont = False
#if value after surname indicates continue, then continue
while cont and (
val.lower() in self.connector_list_nonsplit):
#add this val to the current surname
new_surname_list[-1] += ' ' + val
try:
val = surnval.pop(0)
except IndexError:
val = ''
cont = False
# if previous is non-splitting connector, then add new val
# to current surname
if cont and (new_surname_list[-1].split()[-1].lower()
in self.connector_list_nonsplit):
new_surname_list[-1] += ' ' + val
try:
val = surnval.pop(0)
except IndexError:
val = ''
cont = False
# If the next value is a splitting connector, record it as the connector
if cont and val.lower() in self.connector_list:
found = True
if new_connector_list[-1]:
new_connector_list[-1] += ' ' + val
else:
new_connector_list[-1] = val
try:
val = surnval.pop(0)
except IndexError:
val = ''
cont = False
# initialize for the next surname in case there are still
# values left
if cont:
found = True # we split surname
pref = ''
con = ''
prim = False
orig = NameOriginType()
ind += 1
if found:
compoundval = (new_surname_list, new_prefix_list,
new_connector_list, new_prim_list,
new_orig_list)
if key in self.handle_to_action:
self.handle_to_action[key][self.compid] = compoundval
else:
self.handle_to_action[key] = {self.compid: compoundval}
self.progress.step()
if self.handle_to_action:
self.display()
else:
self.progress.close()
self.close()
OkDialog(_('No modifications made'),
_("No titles, nicknames or prefixes were found"),
parent=self.uistate.window)
def build_menu_names(self, obj):
return (self.label, None)
def toggled(self, cell, path_string):
path = tuple(map(int, path_string.split(':')))
row = self.model[path]
row[0] = not row[0]
self.model.row_changed(path, row.iter)
def display(self):
self.top = Glade()
window = self.top.toplevel
self.top.connect_signals({
"destroy_passed_object": self.close,
"on_ok_clicked": self.on_ok_clicked,
"on_help_clicked": self.on_help_clicked,
"on_delete_event": self.close})
self.list = self.top.get_object("list")
self.set_window(window, self.top.get_object('title'), self.label)
self.setup_configs("interface.patchnames", 680, 400)
self.model = Gtk.ListStore(GObject.TYPE_BOOLEAN, GObject.TYPE_STRING,
GObject.TYPE_STRING, GObject.TYPE_STRING,
GObject.TYPE_STRING)
r = Gtk.CellRendererToggle()
r.connect('toggled', self.toggled)
c = Gtk.TreeViewColumn(_('Select'), r, active=0)
self.list.append_column(c)
c = Gtk.TreeViewColumn(_('ID'), Gtk.CellRendererText(), text=1)
self.list.append_column(c)
c = Gtk.TreeViewColumn(_('Type'), Gtk.CellRendererText(), text=2)
self.list.append_column(c)
c = Gtk.TreeViewColumn(_('Value'), Gtk.CellRendererText(), text=3)
self.list.append_column(c)
c = Gtk.TreeViewColumn(_('Current Name'), Gtk.CellRendererText(),
text=4)
self.list.append_column(c)
self.list.set_model(self.model)
self.nick_hash = {}
self.title_hash = {}
self.prefix1_hash = {}
self.compound_hash = {}
self.progress.set_pass(_('Building display'),
len(list(self.handle_to_action.keys())))
for key, data in self.handle_to_action.items():
p = self.db.get_person_from_handle(key)
gid = p.get_gramps_id()
if self.nickid in data:
given, nick = data[self.nickid]
handle = self.model.append()
self.model.set_value(handle, 0, 1)
self.model.set_value(handle, 1, gid)
self.model.set_value(handle, 2, _('Nickname'))
self.model.set_value(handle, 3, nick)
self.model.set_value(handle, 4,
p.get_primary_name().get_name())
self.nick_hash[key] = handle
if self.titleid in data:
title, given = data[self.titleid]
handle = self.model.append()
self.model.set_value(handle, 0, 1)
self.model.set_value(handle, 1, gid)
self.model.set_value(handle, 2, _('Person|Title'))
self.model.set_value(handle, 3, title)
self.model.set_value(
handle, 4, p.get_primary_name().get_name())
self.title_hash[key] = handle
if self.pref1id in data:
given, prefixtotal, new_prefix = data[self.pref1id]
handle = self.model.append()
self.model.set_value(handle, 0, 1)
self.model.set_value(handle, 1, gid)
self.model.set_value(handle, 2, _('Prefix in given name'))
self.model.set_value(handle, 3, prefixtotal)
self.model.set_value(
handle, 4, p.get_primary_name().get_name())
self.prefix1_hash[key] = handle
if self.compid in data:
surn_list, pref_list, con_list, prims, origs =\
data[self.compid]
handle = self.model.append()
self.model.set_value(handle, 0, 1)
self.model.set_value(handle, 1, gid)
self.model.set_value(handle, 2, _('Compound surname'))
newval = ''
for sur, pre, con in zip(surn_list, pref_list, con_list):
if newval:
newval += '-['
else:
newval = '['
newval += pre + ',' + sur
if con:
newval += ',' + con + ']'
else:
newval += ']'
self.model.set_value(handle, 3, newval)
self.model.set_value(handle, 4,
p.get_primary_name().get_name())
self.compound_hash[key] = handle
self.progress.step()
self.progress.close()
self.show()
def on_help_clicked(self, obj):
"""Display the relevant portion of GRAMPS manual"""
display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)
def on_ok_clicked(self, obj):
with DbTxn(_("Extract information from names"), self.db, batch=True
) as trans:
self.db.disable_signals()
for key, data in self.handle_to_action.items():
p = self.db.get_person_from_handle(key)
if self.nickid in data:
modelhandle = self.nick_hash[key]
val = self.model.get_value(modelhandle, 0)
if val:
given, nick = data[self.nickid]
name = p.get_primary_name()
name.set_first_name(given.strip())
name.set_nick_name(nick.strip())
if self.titleid in data:
modelhandle = self.title_hash[key]
val = self.model.get_value(modelhandle, 0)
if val:
title, given = data[self.titleid]
name = p.get_primary_name()
name.set_first_name(given.strip())
name.set_title(title.strip())
if self.pref1id in data:
modelhandle = self.prefix1_hash[key]
val = self.model.get_value(modelhandle, 0)
if val:
given, prefixtotal, prefix = data[self.pref1id]
name = p.get_primary_name()
name.set_first_name(given.strip())
oldpref = name.get_surname_list()[0].get_prefix().strip()
if oldpref == '' or oldpref == prefix.strip():
name.get_surname_list()[0].set_prefix(prefix)
else:
name.get_surname_list()[0].set_prefix(
'%s %s' % (prefix, oldpref))
if self.compid in data:
modelhandle = self.compound_hash[key]
val = self.model.get_value(modelhandle, 0)
if val:
surns, prefs, cons, prims, origs = data[self.compid]
name = p.get_primary_name()
new_surn_list = []
for surn, pref, con, prim, orig in zip(
surns, prefs, cons, prims, origs):
new_surn_list.append(Surname())
new_surn_list[-1].set_surname(surn.strip())
new_surn_list[-1].set_prefix(pref.strip())
new_surn_list[-1].set_connector(con.strip())
new_surn_list[-1].set_primary(prim)
new_surn_list[-1].set_origintype(orig)
name.set_surname_list(new_surn_list)
self.db.commit_person(p, trans)
self.db.enable_signals()
self.db.request_rebuild()
self.close()
self.cb()
class PatchNamesOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, person_id=None):
tool.ToolOptions.__init__(self, name, person_id)
def strip(arg):
return arg.strip()
| beernarrd/gramps | gramps/plugins/tool/patchnames.py | Python | gpl-2.0 | 23,530 | [
"Brian"
] | 9cbcb17580b052d2083007f47b15529e20799bc591ae11a683470c0d336c6405 |
"""Header value parser implementing various email-related RFC parsing rules.
The parsing methods defined in this module implement various email-related
parsing rules. Principal among them is RFC 5322, which is the follow-on
to RFC 2822 and primarily a clarification of the former. It also implements
RFC 2047 encoded word decoding.
RFC 5322 goes to considerable trouble to maintain backward compatibility with
RFC 822 in the parse phase, while cleaning up the structure on the generation
phase. This parser supports correct RFC 5322 generation by tagging white space
as folding white space only when folding is allowed in the non-obsolete rule
sets. Actually, the parser is even more generous when accepting input than RFC
5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
Where possible, deviations from the standard are annotated on the 'defects'
attribute of tokens that deviate.
The general structure of the parser follows RFC 5322, and uses its terminology
where there is a direct correspondence. Where the implementation requires a
somewhat different structure than that used by the formal grammar, new terms
that mimic the closest existing terms are used. Thus, it really helps to have
a copy of RFC 5322 handy when studying this code.
Input to the parser is a string that has already been unfolded according to
RFC 5322 rules. According to the RFC this unfolding is the very first step, and
this parser leaves the unfolding step to a higher level message parser, which
will have already detected the line breaks that need unfolding while
determining the beginning and end of each header.
The output of the parser is a TokenList object, which is a list subclass. A
TokenList is a recursive data structure. The terminal nodes of the structure
are Terminal objects, which are subclasses of str. These do not correspond
directly to terminal objects in the formal grammar, but are instead more
practical higher level combinations of true terminals.
All TokenList and Terminal objects have a 'value' attribute, which produces the
semantically meaningful value of that part of the parse subtree. The value of
all whitespace tokens (no matter how many sub-tokens they may contain) is a
single space, as per the RFC rules. This includes 'CFWS', which is herein
included in the general class of whitespace tokens. There is one exception to
the rule that whitespace tokens are collapsed into single spaces in values: in
the value of a 'bare-quoted-string' (a quoted-string with no leading or
trailing whitespace), any whitespace that appeared between the quotation marks
is preserved in the returned value. Note that in all Terminal strings quoted
pairs are turned into their unquoted values.
All TokenList and Terminal objects also have a string value, which attempts to
be a "canonical" representation of the RFC-compliant form of the substring that
produced the parsed subtree, including minimal use of quoted pair quoting.
Whitespace runs are not collapsed.
Comment tokens also have a 'content' attribute providing the string found
between the parens (including any nested comments) with whitespace preserved.
All TokenList and Terminal objects have a 'defects' attribute which is a
possibly empty list of all of the defects found while creating the token. Defects
may appear on any token in the tree, and a composite list of all defects in the
subtree is available through the 'all_defects' attribute of any node. (For
Terminal nodes x.defects == x.all_defects.)
Each object in a parse tree is called a 'token', and each has a 'token_type'
attribute that gives the name from the RFC 5322 grammar that it represents.
Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
It is returned in place of lists of (ctext/quoted-pair) and
(qtext/quoted-pair).
XXX: provide complete list of token types.
"""
import re
import sys
import urllib # For urllib.parse.unquote
from string import hexdigits
from operator import itemgetter
from email import _encoded_words as _ew
from email import errors
from email import utils
#
# Useful constants and functions
#
WSP = set(' \t')
CFWS_LEADER = WSP | set('(')
SPECIALS = set(r'()<>@,:;.\"[]')
ATOM_ENDS = SPECIALS | WSP
DOT_ATOM_ENDS = ATOM_ENDS - set('.')
# '.', '"', and '(' do not end phrases in order to support obs-phrase
PHRASE_ENDS = SPECIALS - set('."(')
TSPECIALS = (SPECIALS | set('/?=')) - set('.')
TOKEN_ENDS = TSPECIALS | WSP
ASPECIALS = TSPECIALS | set("*'%")
ATTRIBUTE_ENDS = ASPECIALS | WSP
EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
def quote_string(value):
return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
# Match an RFC 2047 word, looks like =?utf-8?q?someword?=
rfc2047_matcher = re.compile(r'''
=\? # literal =?
[^?]* # charset
\? # literal ?
[qQbB] # literal 'q' or 'b', case insensitive
\? # literal ?
.*? # encoded word
\?= # literal ?=
''', re.VERBOSE | re.MULTILINE)
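# Quick sketch of what the matcher above accepts and rejects:
#
#     >>> bool(rfc2047_matcher.search('=?utf-8?q?hello?='))
#     True
#     >>> bool(rfc2047_matcher.search('plain text'))
#     False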
#
# TokenList and its subclasses
#
class TokenList(list):
token_type = None
syntactic_break = True
ew_combine_allowed = True
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.defects = []
def __str__(self):
return ''.join(str(x) for x in self)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
super().__repr__())
@property
def value(self):
return ''.join(x.value for x in self if x.value)
@property
def all_defects(self):
return sum((x.all_defects for x in self), self.defects)
def startswith_fws(self):
return self[0].startswith_fws()
@property
def as_ew_allowed(self):
"""True if all top level tokens of this part may be RFC2047 encoded."""
return all(part.as_ew_allowed for part in self)
@property
def comments(self):
comments = []
for token in self:
comments.extend(token.comments)
return comments
def fold(self, *, policy):
return _refold_parse_tree(self, policy=policy)
def pprint(self, indent=''):
print(self.ppstr(indent=indent))
def ppstr(self, indent=''):
return '\n'.join(self._pp(indent=indent))
def _pp(self, indent=''):
yield '{}{}/{}('.format(
indent,
self.__class__.__name__,
self.token_type)
for token in self:
if not hasattr(token, '_pp'):
yield (indent + ' !! invalid element in token '
'list: {!r}'.format(token))
else:
yield from token._pp(indent+' ')
if self.defects:
extra = ' Defects: {}'.format(self.defects)
else:
extra = ''
yield '{}){}'.format(indent, extra)
class WhiteSpaceTokenList(TokenList):
@property
def value(self):
return ' '
@property
def comments(self):
return [x.content for x in self if x.token_type=='comment']
class UnstructuredTokenList(TokenList):
token_type = 'unstructured'
class Phrase(TokenList):
token_type = 'phrase'
class Word(TokenList):
token_type = 'word'
class CFWSList(WhiteSpaceTokenList):
token_type = 'cfws'
class Atom(TokenList):
token_type = 'atom'
class Token(TokenList):
token_type = 'token'
encode_as_ew = False
class EncodedWord(TokenList):
token_type = 'encoded-word'
cte = None
charset = None
lang = None
class QuotedString(TokenList):
token_type = 'quoted-string'
@property
def content(self):
for x in self:
if x.token_type == 'bare-quoted-string':
return x.value
@property
def quoted_value(self):
res = []
for x in self:
if x.token_type == 'bare-quoted-string':
res.append(str(x))
else:
res.append(x.value)
return ''.join(res)
@property
def stripped_value(self):
for token in self:
if token.token_type == 'bare-quoted-string':
return token.value
class BareQuotedString(QuotedString):
token_type = 'bare-quoted-string'
def __str__(self):
return quote_string(''.join(str(x) for x in self))
@property
def value(self):
return ''.join(str(x) for x in self)
class Comment(WhiteSpaceTokenList):
token_type = 'comment'
def __str__(self):
return ''.join(sum([
["("],
[self.quote(x) for x in self],
[")"],
], []))
def quote(self, value):
if value.token_type == 'comment':
return str(value)
return str(value).replace('\\', '\\\\').replace(
'(', r'\(').replace(
')', r'\)')
@property
def content(self):
return ''.join(str(x) for x in self)
@property
def comments(self):
return [self.content]
class AddressList(TokenList):
token_type = 'address-list'
@property
def addresses(self):
return [x for x in self if x.token_type=='address']
@property
def mailboxes(self):
return sum((x.mailboxes
for x in self if x.token_type=='address'), [])
@property
def all_mailboxes(self):
return sum((x.all_mailboxes
for x in self if x.token_type=='address'), [])
class Address(TokenList):
token_type = 'address'
@property
def display_name(self):
if self[0].token_type == 'group':
return self[0].display_name
@property
def mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return [self[0]]
return self[0].all_mailboxes
class MailboxList(TokenList):
token_type = 'mailbox-list'
@property
def mailboxes(self):
return [x for x in self if x.token_type=='mailbox']
@property
def all_mailboxes(self):
return [x for x in self
if x.token_type in ('mailbox', 'invalid-mailbox')]
class GroupList(TokenList):
token_type = 'group-list'
@property
def mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].all_mailboxes
class Group(TokenList):
token_type = "group"
@property
def mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].mailboxes
@property
def all_mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].all_mailboxes
@property
def display_name(self):
return self[0].display_name
class NameAddr(TokenList):
token_type = 'name-addr'
@property
def display_name(self):
if len(self) == 1:
return None
return self[0].display_name
@property
def local_part(self):
return self[-1].local_part
@property
def domain(self):
return self[-1].domain
@property
def route(self):
return self[-1].route
@property
def addr_spec(self):
return self[-1].addr_spec
class AngleAddr(TokenList):
token_type = 'angle-addr'
@property
def local_part(self):
for x in self:
if x.token_type == 'addr-spec':
return x.local_part
@property
def domain(self):
for x in self:
if x.token_type == 'addr-spec':
return x.domain
@property
def route(self):
for x in self:
if x.token_type == 'obs-route':
return x.domains
@property
def addr_spec(self):
for x in self:
if x.token_type == 'addr-spec':
if x.local_part:
return x.addr_spec
else:
return quote_string(x.local_part) + x.addr_spec
else:
return '<>'
class ObsRoute(TokenList):
token_type = 'obs-route'
@property
def domains(self):
return [x.domain for x in self if x.token_type == 'domain']
class Mailbox(TokenList):
token_type = 'mailbox'
@property
def display_name(self):
if self[0].token_type == 'name-addr':
return self[0].display_name
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
return self[0].domain
@property
def route(self):
if self[0].token_type == 'name-addr':
return self[0].route
@property
def addr_spec(self):
return self[0].addr_spec
class InvalidMailbox(TokenList):
token_type = 'invalid-mailbox'
@property
def display_name(self):
return None
local_part = domain = route = addr_spec = display_name
class Domain(TokenList):
token_type = 'domain'
as_ew_allowed = False
@property
def domain(self):
return ''.join(super().value.split())
class DotAtom(TokenList):
token_type = 'dot-atom'
class DotAtomText(TokenList):
token_type = 'dot-atom-text'
as_ew_allowed = True
class NoFoldLiteral(TokenList):
token_type = 'no-fold-literal'
as_ew_allowed = False
class AddrSpec(TokenList):
token_type = 'addr-spec'
as_ew_allowed = False
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
if len(self) < 3:
return None
return self[-1].domain
@property
def value(self):
if len(self) < 3:
return self[0].value
return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()
@property
def addr_spec(self):
nameset = set(self.local_part)
if len(nameset) > len(nameset-DOT_ATOM_ENDS):
lp = quote_string(self.local_part)
else:
lp = self.local_part
if self.domain is not None:
return lp + '@' + self.domain
return lp
class ObsLocalPart(TokenList):
token_type = 'obs-local-part'
as_ew_allowed = False
class DisplayName(Phrase):
token_type = 'display-name'
ew_combine_allowed = False
@property
def display_name(self):
res = TokenList(self)
if len(res) == 0:
return res.value
if res[0].token_type == 'cfws':
res.pop(0)
else:
if res[0][0].token_type == 'cfws':
res[0] = TokenList(res[0][1:])
if res[-1].token_type == 'cfws':
res.pop()
else:
if res[-1][-1].token_type == 'cfws':
res[-1] = TokenList(res[-1][:-1])
return res.value
@property
def value(self):
quote = False
if self.defects:
quote = True
else:
for x in self:
if x.token_type == 'quoted-string':
quote = True
if len(self) != 0 and quote:
pre = post = ''
if self[0].token_type=='cfws' or self[0][0].token_type=='cfws':
pre = ' '
if self[-1].token_type=='cfws' or self[-1][-1].token_type=='cfws':
post = ' '
return pre+quote_string(self.display_name)+post
else:
return super().value
class LocalPart(TokenList):
token_type = 'local-part'
as_ew_allowed = False
@property
def value(self):
if self[0].token_type == "quoted-string":
return self[0].quoted_value
else:
return self[0].value
@property
def local_part(self):
# Strip whitespace from front, back, and around dots.
res = [DOT]
last = DOT
last_is_tl = False
for tok in self[0] + [DOT]:
if tok.token_type == 'cfws':
continue
if (last_is_tl and tok.token_type == 'dot' and
last[-1].token_type == 'cfws'):
res[-1] = TokenList(last[:-1])
is_tl = isinstance(tok, TokenList)
if (is_tl and last.token_type == 'dot' and
tok[0].token_type == 'cfws'):
res.append(TokenList(tok[1:]))
else:
res.append(tok)
last = res[-1]
last_is_tl = is_tl
res = TokenList(res[1:-1])
return res.value
class DomainLiteral(TokenList):
token_type = 'domain-literal'
as_ew_allowed = False
@property
def domain(self):
return ''.join(super().value.split())
@property
def ip(self):
for x in self:
if x.token_type == 'ptext':
return x.value
class MIMEVersion(TokenList):
token_type = 'mime-version'
major = None
minor = None
class Parameter(TokenList):
token_type = 'parameter'
sectioned = False
extended = False
charset = 'us-ascii'
@property
def section_number(self):
# Because the first token, the attribute (name), eats CFWS, the second
# token is always the section if there is one.
return self[1].number if self.sectioned else 0
@property
def param_value(self):
# This is part of the "handle quoted extended parameters" hack.
for token in self:
if token.token_type == 'value':
return token.stripped_value
if token.token_type == 'quoted-string':
for token in token:
if token.token_type == 'bare-quoted-string':
for token in token:
if token.token_type == 'value':
return token.stripped_value
return ''
class InvalidParameter(Parameter):
token_type = 'invalid-parameter'
class Attribute(TokenList):
token_type = 'attribute'
@property
def stripped_value(self):
for token in self:
if token.token_type.endswith('attrtext'):
return token.value
class Section(TokenList):
token_type = 'section'
number = None
class Value(TokenList):
token_type = 'value'
@property
def stripped_value(self):
token = self[0]
if token.token_type == 'cfws':
token = self[1]
if token.token_type.endswith(
('quoted-string', 'attribute', 'extended-attribute')):
return token.stripped_value
return self.value
class MimeParameters(TokenList):
token_type = 'mime-parameters'
syntactic_break = False
@property
def params(self):
# The RFC specifically states that the ordering of parameters is not
# guaranteed and may be reordered by the transport layer. So we have
# to assume the RFC 2231 pieces can come in any order. However, we
# output them in the order that we first see a given name, which gives
# us a stable __str__.
params = {} # Using order preserving dict from Python 3.7+
for token in self:
if not token.token_type.endswith('parameter'):
continue
if token[0].token_type != 'attribute':
continue
name = token[0].value.strip()
if name not in params:
params[name] = []
params[name].append((token.section_number, token))
for name, parts in params.items():
parts = sorted(parts, key=itemgetter(0))
first_param = parts[0][1]
charset = first_param.charset
# Our arbitrary error recovery is to ignore duplicate parameters,
# to use appearance order if there are duplicate rfc 2231 parts,
# and to ignore gaps. This mimics the error recovery of get_param.
if not first_param.extended and len(parts) > 1:
if parts[1][0] == 0:
parts[1][1].defects.append(errors.InvalidHeaderDefect(
'duplicate parameter name; duplicate(s) ignored'))
parts = parts[:1]
# Else assume the *0* was missing...note that this is different
# from get_param, but we registered a defect for this earlier.
value_parts = []
i = 0
for section_number, param in parts:
if section_number != i:
# We could get fancier here and look for a complete
# duplicate extended parameter and ignore the second one
# seen. But we're not doing that. The old code didn't.
if not param.extended:
param.defects.append(errors.InvalidHeaderDefect(
'duplicate parameter name; duplicate ignored'))
continue
else:
param.defects.append(errors.InvalidHeaderDefect(
"inconsistent RFC2231 parameter numbering"))
i += 1
value = param.param_value
if param.extended:
try:
value = urllib.parse.unquote_to_bytes(value)
except UnicodeEncodeError:
# source had surrogate escaped bytes. What we do now
# is a bit of an open question. I'm not sure this is
# the best choice, but it is what the old algorithm did
value = urllib.parse.unquote(value, encoding='latin-1')
else:
try:
value = value.decode(charset, 'surrogateescape')
except LookupError:
# XXX: there should really be a custom defect for
# unknown character set to make it easy to find,
# because otherwise unknown charset is a silent
# failure.
value = value.decode('us-ascii', 'surrogateescape')
if utils._has_surrogates(value):
param.defects.append(errors.UndecodableBytesDefect())
value_parts.append(value)
value = ''.join(value_parts)
yield name, value
def __str__(self):
params = []
for name, value in self.params:
if value:
params.append('{}={}'.format(name, quote_string(value)))
else:
params.append(name)
params = '; '.join(params)
return ' ' + params if params else ''
class ParameterizedHeaderValue(TokenList):
# Set this false so that the value doesn't wind up on a new line even
# if it and the parameters would fit there but not on the first line.
syntactic_break = False
@property
def params(self):
for token in reversed(self):
if token.token_type == 'mime-parameters':
return token.params
return {}
class ContentType(ParameterizedHeaderValue):
token_type = 'content-type'
as_ew_allowed = False
maintype = 'text'
subtype = 'plain'
class ContentDisposition(ParameterizedHeaderValue):
token_type = 'content-disposition'
as_ew_allowed = False
content_disposition = None
class ContentTransferEncoding(TokenList):
token_type = 'content-transfer-encoding'
as_ew_allowed = False
cte = '7bit'
class HeaderLabel(TokenList):
token_type = 'header-label'
as_ew_allowed = False
class MsgID(TokenList):
token_type = 'msg-id'
as_ew_allowed = False
def fold(self, policy):
# message-id tokens may not be folded.
return str(self) + policy.linesep
class MessageID(MsgID):
token_type = 'message-id'
class InvalidMessageID(MessageID):
token_type = 'invalid-message-id'
class Header(TokenList):
token_type = 'header'
#
# Terminal classes and instances
#
class Terminal(str):
as_ew_allowed = True
ew_combine_allowed = True
syntactic_break = True
def __new__(cls, value, token_type):
self = super().__new__(cls, value)
self.token_type = token_type
self.defects = []
return self
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super().__repr__())
def pprint(self):
print(self.__class__.__name__ + '/' + self.token_type)
@property
def all_defects(self):
return list(self.defects)
def _pp(self, indent=''):
return ["{}{}/{}({}){}".format(
indent,
self.__class__.__name__,
self.token_type,
super().__repr__(),
'' if not self.defects else ' {}'.format(self.defects),
)]
def pop_trailing_ws(self):
# This terminates the recursion.
return None
@property
def comments(self):
return []
def __getnewargs__(self):
return(str(self), self.token_type)
class WhiteSpaceTerminal(Terminal):
@property
def value(self):
return ' '
def startswith_fws(self):
return True
class ValueTerminal(Terminal):
@property
def value(self):
return self
def startswith_fws(self):
return False
class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
@property
def value(self):
return ''
def __str__(self):
return ''
class _InvalidEwError(errors.HeaderParseError):
"""Invalid encoded word found while parsing headers."""
# XXX these need to become classes and used as instances so
# that a program can't change them in a parse tree and screw
# up other parse trees. Maybe should have tests for that, too.
DOT = ValueTerminal('.', 'dot')
ListSeparator = ValueTerminal(',', 'list-separator')
RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
#
# Parser
#
# Parse strings according to RFC822/2047/2822/5322 rules.
#
# This is a stateless parser. Each get_XXX function accepts a string and
# returns either a Terminal or a TokenList representing the RFC object named
# by the method and a string containing the remaining unparsed characters
# from the input. Thus a parser method consumes the next syntactic construct
# of a given type and returns a token representing the construct plus the
# unparsed remainder of the input string.
#
# For example, if the first element of a structured header is a 'phrase',
# then:
#
# phrase, value = get_phrase(value)
#
# returns the complete phrase from the start of the string value, plus any
# characters left in the string after the phrase is removed.
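# Concrete sketch of that convention, using get_phrase (exact whitespace in
# the returned values may differ slightly; this is for orientation only):
#
#     >>> phrase, value = get_phrase('Fred Flintstone <fred@bedrock.example>')
#     >>> phrase.value
#     'Fred Flintstone '
#     >>> value
#     '<fred@bedrock.example>'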
_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(ATOM_ENDS)))).match
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
_non_token_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(TOKEN_ENDS)))).match
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(ATTRIBUTE_ENDS)))).match
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(EXTENDED_ATTRIBUTE_ENDS)))).match
def _validate_xtext(xtext):
"""If input token contains ASCII non-printables, register a defect."""
non_printables = _non_printable_finder(xtext)
if non_printables:
xtext.defects.append(errors.NonPrintableDefect(non_printables))
if utils._has_surrogates(xtext):
xtext.defects.append(errors.UndecodableBytesDefect(
"Non-ASCII characters found in header token"))
def _get_ptext_to_endchars(value, endchars):
"""Scan printables/quoted-pairs until endchars and return unquoted ptext.
This function turns a run of qcontent, ccontent-without-comments, or
dtext-with-quoted-printables into a single string by unquoting any
quoted printables. It returns the string, the remaining value, and
a flag that is True iff there were any quoted printables decoded.
"""
fragment, *remainder = _wsp_splitter(value, 1)
vchars = []
escape = False
had_qp = False
for pos in range(len(fragment)):
if fragment[pos] == '\\':
if escape:
escape = False
had_qp = True
else:
escape = True
continue
if escape:
escape = False
elif fragment[pos] in endchars:
break
vchars.append(fragment[pos])
else:
pos = pos + 1
return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
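# Sketch of the helper above: scanning stops at the first end character, and
# the unscanned remainder (including that character) is returned as-is.
#
#     >>> _get_ptext_to_endchars('abc(def', '()')
#     ('abc', '(def', False)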
def get_fws(value):
"""FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
"""
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
def get_encoded_word(value):
""" encoded-word = "=?" charset "?" encoding "?" encoded-text "?="
"""
ew = EncodedWord()
if not value.startswith('=?'):
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
tok, *remainder = value[2:].split('?=', 1)
if tok == value[2:]:
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
remstr = ''.join(remainder)
if (len(remstr) > 1 and
remstr[0] in hexdigits and
remstr[1] in hexdigits and
tok.count('?') < 2):
# The ? after the CTE was followed by an encoded word escape (=XX).
rest, *remainder = remstr.split('?=', 1)
tok = tok + '?=' + rest
if len(tok.split()) > 1:
ew.defects.append(errors.InvalidHeaderDefect(
"whitespace inside encoded word"))
ew.cte = value
value = ''.join(remainder)
try:
text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
except (ValueError, KeyError):
raise _InvalidEwError(
"encoded word format invalid: '{}'".format(ew.cte))
ew.charset = charset
ew.lang = lang
ew.defects.extend(defects)
while text:
if text[0] in WSP:
token, text = get_fws(text)
ew.append(token)
continue
chars, *remainder = _wsp_splitter(text, 1)
vtext = ValueTerminal(chars, 'vtext')
_validate_xtext(vtext)
ew.append(vtext)
text = ''.join(remainder)
# Encoded words should be followed by a WS
if value and value[0] not in WSP:
ew.defects.append(errors.InvalidHeaderDefect(
"missing trailing whitespace after encoded-word"))
return ew, value
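# Sketch (values believed correct but not verified against a live run): the
# decoded text becomes the token's value and the unparsed tail is returned.
#
#     >>> ew, rest = get_encoded_word('=?utf-8?q?caf=C3=A9?= and more')
#     >>> ew.value
#     'café'
#     >>> ew.charset
#     'utf-8'
#     >>> rest
#     ' and more'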
def get_unstructured(value):
"""unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
obs-unstruct = *((*LF *CR *(obs-utext *LF *CR)) / FWS)
obs-utext = %d0 / obs-NO-WS-CTL / LF / CR
obs-NO-WS-CTL is control characters except WSP/CR/LF.
So, basically, we have printable runs, plus control characters or nulls in
the obsolete syntax, separated by whitespace. Since RFC 2047 uses the
obsolete syntax in its specification, but requires whitespace on either
side of the encoded words, I can see no reason to need to separate the
non-printable-non-whitespace from the printable runs if they occur, so we
parse this into xtext tokens separated by WSP tokens.
Because an 'unstructured' value must by definition constitute the entire
value, this 'get' routine does not return a remaining value, only the
parsed TokenList.
"""
# XXX: but what about bare CR and LF? They might signal the start or
# end of an encoded word. YAGNI for now, since our current parsers
# will never send us strings with bare CR or LF.
unstructured = UnstructuredTokenList()
while value:
if value[0] in WSP:
token, value = get_fws(value)
unstructured.append(token)
continue
valid_ew = True
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except _InvalidEwError:
valid_ew = False
except errors.HeaderParseError:
# XXX: Need to figure out how to register defects when
# appropriate here.
pass
else:
have_ws = True
if len(unstructured) > 0:
if unstructured[-1].token_type != 'fws':
unstructured.defects.append(errors.InvalidHeaderDefect(
"missing whitespace before encoded word"))
have_ws = False
if have_ws and len(unstructured) > 1:
if unstructured[-2].token_type == 'encoded-word':
unstructured[-1] = EWWhiteSpaceTerminal(
unstructured[-1], 'fws')
unstructured.append(token)
continue
tok, *remainder = _wsp_splitter(value, 1)
# Split in the middle of an atom if there is a rfc2047 encoded word
# which does not have WSP on both sides. The defect will be registered
# the next time through the loop.
# This needs to only be performed when the encoded word is valid;
# otherwise, performing it on an invalid encoded word can cause
# the parser to go in an infinite loop.
if valid_ew and rfc2047_matcher.search(tok):
tok, *remainder = value.partition('=?')
vtext = ValueTerminal(tok, 'vtext')
_validate_xtext(vtext)
unstructured.append(vtext)
value = ''.join(remainder)
return unstructured
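# Sketch: an unstructured value containing one encoded word; the FWS run
# collapses to a single space in the semantic value.
#
#     >>> get_unstructured('Hello =?utf-8?q?W=C3=B6rld?=').value
#     'Hello Wörld'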
def get_qp_ctext(value):
r"""ctext = <printable ascii except \ ( )>
This is not the RFC ctext, since we are handling nested comments in comment
and unquoting quoted-pairs here. We allow anything except the '()'
characters, but if we find any ASCII other than the RFC defined printable
ASCII, a NonPrintableDefect is added to the token's defects list. Since
quoted pairs are converted to their unquoted values, what is returned is
a 'ptext' token. In this case it is a WhiteSpaceTerminal, so its value
is ' '.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '()')
ptext = WhiteSpaceTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_qcontent(value):
"""qcontent = qtext / quoted-pair
We allow anything except the DQUOTE character, but if we find any ASCII
other than the RFC defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Any quoted pairs are converted to their
unquoted values, so what is returned is a 'ptext' token. In this case it
is a ValueTerminal.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '"')
ptext = ValueTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_atext(value):
"""atext = <matches _atext_matcher>
We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
the token's defects list if we find non-atext characters.
"""
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, value
def get_bare_quoted_string(value):
"""bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
A quoted-string without the leading or trailing white space. Its
value is the text between the quote marks, with whitespace
preserved and quoted pairs decoded.
"""
if value[0] != '"':
raise errors.HeaderParseError(
"expected '\"' but found '{}'".format(value))
bare_quoted_string = BareQuotedString()
value = value[1:]
if value and value[0] == '"':
token, value = get_qcontent(value)
bare_quoted_string.append(token)
while value and value[0] != '"':
if value[0] in WSP:
token, value = get_fws(value)
elif value[:2] == '=?':
try:
token, value = get_encoded_word(value)
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"encoded word inside quoted string"))
except errors.HeaderParseError:
token, value = get_qcontent(value)
else:
token, value = get_qcontent(value)
bare_quoted_string.append(token)
if not value:
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"end of header inside quoted string"))
return bare_quoted_string, value
return bare_quoted_string, value[1:]
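# Sketch: whitespace between the quotes is preserved in the value, and the
# remainder starts just after the closing quote.
#
#     >>> bqs, rest = get_bare_quoted_string('"two  words" tail')
#     >>> bqs.value
#     'two  words'
#     >>> rest
#     ' tail'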
def get_comment(value):
"""comment = "(" *([FWS] ccontent) [FWS] ")"
ccontent = ctext / quoted-pair / comment
We handle nested comments here, and quoted-pair in our qp-ctext routine.
"""
if value and value[0] != '(':
raise errors.HeaderParseError(
"expected '(' but found '{}'".format(value))
comment = Comment()
value = value[1:]
while value and value[0] != ")":
if value[0] in WSP:
token, value = get_fws(value)
elif value[0] == '(':
token, value = get_comment(value)
else:
token, value = get_qp_ctext(value)
comment.append(token)
if not value:
comment.defects.append(errors.InvalidHeaderDefect(
"end of header inside comment"))
return comment, value
return comment, value[1:]
def get_cfws(value):
"""CFWS = (1*([FWS] comment) [FWS]) / FWS
"""
cfws = CFWSList()
while value and value[0] in CFWS_LEADER:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_comment(value)
cfws.append(token)
return cfws, value
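# Sketch: CFWS collapses to a single space in the value, while any comment
# text is kept on the 'comments' attribute.
#
#     >>> cfws, rest = get_cfws(' (a comment) x')
#     >>> cfws.value
#     ' '
#     >>> cfws.comments
#     ['a comment']
#     >>> rest
#     'x'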
def get_quoted_string(value):
"""quoted-string = [CFWS] <bare-quoted-string> [CFWS]
'bare-quoted-string' is an intermediate class defined by this
parser and not by the RFC grammar. It is the quoted string
without any attached CFWS.
"""
quoted_string = QuotedString()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
token, value = get_bare_quoted_string(value)
quoted_string.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
return quoted_string, value
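# Sketch: 'content' is the text inside the quotes, while 'quoted_value' keeps
# the quoting and collapses the surrounding CFWS to single spaces.
#
#     >>> qs, rest = get_quoted_string(' "hi there" x')
#     >>> qs.content
#     'hi there'
#     >>> qs.quoted_value
#     ' "hi there" '
#     >>> rest
#     'x'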
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
An atom could be an rfc2047 encoded word.
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_atext(value)
else:
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value
def get_dot_atom_text(value):
""" dot-text = 1*atext *("." 1*atext)
"""
dot_atom_text = DotAtomText()
if not value or value[0] in ATOM_ENDS:
raise errors.HeaderParseError("expected atom at a start of "
"dot-atom-text but found '{}'".format(value))
while value and value[0] not in ATOM_ENDS:
token, value = get_atext(value)
dot_atom_text.append(token)
if value and value[0] == '.':
dot_atom_text.append(DOT)
value = value[1:]
if dot_atom_text[-1] is DOT:
raise errors.HeaderParseError("expected atom at end of dot-atom-text "
"but found '{}'".format('.'+value))
return dot_atom_text, value
def get_dot_atom(value):
""" dot-atom = [CFWS] dot-atom-text [CFWS]
Any place we can have a dot atom, we could instead have an rfc2047 encoded
word.
"""
dot_atom = DotAtom()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_dot_atom_text(value)
else:
token, value = get_dot_atom_text(value)
dot_atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
return dot_atom, value
def get_word(value):
"""word = atom / quoted-string
Either atom or quoted-string may start with CFWS. We have to peel off this
CFWS first to determine which type of word to parse. Afterward we splice
the leading CFWS, if any, into the parsed sub-token.
If neither an atom or a quoted-string is found before the next special, a
HeaderParseError is raised.
The token returned is either an Atom or a QuotedString, as appropriate.
This means the 'word' level of the formal grammar is not represented in the
parse tree; this is because having that extra layer when manipulating the
parse tree is more confusing than it is helpful.
"""
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
else:
leader = None
if not value:
raise errors.HeaderParseError(
"Expected 'atom' or 'quoted-string' but found nothing.")
if value[0]=='"':
token, value = get_quoted_string(value)
elif value[0] in SPECIALS:
raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
"but found '{}'".format(value))
else:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
return token, value
def get_phrase(value):
""" phrase = 1*word / obs-phrase
obs-phrase = word *(word / "." / CFWS)
This means a phrase can be a sequence of words, periods, and CFWS in any
order as long as it starts with at least one word. If anything other than
words is detected, an ObsoleteHeaderDefect is added to the token's defect
list. We also accept a phrase that starts with CFWS followed by a dot;
this is registered as an InvalidHeaderDefect, since it is not supported by
even the obsolete grammar.
"""
phrase = Phrase()
try:
token, value = get_word(value)
phrase.append(token)
except errors.HeaderParseError:
phrase.defects.append(errors.InvalidHeaderDefect(
"phrase does not start with word"))
while value and value[0] not in PHRASE_ENDS:
if value[0]=='.':
phrase.append(DOT)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"period in 'phrase'"))
value = value[1:]
else:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"comment found without atom"))
else:
raise
phrase.append(token)
return phrase, value
def get_local_part(value):
""" local-part = dot-atom / quoted-string / obs-local-part
"""
local_part = LocalPart()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected local-part but found '{}'".format(value))
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] != '\\' and value[0] in PHRASE_ENDS:
raise
token = TokenList()
if leader is not None:
token[:0] = [leader]
local_part.append(token)
if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
obs_local_part, value = get_obs_local_part(str(local_part) + value)
if obs_local_part.token_type == 'invalid-obs-local-part':
local_part.defects.append(errors.InvalidHeaderDefect(
"local-part is not dot-atom, quoted-string, or obs-local-part"))
else:
local_part.defects.append(errors.ObsoleteHeaderDefect(
"local-part is not a dot-atom (contains CFWS)"))
local_part[0] = obs_local_part
try:
local_part.value.encode('ascii')
except UnicodeEncodeError:
local_part.defects.append(errors.NonASCIILocalPartDefect(
"local-part contains non-ASCII characters)"))
return local_part, value
def get_obs_local_part(value):
""" obs-local-part = word *("." word)
"""
obs_local_part = ObsLocalPart()
last_non_ws_was_dot = False
while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
if value[0] == '.':
if last_non_ws_was_dot:
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"invalid repeated '.'"))
obs_local_part.append(DOT)
last_non_ws_was_dot = True
value = value[1:]
continue
elif value[0]=='\\':
obs_local_part.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"'\\' character outside of quoted-string/ccontent"))
last_non_ws_was_dot = False
continue
if obs_local_part and obs_local_part[-1].token_type != 'dot':
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"missing '.' between words"))
try:
token, value = get_word(value)
last_non_ws_was_dot = False
except errors.HeaderParseError:
if value[0] not in CFWS_LEADER:
raise
token, value = get_cfws(value)
obs_local_part.append(token)
if (obs_local_part[0].token_type == 'dot' or
obs_local_part[0].token_type=='cfws' and
obs_local_part[1].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid leading '.' in local part"))
if (obs_local_part[-1].token_type == 'dot' or
obs_local_part[-1].token_type=='cfws' and
obs_local_part[-2].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid trailing '.' in local part"))
if obs_local_part.defects:
obs_local_part.token_type = 'invalid-obs-local-part'
return obs_local_part, value
def get_dtext(value):
r""" dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list.
"""
ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
ptext = ValueTerminal(ptext, 'ptext')
if had_qp:
ptext.defects.append(errors.ObsoleteHeaderDefect(
"quoted printable found in domain-literal"))
_validate_xtext(ptext)
return ptext, value
def _check_for_early_dl_end(value, domain_literal):
if value:
return False
domain_literal.append(errors.InvalidHeaderDefect(
"end of input inside domain-literal"))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
return True
def get_domain_literal(value):
""" domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]
"""
domain_literal = DomainLiteral()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
if not value:
raise errors.HeaderParseError("expected domain-literal")
if value[0] != '[':
raise errors.HeaderParseError("expected '[' at start of domain-literal "
"but found '{}'".format(value))
value = value[1:]
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
token, value = get_dtext(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] != ']':
raise errors.HeaderParseError("expected ']' at end of domain-literal "
"but found '{}'".format(value))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
return domain_literal, value
def get_domain(value):
""" domain = dot-atom / domain-literal / obs-domain
obs-domain = atom *("." atom))
"""
domain = Domain()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected domain but found '{}'".format(value))
if value[0] == '[':
token, value = get_domain_literal(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
return domain, value
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
token, value = get_atom(value)
if value and value[0] == '@':
raise errors.HeaderParseError('Invalid Domain')
if leader is not None:
token[:0] = [leader]
domain.append(token)
if value and value[0] == '.':
domain.defects.append(errors.ObsoleteHeaderDefect(
"domain is not a dot-atom (contains CFWS)"))
if domain[0].token_type == 'dot-atom':
domain[:] = domain[0]
while value and value[0] == '.':
domain.append(DOT)
token, value = get_atom(value[1:])
domain.append(token)
return domain, value
def get_addr_spec(value):
""" addr-spec = local-part "@" domain
"""
addr_spec = AddrSpec()
token, value = get_local_part(value)
addr_spec.append(token)
if not value or value[0] != '@':
addr_spec.defects.append(errors.InvalidHeaderDefect(
"addr-spec local part with no domain"))
return addr_spec, value
addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
token, value = get_domain(value[1:])
addr_spec.append(token)
return addr_spec, value
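# Sketch: parsing stops at the first character that cannot be part of the
# addr-spec (here the comma), which is returned with the remainder.
#
#     >>> addr, rest = get_addr_spec('fred@example.com, next')
#     >>> addr.local_part, addr.domain
#     ('fred', 'example.com')
#     >>> addr.addr_spec
#     'fred@example.com'
#     >>> rest
#     ', next'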
def get_obs_route(value):
""" obs-route = obs-domain-list ":"
obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
Returns an obs-route token with the appropriate sub-tokens (that is,
there is no obs-domain-list in the parse tree).
"""
obs_route = ObsRoute()
while value and (value[0]==',' or value[0] in CFWS_LEADER):
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
elif value[0] == ',':
obs_route.append(ListSeparator)
value = value[1:]
if not value or value[0] != '@':
raise errors.HeaderParseError(
"expected obs-route domain but found '{}'".format(value))
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
while value and value[0]==',':
obs_route.append(ListSeparator)
value = value[1:]
if not value:
break
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
if value[0] == '@':
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
if not value:
raise errors.HeaderParseError("end of header while parsing obs-route")
if value[0] != ':':
raise errors.HeaderParseError( "expected ':' marking end of "
"obs-route but found '{}'".format(value))
obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
return obs_route, value[1:]
def get_angle_addr(value):
""" angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]
"""
angle_addr = AngleAddr()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
if not value or value[0] != '<':
raise errors.HeaderParseError(
"expected angle-addr but found '{}'".format(value))
angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
value = value[1:]
# Although it is not legal per RFC5322, SMTP uses '<>' in certain
# circumstances.
if value[0] == '>':
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
angle_addr.defects.append(errors.InvalidHeaderDefect(
"null addr-spec in angle-addr"))
value = value[1:]
return angle_addr, value
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
try:
token, value = get_obs_route(value)
angle_addr.defects.append(errors.ObsoleteHeaderDefect(
"obsolete route specification in angle-addr"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected addr-spec or obs-route but found '{}'".format(value))
angle_addr.append(token)
token, value = get_addr_spec(value)
angle_addr.append(token)
if value and value[0] == '>':
value = value[1:]
else:
angle_addr.defects.append(errors.InvalidHeaderDefect(
"missing trailing '>' on angle-addr"))
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
return angle_addr, value
def get_display_name(value):
""" display-name = phrase
Because this is simply a name-rule, we don't return a display-name
token containing a phrase, but rather a display-name token with
the content of the phrase.
"""
display_name = DisplayName()
token, value = get_phrase(value)
display_name.extend(token[:])
display_name.defects = token.defects[:]
return display_name, value
def get_name_addr(value):
""" name-addr = [display-name] angle-addr
"""
name_addr = NameAddr()
# Both the optional display name and the angle-addr can start with cfws.
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(leader))
if value[0] != '<':
if value[0] in PHRASE_ENDS:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(value))
token, value = get_display_name(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(token))
if leader is not None:
token[0][:0] = [leader]
leader = None
name_addr.append(token)
token, value = get_angle_addr(value)
if leader is not None:
token[:0] = [leader]
name_addr.append(token)
return name_addr, value
def get_mailbox(value):
""" mailbox = name-addr / addr-spec
"""
# The only way to figure out if we are dealing with a name-addr or an
# addr-spec is to try parsing each one.
mailbox = Mailbox()
try:
token, value = get_name_addr(value)
except errors.HeaderParseError:
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected mailbox but found '{}'".format(value))
if any(isinstance(x, errors.InvalidHeaderDefect)
for x in token.all_defects):
mailbox.token_type = 'invalid-mailbox'
mailbox.append(token)
return mailbox, value
def get_invalid_mailbox(value, endchars):
""" Read everything up to one of the chars in endchars.
This is outside the formal grammar. The InvalidMailbox TokenList that is
returned acts like a Mailbox, but the data attributes are None.
"""
invalid_mailbox = InvalidMailbox()
while value and value[0] not in endchars:
if value[0] in PHRASE_ENDS:
invalid_mailbox.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_mailbox.append(token)
return invalid_mailbox, value
def get_mailbox_list(value):
""" mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])
For this routine we go outside the formal grammar in order to improve error
handling. We recognize the end of the mailbox list only at the end of the
value or at a ';' (the group terminator). This is so that we can turn
invalid mailboxes into InvalidMailbox tokens and continue parsing any
remaining valid mailboxes. We also allow all mailbox entries to be null,
and this condition is handled appropriately at a higher level.
"""
mailbox_list = MailboxList()
while value and value[0] != ';':
try:
token, value = get_mailbox(value)
mailbox_list.append(token)
except errors.HeaderParseError:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] in ',;':
mailbox_list.append(leader)
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
elif value[0] == ',':
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] not in ',;':
# Crap after mailbox; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = mailbox_list[-1]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',;')
mailbox.extend(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] == ',':
mailbox_list.append(ListSeparator)
value = value[1:]
return mailbox_list, value
def get_group_list(value):
""" group-list = mailbox-list / CFWS / obs-group-list
obs-group-list = 1*([CFWS] ",") [CFWS]
"""
group_list = GroupList()
if not value:
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header before group-list"))
return group_list, value
leader = None
if value and value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
# This should never happen in email parsing, since CFWS-only is a
# legal alternative to group-list in a group, which is the only
# place group-list appears.
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header in group-list"))
group_list.append(leader)
return group_list, value
if value[0] == ';':
group_list.append(leader)
return group_list, value
token, value = get_mailbox_list(value)
if len(token.all_mailboxes)==0:
if leader is not None:
group_list.append(leader)
group_list.extend(token)
group_list.defects.append(errors.ObsoleteHeaderDefect(
"group-list with empty entries"))
return group_list, value
if leader is not None:
token[:0] = [leader]
group_list.append(token)
return group_list, value
def get_group(value):
""" group = display-name ":" [group-list] ";" [CFWS]
"""
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
elif value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value
def get_address(value):
""" address = mailbox / group
Note that counter-intuitively, an address can be either a single address or
a list of addresses (a group). This is why the returned Address object has
a 'mailboxes' attribute which treats a single address as a list of length
one. When you need to differentiate between the two cases, extract the single
element, which is either a mailbox or a group token.
"""
# The formal grammar isn't very helpful when parsing an address. mailbox
# and group, especially when allowing for obsolete forms, start off very
# similarly. It is only when you reach one of @, <, or : that you know
# what you've got. So, we try each one in turn, starting with the more
# likely of the two. We could perhaps make this more efficient by looking
# for a phrase and then branching based on the next character, but that
# would be a premature optimization.
address = Address()
try:
token, value = get_group(value)
except errors.HeaderParseError:
try:
token, value = get_mailbox(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected address but found '{}'".format(value))
address.append(token)
return address, value
def get_address_list(value):
""" address_list = (address *("," address)) / obs-addr-list
obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])
We depart from the formal grammar here by continuing to parse until the end
of the input, assuming the input to be entirely composed of an
address-list. This is always true in email parsing, and allows us
to skip invalid addresses to parse additional valid ones.
"""
address_list = AddressList()
while value:
try:
token, value = get_address(value)
address_list.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] == ',':
address_list.append(leader)
address_list.defects.append(errors.ObsoleteHeaderDefect(
"address-list entry with no content"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
elif value[0] == ',':
address_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in address-list"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value and value[0] != ',':
# Crap after address; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = address_list[-1][0]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',')
mailbox.extend(token)
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value: # Must be a , at this point.
address_list.append(ValueTerminal(',', 'list-separator'))
value = value[1:]
return address_list, value
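# Sketch: a two-element address list; 'mailboxes' flattens single addresses
# (and groups) into one list of mailbox tokens.
#
#     >>> al, rest = get_address_list('Fred <fred@example.com>, wilma@example.com')
#     >>> [mb.addr_spec for mb in al.mailboxes]
#     ['fred@example.com', 'wilma@example.com']
#     >>> rest
#     ''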
def get_no_fold_literal(value):
""" no-fold-literal = "[" *dtext "]"
"""
no_fold_literal = NoFoldLiteral()
if not value:
raise errors.HeaderParseError(
"expected no-fold-literal but found '{}'".format(value))
if value[0] != '[':
raise errors.HeaderParseError(
"expected '[' at the start of no-fold-literal "
"but found '{}'".format(value))
no_fold_literal.append(ValueTerminal('[', 'no-fold-literal-start'))
value = value[1:]
token, value = get_dtext(value)
no_fold_literal.append(token)
if not value or value[0] != ']':
raise errors.HeaderParseError(
"expected ']' at the end of no-fold-literal "
"but found '{}'".format(value))
no_fold_literal.append(ValueTerminal(']', 'no-fold-literal-end'))
return no_fold_literal, value[1:]
def get_msg_id(value):
"""msg-id = [CFWS] "<" id-left '@' id-right ">" [CFWS]
id-left = dot-atom-text / obs-id-left
id-right = dot-atom-text / no-fold-literal / obs-id-right
no-fold-literal = "[" *dtext "]"
"""
msg_id = MsgID()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
msg_id.append(token)
if not value or value[0] != '<':
raise errors.HeaderParseError(
"expected msg-id but found '{}'".format(value))
msg_id.append(ValueTerminal('<', 'msg-id-start'))
value = value[1:]
# Parse id-left.
try:
token, value = get_dot_atom_text(value)
except errors.HeaderParseError:
try:
# obs-id-left is the same as the local-part of an addr-spec.
token, value = get_obs_local_part(value)
msg_id.defects.append(errors.ObsoleteHeaderDefect(
"obsolete id-left in msg-id"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected dot-atom-text or obs-id-left"
" but found '{}'".format(value))
msg_id.append(token)
if not value or value[0] != '@':
msg_id.defects.append(errors.InvalidHeaderDefect(
"msg-id with no id-right"))
# Even though there is no id-right, if the local part
# ends with `>` let's just parse it too and return
# along with the defect.
if value and value[0] == '>':
msg_id.append(ValueTerminal('>', 'msg-id-end'))
value = value[1:]
return msg_id, value
msg_id.append(ValueTerminal('@', 'address-at-symbol'))
value = value[1:]
# Parse id-right.
try:
token, value = get_dot_atom_text(value)
except errors.HeaderParseError:
try:
token, value = get_no_fold_literal(value)
except errors.HeaderParseError as e:
try:
token, value = get_domain(value)
msg_id.defects.append(errors.ObsoleteHeaderDefect(
"obsolete id-right in msg-id"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected dot-atom-text, no-fold-literal or obs-id-right"
" but found '{}'".format(value))
msg_id.append(token)
if value and value[0] == '>':
value = value[1:]
else:
msg_id.defects.append(errors.InvalidHeaderDefect(
"missing trailing '>' on msg-id"))
msg_id.append(ValueTerminal('>', 'msg-id-end'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
msg_id.append(token)
return msg_id, value
def parse_message_id(value):
"""message-id = "Message-ID:" msg-id CRLF
"""
message_id = MessageID()
try:
token, value = get_msg_id(value)
message_id.append(token)
except errors.HeaderParseError as ex:
token = get_unstructured(value)
message_id = InvalidMessageID(token)
message_id.defects.append(
errors.InvalidHeaderDefect("Invalid msg-id: {!r}".format(ex)))
else:
# Value after parsing a valid msg_id should be None.
if value:
message_id.defects.append(errors.InvalidHeaderDefect(
"Unexpected {!r}".format(value)))
return message_id
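# Sketch: a well-formed Message-ID round-trips through the parser and
# carries no defects.
#
#     >>> mid = parse_message_id('<1234@local.machine.example>')
#     >>> str(mid)
#     '<1234@local.machine.example>'
#     >>> mid.all_defects
#     []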
#
# XXX: As I begin to add additional header parsers, I'm realizing we probably
# have two level of parser routines: the get_XXX methods that get a token in
# the grammar, and parse_XXX methods that parse an entire field value. So
# get_address_list above should really be a parse_ method, as probably should
# be get_unstructured.
#
def parse_mime_version(value):
""" mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
"""
# The [CFWS] is implicit in the RFC 2045 BNF.
# XXX: This routine is a bit verbose, should factor out a get_int method.
mime_version = MIMEVersion()
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Missing MIME version number (eg: 1.0)"))
return mime_version
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Expected MIME version number but found only CFWS"))
digits = ''
while value and value[0] != '.' and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME major version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.major = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value or value[0] != '.':
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
if value:
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
mime_version.append(ValueTerminal('.', 'version-separator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
return mime_version
digits = ''
while value and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME minor version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.minor = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if value:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Excess non-CFWS text after MIME version"))
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
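# Illustrative sketch (not part of the original module): typical results of
# parse_mime_version. The sample values are hypothetical.
#
#   mv = parse_mime_version('1.0')
#   mv.major, mv.minor       # -> (1, 0)
#
#   parse_mime_version('')   # -> MIMEVersion with a HeaderMissingRequiredValue defect
#   parse_mime_version('1.') # -> defect recorded: found only the major number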
def get_invalid_parameter(value):
""" Read everything up to the next ';'.
This is outside the formal grammar. The InvalidParameter TokenList that is
returned acts like a Parameter, but the data attributes are None.
"""
invalid_parameter = InvalidParameter()
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
invalid_parameter.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_parameter.append(token)
return invalid_parameter, value
def get_ttext(value):
"""ttext = <matches _ttext_matcher>
We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
defects list if we find non-ttext characters. We also register defects for
*any* non-printables even though the RFC doesn't exclude all of them,
because we follow the spirit of RFC 5322.
"""
m = _non_token_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected ttext but found '{}'".format(value))
ttext = m.group()
value = value[len(ttext):]
ttext = ValueTerminal(ttext, 'ttext')
_validate_xtext(ttext)
return ttext, value
def get_token(value):
"""token = [CFWS] 1*ttext [CFWS]
The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
tspecials. We also exclude tabs even though the RFC doesn't.
The RFC implies the CFWS but is not explicit about it in the BNF.
"""
mtoken = Token()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
if value and value[0] in TOKEN_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_ttext(value)
mtoken.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
return mtoken, value
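# Illustrative sketch (not part of the original module): get_token consumes
# optional CFWS plus one run of ttext and stops at the next special character.
# The sample input is hypothetical.
#
#   token, rest = get_token(' plain; charset=utf-8')
#   str(token)   # -> ' plain'
#   rest         # -> '; charset=utf-8'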
def get_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character)
We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
token's defects list if we find non-attrtext characters. We also register
defects for *any* non-printables even though the RFC doesn't exclude all of
them, because we follow the spirit of RFC 5322.
"""
m = _non_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_attribute(value):
""" [CFWS] 1*attrtext [CFWS]
This version of the BNF makes the CFWS explicit, and as usual we use a
value terminal for the actual run of characters. The RFC equivalent of
attrtext is the token characters, with the subtraction of '*', "'", and '%'.
We include tab in the excluded set just as we do for token.
"""
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_extended_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
This is a special parsing routine so that we get a value that
includes % escapes as a single string (which we decode as a single
string later).
"""
m = _non_extended_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected extended attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'extended-attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_extended_attribute(value):
""" [CFWS] 1*extended_attrtext [CFWS]
This is like the non-extended version except we allow % characters, so that
we can pick up an encoded value as a single string.
"""
# XXX: should we have an ExtendedAttribute TokenList?
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_extended_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_section(value):
""" '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS.
"""
section = Section()
if not value or value[0] != '*':
raise errors.HeaderParseError("Expected section but found {}".format(
value))
section.append(ValueTerminal('*', 'section-marker'))
value = value[1:]
if not value or not value[0].isdigit():
raise errors.HeaderParseError("Expected section number but "
"found {}".format(value))
digits = ''
while value and value[0].isdigit():
digits += value[0]
value = value[1:]
if digits[0] == '0' and digits != '0':
        section.defects.append(errors.InvalidHeaderDefect(
"section number has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
return section, value
def get_value(value):
""" quoted-string / attribute
"""
v = Value()
if not value:
raise errors.HeaderParseError("Expected value but found end of string")
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError("Expected value but found "
"only {}".format(leader))
if value[0] == '"':
token, value = get_quoted_string(value)
else:
token, value = get_extended_attribute(value)
if leader is not None:
token[:0] = [leader]
v.append(token)
return v, value
def get_parameter(value):
""" attribute [section] ["*"] [CFWS] "=" value
The CFWS is implied by the RFC but not made explicit in the BNF. This
simplified form of the BNF from the RFC is made to conform with the RFC BNF
through some extra checks. We do it this way because it makes both error
recovery and working with the resulting parse tree easier.
"""
# It is possible CFWS would also be implicitly allowed between the section
# and the 'extended-attribute' marker (the '*') , but we've never seen that
# in the wild and we will therefore ignore the possibility.
param = Parameter()
token, value = get_attribute(value)
param.append(token)
if not value or value[0] == ';':
param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
"name ({}) but no value".format(token)))
return param, value
if value[0] == '*':
try:
token, value = get_section(value)
param.sectioned = True
param.append(token)
except errors.HeaderParseError:
pass
if not value:
raise errors.HeaderParseError("Incomplete parameter")
if value[0] == '*':
param.append(ValueTerminal('*', 'extended-parameter-marker'))
value = value[1:]
param.extended = True
if value[0] != '=':
raise errors.HeaderParseError("Parameter not followed by '='")
param.append(ValueTerminal('=', 'parameter-separator'))
value = value[1:]
leader = None
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
param.append(token)
remainder = None
appendto = param
if param.extended and value and value[0] == '"':
# Now for some serious hackery to handle the common invalid case of
# double quotes around an extended value. We also accept (with defect)
# a value marked as encoded that isn't really.
qstring, remainder = get_quoted_string(value)
inner_value = qstring.stripped_value
semi_valid = False
if param.section_number == 0:
if inner_value and inner_value[0] == "'":
semi_valid = True
else:
token, rest = get_attrtext(inner_value)
if rest and rest[0] == "'":
semi_valid = True
else:
try:
token, rest = get_extended_attrtext(inner_value)
            except errors.HeaderParseError:
pass
else:
if not rest:
semi_valid = True
if semi_valid:
param.defects.append(errors.InvalidHeaderDefect(
"Quoted string value for extended parameter is invalid"))
param.append(qstring)
for t in qstring:
if t.token_type == 'bare-quoted-string':
t[:] = []
appendto = t
break
value = inner_value
else:
remainder = None
param.defects.append(errors.InvalidHeaderDefect(
"Parameter marked as extended but appears to have a "
"quoted string value that is non-encoded"))
if value and value[0] == "'":
token = None
else:
token, value = get_value(value)
if not param.extended or param.section_number > 0:
if not value or value[0] != "'":
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
param.defects.append(errors.InvalidHeaderDefect(
"Apparent initial-extended-value but attribute "
"was not marked as extended or was not initial section"))
if not value:
# Assume the charset/lang is missing and the token is the value.
param.defects.append(errors.InvalidHeaderDefect(
"Missing required charset/lang delimiters"))
appendto.append(token)
if remainder is None:
return param, value
else:
if token is not None:
for t in token:
if t.token_type == 'extended-attrtext':
break
            t.token_type = 'attrtext'
appendto.append(t)
param.charset = t.value
if value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {!r}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
value = value[1:]
if value and value[0] != "'":
token, value = get_attrtext(value)
appendto.append(token)
param.lang = token.value
if not value or value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
value = value[1:]
if remainder is not None:
# Treat the rest of value as bare quoted string content.
v = Value()
while value:
if value[0] in WSP:
token, value = get_fws(value)
elif value[0] == '"':
token = ValueTerminal('"', 'DQUOTE')
value = value[1:]
else:
token, value = get_qcontent(value)
v.append(token)
token = v
else:
token, value = get_value(value)
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
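# Illustrative sketch (not part of the original module): get_parameter on an
# RFC 2231 extended parameter (value adapted from the RFC's example).
#
#   param, rest = get_parameter("title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A")
#   param.extended   # -> True
#   param.charset    # -> 'us-ascii'
#   param.lang       # -> 'en-us'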
def parse_mime_parameters(value):
""" parameter *( ";" parameter )
That BNF is meant to indicate this routine should only be called after
finding and handling the leading ';'. There is no corresponding rule in
the formal RFC grammar, but it is more convenient for us for the set of
parameters to be treated as its own TokenList.
This is 'parse' routine because it consumes the remaining value, but it
would never be called to parse a full header. Instead it is called to
parse everything after the non-parameter value of a specific MIME header.
"""
mime_parameters = MimeParameters()
while value:
try:
token, value = get_parameter(value)
mime_parameters.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
mime_parameters.append(leader)
return mime_parameters
if value[0] == ';':
if leader is not None:
mime_parameters.append(leader)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter entry with no content"))
else:
token, value = get_invalid_parameter(value)
if leader:
token[:0] = [leader]
mime_parameters.append(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"invalid parameter {!r}".format(token)))
if value and value[0] != ';':
# Junk after the otherwise valid parameter. Mark it as
# invalid, but it will have a value.
param = mime_parameters[-1]
param.token_type = 'invalid-parameter'
token, value = get_invalid_parameter(value)
param.extend(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter with invalid trailing text {!r}".format(token)))
if value:
# Must be a ';' at this point.
mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
value = value[1:]
return mime_parameters
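# Illustrative sketch (not part of the original module): parse_mime_parameters
# receives everything after the first ';' of a MIME header. The sample value
# is hypothetical.
#
#   params = parse_mime_parameters(' charset="utf-8"; format=flowed')
#   # -> a MimeParameters TokenList holding two Parameter tokens joined by a
#   #    ';' parameter-separator terminal, with no defects recorded.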
def _find_mime_parameters(tokenlist, value):
"""Do our best to find the parameters in an invalid MIME header
"""
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
tokenlist.append(token)
if not value:
return
tokenlist.append(ValueTerminal(';', 'parameter-separator'))
tokenlist.append(parse_mime_parameters(value[1:]))
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
    The maintype and subtype are tokens. Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ctype.defects.append(errors.HeaderMissingRequiredValue(
"Missing content type specification"))
return ctype
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content maintype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
# XXX: If we really want to follow the formal grammar we should make
    # maintype and subtype specialized TokenLists here. Probably not worth it.
if not value or value[0] != '/':
ctype.defects.append(errors.InvalidHeaderDefect(
"Invalid content type"))
if value:
_find_mime_parameters(ctype, value)
return ctype
ctype.maintype = token.value.strip().lower()
ctype.append(ValueTerminal('/', 'content-type-separator'))
value = value[1:]
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content subtype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
ctype.subtype = token.value.strip().lower()
if not value:
return ctype
if value[0] != ';':
ctype.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content type, but "
"found {!r}".format(value)))
# The RFC requires that a syntactically invalid content-type be treated
# as text/plain. Perhaps we should postel this, but we should probably
# only do that if we were checking the subtype value against IANA.
del ctype.maintype, ctype.subtype
_find_mime_parameters(ctype, value)
return ctype
ctype.append(ValueTerminal(';', 'parameter-separator'))
ctype.append(parse_mime_parameters(value[1:]))
return ctype
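# Illustrative sketch (not part of the original module): for a hypothetical
# value such as 'text/plain; charset="utf-8"', the returned ContentType has
# maintype 'text', subtype 'plain', and the parameters parsed by
# parse_mime_parameters; a value with no '/' instead records an
# InvalidHeaderDefect and falls back to _find_mime_parameters.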
def parse_content_disposition_header(value):
""" disposition-type *( ";" parameter )
"""
disp_header = ContentDisposition()
if not value:
disp_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content disposition"))
return disp_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
disp_header.defects.append(errors.InvalidHeaderDefect(
"Expected content disposition but found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(token)
disp_header.content_disposition = token.value.strip().lower()
if not value:
return disp_header
if value[0] != ';':
disp_header.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content disposition, but "
"found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(ValueTerminal(';', 'parameter-separator'))
disp_header.append(parse_mime_parameters(value[1:]))
return disp_header
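# Illustrative sketch (not part of the original module): for a hypothetical
# value such as 'attachment; filename="report.pdf"', the returned token has
# content_disposition == 'attachment' and the filename parameter collected by
# parse_mime_parameters.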
def parse_content_transfer_encoding_header(value):
""" mechanism
"""
# We should probably validate the values, since the list is fixed.
cte_header = ContentTransferEncoding()
if not value:
cte_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content transfer encoding"))
return cte_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Expected content transfer encoding but found {!r}".format(value)))
else:
cte_header.append(token)
cte_header.cte = token.value.strip().lower()
if not value:
return cte_header
while value:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Extra text after content transfer encoding"))
if value[0] in PHRASE_ENDS:
cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
cte_header.append(token)
return cte_header
#
# Header folding
#
# Header folding is complex, with lots of rules and corner cases. The
# following code does its best to obey the rules and handle the corner
# cases, but you can be sure there are few bugs:)
#
# This folder generally canonicalizes as it goes, preferring the stringified
# version of each token. The tokens contain information that supports the
# folder, including which tokens can be encoded in which ways.
#
# Folded text is accumulated in a simple list of strings ('lines'), each
# one of which should be less than policy.max_line_length ('maxlen').
#
def _steal_trailing_WSP_if_exists(lines):
wsp = ''
if lines and lines[-1] and lines[-1][-1] in WSP:
wsp = lines[-1][-1]
lines[-1] = lines[-1][:-1]
return wsp
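# Illustrative sketch (not part of the original module): with
# lines == ['To: alice@example.org, '] the trailing space is removed from the
# last line and returned, so the folder can re-attach it at the start of the
# next folded line; without trailing whitespace the function returns ''.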
def _refold_parse_tree(parse_tree, *, policy):
"""Return string of contents of parse_tree folded according to RFC rules.
"""
# max_line_length 0/None means no limit, ie: infinitely long.
maxlen = policy.max_line_length or sys.maxsize
encoding = 'utf-8' if policy.utf8 else 'us-ascii'
lines = ['']
last_ew = None
wrap_as_ew_blocked = 0
want_encoding = False
end_ew_not_allowed = Terminal('', 'wrap_as_ew_blocked')
parts = list(parse_tree)
while parts:
part = parts.pop(0)
if part is end_ew_not_allowed:
wrap_as_ew_blocked -= 1
continue
tstr = str(part)
if part.token_type == 'ptext' and set(tstr) & SPECIALS:
# Encode if tstr contains special characters.
want_encoding = True
try:
tstr.encode(encoding)
charset = encoding
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
# If policy.utf8 is false this should really be taken from a
# 'charset' property on the policy.
charset = 'utf-8'
want_encoding = True
if part.token_type == 'mime-parameters':
# Mime parameter folding (using RFC2231) is extra special.
_fold_mime_parameters(part, lines, maxlen, encoding)
continue
if want_encoding and not wrap_as_ew_blocked:
if not part.as_ew_allowed:
want_encoding = False
last_ew = None
if part.syntactic_break:
encoded_part = part.fold(policy=policy)[:-len(policy.linesep)]
if policy.linesep not in encoded_part:
# It fits on a single line
if len(encoded_part) > maxlen - len(lines[-1]):
# But not on this one, so start a new one.
newline = _steal_trailing_WSP_if_exists(lines)
# XXX what if encoded_part has no leading FWS?
lines.append(newline)
lines[-1] += encoded_part
continue
# Either this is not a major syntactic break, so we don't
# want it on a line by itself even if it fits, or it
# doesn't fit on a line by itself. Either way, fall through
# to unpacking the subparts and wrapping them.
if not hasattr(part, 'encode'):
# It's not a Terminal, do each piece individually.
parts = list(part) + parts
else:
# It's a terminal, wrap it as an encoded word, possibly
# combining it with previously encoded words if allowed.
last_ew = _fold_as_ew(tstr, lines, maxlen, last_ew,
part.ew_combine_allowed, charset)
want_encoding = False
continue
if len(tstr) <= maxlen - len(lines[-1]):
lines[-1] += tstr
continue
# This part is too long to fit. The RFC wants us to break at
# "major syntactic breaks", so unless we don't consider this
# to be one, check if it will fit on the next line by itself.
if (part.syntactic_break and
len(tstr) + 1 <= maxlen):
newline = _steal_trailing_WSP_if_exists(lines)
if newline or part.startswith_fws():
lines.append(newline + tstr)
last_ew = None
continue
if not hasattr(part, 'encode'):
# It's not a terminal, try folding the subparts.
newparts = list(part)
if not part.as_ew_allowed:
wrap_as_ew_blocked += 1
newparts.append(end_ew_not_allowed)
parts = newparts + parts
continue
if part.as_ew_allowed and not wrap_as_ew_blocked:
# It doesn't need CTE encoding, but encode it anyway so we can
# wrap it.
parts.insert(0, part)
want_encoding = True
continue
        # We can't figure out how to wrap it, so give up.
newline = _steal_trailing_WSP_if_exists(lines)
if newline or part.startswith_fws():
lines.append(newline + tstr)
else:
# We can't fold it onto the next line either...
lines[-1] += tstr
return policy.linesep.join(lines) + policy.linesep
def _fold_as_ew(to_encode, lines, maxlen, last_ew, ew_combine_allowed, charset):
"""Fold string to_encode into lines as encoded word, combining if allowed.
Return the new value for last_ew, or None if ew_combine_allowed is False.
If there is already an encoded word in the last line of lines (indicated by
a non-None value for last_ew) and ew_combine_allowed is true, decode the
existing ew, combine it with to_encode, and re-encode. Otherwise, encode
to_encode. In either case, split to_encode as necessary so that the
encoded segments fit within maxlen.
"""
if last_ew is not None and ew_combine_allowed:
to_encode = str(
get_unstructured(lines[-1][last_ew:] + to_encode))
lines[-1] = lines[-1][:last_ew]
if to_encode[0] in WSP:
# We're joining this to non-encoded text, so don't encode
# the leading blank.
leading_wsp = to_encode[0]
to_encode = to_encode[1:]
if (len(lines[-1]) == maxlen):
lines.append(_steal_trailing_WSP_if_exists(lines))
lines[-1] += leading_wsp
trailing_wsp = ''
if to_encode[-1] in WSP:
# Likewise for the trailing space.
trailing_wsp = to_encode[-1]
to_encode = to_encode[:-1]
new_last_ew = len(lines[-1]) if last_ew is None else last_ew
encode_as = 'utf-8' if charset == 'us-ascii' else charset
# The RFC2047 chrome takes up 7 characters plus the length
# of the charset name.
chrome_len = len(encode_as) + 7
if (chrome_len + 1) >= maxlen:
raise errors.HeaderParseError(
"max_line_length is too small to fit an encoded word")
while to_encode:
remaining_space = maxlen - len(lines[-1])
text_space = remaining_space - chrome_len
if text_space <= 0:
lines.append(' ')
continue
to_encode_word = to_encode[:text_space]
encoded_word = _ew.encode(to_encode_word, charset=encode_as)
excess = len(encoded_word) - remaining_space
while excess > 0:
# Since the chunk to encode is guaranteed to fit into less than 100 characters,
# shrinking it by one at a time shouldn't take long.
to_encode_word = to_encode_word[:-1]
encoded_word = _ew.encode(to_encode_word, charset=encode_as)
excess = len(encoded_word) - remaining_space
lines[-1] += encoded_word
to_encode = to_encode[len(to_encode_word):]
if to_encode:
lines.append(' ')
new_last_ew = len(lines[-1])
lines[-1] += trailing_wsp
return new_last_ew if ew_combine_allowed else None
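# Worked example (not part of the original module) for the chrome_len
# arithmetic above: an RFC 2047 encoded word has the shape =?charset?q?text?=,
# so the fixed overhead is 7 characters ('=?', '?q?', '?=') plus the charset
# name; for 'utf-8' that is 5 + 7 = 12 characters of chrome per encoded word.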
def _fold_mime_parameters(part, lines, maxlen, encoding):
"""Fold TokenList 'part' into the 'lines' list as mime parameters.
Using the decoded list of parameters and values, format them according to
the RFC rules, including using RFC2231 encoding if the value cannot be
expressed in 'encoding' and/or the parameter+value is too long to fit
within 'maxlen'.
"""
# Special case for RFC2231 encoding: start from decoded values and use
# RFC2231 encoding iff needed.
#
# Note that the 1 and 2s being added to the length calculations are
# accounting for the possibly-needed spaces and semicolons we'll be adding.
#
for name, value in part.params:
# XXX What if this ';' puts us over maxlen the first time through the
# loop? We should split the header value onto a newline in that case,
# but to do that we need to recognize the need earlier or reparse the
# header, so I'm going to ignore that bug for now. It'll only put us
# one character over.
if not lines[-1].rstrip().endswith(';'):
lines[-1] += ';'
charset = encoding
error_handler = 'strict'
try:
value.encode(encoding)
encoding_required = False
except UnicodeEncodeError:
encoding_required = True
if utils._has_surrogates(value):
charset = 'unknown-8bit'
error_handler = 'surrogateescape'
else:
charset = 'utf-8'
if encoding_required:
encoded_value = urllib.parse.quote(
value, safe='', errors=error_handler)
tstr = "{}*={}''{}".format(name, charset, encoded_value)
else:
tstr = '{}={}'.format(name, quote_string(value))
if len(lines[-1]) + len(tstr) + 1 < maxlen:
lines[-1] = lines[-1] + ' ' + tstr
continue
elif len(tstr) + 2 <= maxlen:
lines.append(' ' + tstr)
continue
# We need multiple sections. We are allowed to mix encoded and
# non-encoded sections, but we aren't going to. We'll encode them all.
section = 0
extra_chrome = charset + "''"
while value:
chrome_len = len(name) + len(str(section)) + 3 + len(extra_chrome)
if maxlen <= chrome_len + 3:
# We need room for the leading blank, the trailing semicolon,
# and at least one character of the value. If we don't
# have that, we'd be stuck, so in that case fall back to
# the RFC standard width.
maxlen = 78
splitpoint = maxchars = maxlen - chrome_len - 2
while True:
partial = value[:splitpoint]
encoded_value = urllib.parse.quote(
partial, safe='', errors=error_handler)
if len(encoded_value) <= maxchars:
break
splitpoint -= 1
lines.append(" {}*{}*={}{}".format(
name, section, extra_chrome, encoded_value))
extra_chrome = ''
section += 1
value = value[splitpoint:]
if value:
lines[-1] += ';'
| xyuanmu/XX-Net | python3.8.2/Lib/email/_header_value_parser.py | Python | bsd-2-clause | 106,460 | [ "CRYSTAL" ] | 033ade7a9948d073b07e4f9d3dedfe0662ae1ce865b62cda317741e57728c449 |
#Turbomole converter
#Converts basis files in turbomole format to C++ files readable by Fermion Mingle
import glob
import os
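#Illustrative sketch (not part of the original script): the rough shape of a
#turbomole basis file this converter expects. The numbers are a hypothetical
#hydrogen entry, shown only to indicate the layout.
#
# $basis
# *
# h STO-3G
# *
# 3 s
# 3.42525091 0.15432897
# 0.62391373 0.53532814
# 0.16885540 0.44463454
# *
# $end
#
#'$' and '*' lines are skipped, '#' lines are carried over as C++ comments,
#an element line names the basis set, an 'n s/p/d/f' line starts a contracted
#orbital, and each following pair of numbers (Fortran 'D' exponents accepted)
#is read as one primitive.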
def extract_m_basisinfo(filename, printing = False):
#Extract multiple basises from a turbomole file
swtch = 0
f = open(filename)
comments = []
basis_sets = [] #list containing all basis sets
basis = []
basisW = []
basisE = []
basisT = []
contractedtype = []
activecontracted = -1
for i in f:
I = i.split()
if len(I)==0:
continue
if I[0] == "*":
continue #ignore wildcards
if I[0] == '#':
comments.append("//")
comments[-1] += i
#for e in I:
# comments[-1] += e + " "
continue
#print I
if I[0][0] == "$":
continue
if I[0] in ["h", "he", "li", "be", "b", "c", "n", "o", "f", "ne", "na" ,"mg", "al", "si", "p", "s", "cl", "ar", "k", "ca", "sc", "ti", "v", "cr", "mn", "fe", "co", "ni", "cu", "zn", "ga", "ge", "as", "se","br", "kr", "rb", "sr", "y", "zr", "nb", "mo", "tc", "ru", "rh", "pd", "ag", "cd", "in", "sn", "sb", "te", "i", "xe"]:
#If not first time here, put current basis into list
if swtch == 1:
basis_sets.append([bT, basisW, basisE, contractedtype, len(contractedtype)])
basisW = []
basisE = []
contractedtype = []
activecontracted = -1
swtch = 1
#Begin collecting data
bT = I[1]+"_"+I[0]
bT = bT.replace("-", "_") #basistype
bT = bT.replace("(", "") #basistype
bT = bT.replace(")", "") #basistype
bT = bT.replace(",", "") #basistype
continue
if I[0] in "123456789":
if I[1] == "s":
#print "Identified an s orbital."
contractedtype.append(0)
if I[1] == "p":
#print "Identified an p orbital."
contractedtype.append(1)
if I[1] == "d":
#print "Identified an d orbital."
contractedtype.append(2)
if I[1] == "f":
#print "Identified an f orbital."
contractedtype.append(3)
#Create contracted
basis.append([])
basisE.append([])
basisW.append([])
activecontracted += 1
#print I
else:
try:
w = float(I[0].replace("D", "E"))
e = float(I[1].replace("D", "E"))
basisE[activecontracted].append(e)
basisW[activecontracted].append(w)
#basisE[activecontracted].append(float(I[1]))
#basisW[activecontracted].append(float(I[0]))
except:
comments.append("//Ignored the following information from file:")
for e in I:
comments[-1] += e + " "
#comments[-1] += "Ignored the following information:" + I
#print "Failed to read weigths and exponents."
#print I
#print I
    # Append the basis set of the last element in the file as well (the loop
    # above only flushes when the next element line is reached).
    if swtch == 1:
        basis_sets.append([bT, basisW, basisE, contractedtype, len(contractedtype)])
    if printing:
print comments
print contractedtype
print len(contractedtype)
print basisE
print basisW
f.close()
#return comments, contractedtype, len(contractedtype), basisE, basisW
return comments, basis_sets
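#Each entry of basis_sets above (layout inferred from the code, for reference):
# [basis_name, weights_per_orbital, exponents_per_orbital,
#  orbital_types (0=s, 1=p, 2=d, 3=f), number_of_orbitals]
#convertfolder() below unpacks these fields positionally.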
def extract_basisinfo(filename, printing = False):
swtch = 0
f = open(filename)
comments = []
basis = []
basisW = []
basisE = []
basisT = []
contractedtype = []
activecontracted = -1
for i in f:
I = i.split()
if len(I)==0:
continue
if I[0] == "*":
continue #ignore wildcards
if I[0] == '#':
comments.append("//")
for e in I:
comments[-1] += e + " "
continue
#print I
if I[0][0] == "$":
swtch += 1
swtch = swtch %2
#if swtch == 1:
#Add new function
continue
if I[0] in ["h", "he", "li", "be", "b", "c", "n", "o", "f", "ne", "na" ,"mg", "al", "si", "p", "s", "cl", "ar", "k", "ca", "sc", "ti", "v", "cr", "mn", "fe", "co", "ni", "cu", "zn", "ga", "ge", "as", "se","br", "kr", "rb", "sr", "y", "zr", "nb", "mo", "tc", "ru", "rh", "pd", "ag", "cd", "in", "sn", "sb", "te", "i", "xe"]:
bT = I[1]+"_"+I[0]
bT = bT.replace("-", "_")
basisT.append(bT)
print basisT[-1]
continue
if I[0] in "123456789":
if I[1] == "s":
#print "Identified an s orbital."
contractedtype.append(0)
if I[1] == "p":
#print "Identified an p orbital."
contractedtype.append(1)
if I[1] == "d":
#print "Identified an d orbital."
contractedtype.append(2)
if I[1] == "f":
#print "Identified an f orbital."
contractedtype.append(3)
#Create contracted
basis.append([])
basisE.append([])
basisW.append([])
activecontracted += 1
#print I
else:
try:
w = float(I[0].replace("D", "E"))
e = float(I[1].replace("D", "E"))
basisE[activecontracted].append(e)
basisW[activecontracted].append(w)
except:
comments.append("//Ignored the following information from file:")
for e in I:
comments[-1] += e + " "
#comments[-1] += "Ignored the following information:" + I
#print "Failed to read weigths and exponents."
#print I
#print I
if printing:
print comments
print contractedtype
print len(contractedtype)
print basisE
print basisW
f.close()
return comments, contractedtype, len(contractedtype), basisE, basisW
def createfunction(fname, comments, orbital_types, N_orbitals, exponents, weights):
endline = ["\n"]
cppclass = []
cppheader = []
for i in comments:
cppclass.append(i)
#cppheader.append(i)
#cppclass.append(endline)
#cppheader.append(endline)
#writing to header
cppheader.append(" void add_"+fname+"(vec3 corePos);")
cppclass.append("void basisbank::add_"+fname+"(vec3 corePos){\n")
for i in range(N_orbitals):
if orbital_types[i] == 0:
#create an s-orbital
cppclass.append(" bs.add_state();")
for e in range(len(exponents[i])):
cppclass.append(" Primitive S%iA%i = bs.turbomolePrimitive(%.8f,%.8f,0,0,0,corePos);" % (e, i,exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, S%iA%i);" % (e,i))
if orbital_types[i] == 1:
#create an p-orbital
cppclass.append(" bs.add_state();")
for e in range(len(exponents[i])):
cppclass.append(" Primitive P%iA%i = bs.turbomolePrimitive(%.8f,%.8f,1,0,0,corePos);" % (e,i, exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, P%iA%i);" %( e,i))
cppclass.append(" Primitive P%iB%i = bs.turbomolePrimitive(%.8f,%.8f,0,1,0,corePos);" % (e,i, exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, P%iB%i);" % (e,i))
cppclass.append(" Primitive P%iC%i = bs.turbomolePrimitive(%.8f,%.8f,0,0,1,corePos);" % (e,i, exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, P%iC%i);" % (e,i))
if orbital_types[i] == 2:
#create an d-orbital
cppclass.append(" bs.add_state();")
for e in range(len(exponents[i])):
cppclass.append(" Primitive D%iA%i = bs.turbomolePrimitive(%.8f,%.8f,2,0,0,corePos);" % (e, i,exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, D%iA%i);" % (e,i))
cppclass.append(" Primitive D%iB%i = bs.turbomolePrimitive(%.8f,%.8f,0,2,0,corePos);" % (e,i, exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, D%iB%i);" %(e,i))
cppclass.append(" Primitive D%iC%i = bs.turbomolePrimitive(%.8f,%.8f,0,0,2,corePos);" % (e,i, exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, D%iC%i);" %( e,i))
cppclass.append(" Primitive D%iD%i = bs.turbomolePrimitive(%.8f,%.8f,1,1,0,corePos);" % (e, i,exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, D%iD%i);" % (e,i))
cppclass.append(" Primitive D%iE%i = bs.turbomolePrimitive(%.8f,%.8f,0,1,1,corePos);" % (e,i, exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, D%iE%i);" % (e,i))
cppclass.append(" Primitive D%iF%i = bs.turbomolePrimitive(%.8f,%.8f,1,0,1,corePos);" % (e, i,exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, D%iF%i);" %( e,i))
if orbital_types[i] == 3:
#create an d-orbital
cppclass.append(" bs.add_state();")
for e in range(len(exponents[i])):
cppclass.append(" Primitive E%iA%i = bs.turbomolePrimitive(%.8f,%.8f,3,0,0,corePos);" % (e, i,exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, E%iA%i);" % (e,i))
cppclass.append(" Primitive E%iB%i = bs.turbomolePrimitive(%.8f,%.8f,0,3,0,corePos);" % (e,i, exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, E%iB%i);" %(e,i))
cppclass.append(" Primitive E%iC%i = bs.turbomolePrimitive(%.8f,%.8f,0,0,3,corePos);" % (e,i, exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, E%iC%i);" %( e,i))
cppclass.append(" Primitive E%iD%i = bs.turbomolePrimitive(%.8f,%.8f,1,2,0,corePos);" % (e, i,exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, E%iD%i);" % (e,i))
cppclass.append(" Primitive E%iE%i = bs.turbomolePrimitive(%.8f,%.8f,0,1,2,corePos);" % (e,i, exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, E%iE%i);" % (e,i))
cppclass.append(" Primitive E%iF%i = bs.turbomolePrimitive(%.8f,%.8f,1,0,2,corePos);" % (e, i,exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, E%iF%i);" %( e,i))
cppclass.append(" Primitive E%iG%i = bs.turbomolePrimitive(%.8f,%.8f,2,1,0,corePos);" % (e, i,exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, E%iG%i);" % (e,i))
cppclass.append(" Primitive E%iH%i = bs.turbomolePrimitive(%.8f,%.8f,0,2,1,corePos);" % (e,i, exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, E%iH%i);" % (e,i))
cppclass.append(" Primitive E%iI%i = bs.turbomolePrimitive(%.8f,%.8f,2,0,1,corePos);" % (e, i,exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, E%iI%i);" %( e,i))
cppclass.append(" Primitive E%iJ%i = bs.turbomolePrimitive(%.8f,%.8f,1,1,1,corePos);" % (e, i,exponents[i][e], weights[i][e]))
cppclass.append(" bs.add_primitive_to_state(bs.Nstates-1, E%iJ%i);" %( e,i))
cppclass.append("}\n")
return cppheader, cppclass
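#Illustrative sketch (not part of the original script): for a single s-type
#contracted orbital, createfunction emits C++ along these lines, with
#<exponent>/<weight> standing in for the parsed numbers:
#
# void basisbank::add_STO_3G_h(vec3 corePos){
#     bs.add_state();
#     Primitive S0A0 = bs.turbomolePrimitive(<exponent>,<weight>,0,0,0,corePos);
#     bs.add_primitive_to_state(bs.Nstates-1, S0A0);
# }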
def saveclass(cppclasses, cppheaders):
cppfile = "//This file is maintained by an external python script and should not be edited manually.\n#include <basisbank.h>\n#include <armadillo>\n#include <basis.h>\n#include <primitive.h>\nusing namespace std;\nusing namespace arma;\nbasisbank::basisbank(basis BS){\n bs = BS;} \nbasisbank::basisbank(){} \n \n"
for i in cppclasses:
for e in i:
cppfile += e+ "\n"
cppfile += "\n"
#print cppfile
cppheader = "//This file is maintained by an external python script and should not be edited manually.\n#ifndef BASISBANK_H\n#define BASISBANK_H\n#include <armadillo>\n#include <basis.h>\n#include <primitive.h>\nusing namespace std;\nusing namespace arma;\n \nclass basisbank{\npublic:\n basisbank(basis BS);\n basisbank();\n basis bs;\n string basistype;"
for i in cppheaders:
for e in i:
cppheader += e + "\n"
cppheader += "};\n"
cppheader += "#endif // BASISBANK_H"
#print cppheader
#Write files
cpp_header = open("basisbank.h", "w")
cpp_class = open("basisbank.cpp", "w")
cpp_header.write(cppheader)
cpp_class.write(cppfile)
cpp_header.close()
cpp_class.close()
def convertfolder():
cppclasses = []
cppheaders = []
folder = [] #a list containing all files in folder
#os.chdir("/")
#print os.getcwd()
for filename in os.listdir(os.getcwd()):
if filename.endswith(".txt"):
try:
#comments, orbital_types, N_orbitals, exponents, weigths = extract_basisinfo(filename)
comments, I = extract_m_basisinfo(filename)
commented = 0
#print comments
for i in I:
#i = [bT, basisW, basisE, contractedtype, len(contractedtype)]
if commented == 0:
cppheader, cppclass = createfunction(i[0], comments, i[3], i[4], i[2], i[1])
cppclasses.append(cppclass)
cppheaders.append(cppheader)
commented = 1
else:
cppheader, cppclass = createfunction(i[0], [""], i[3], i[4], i[2], i[1])
cppclasses.append(cppclass)
cppheaders.append(cppheader)
except:
print "Failed to import file:", filename
saveclass(cppclasses, cppheaders)
#filename = "STO6G_H.txt"
#comments, orbital_types, N_orbitals, exponents, weigths = extract_basisinfo(filename)
#cppheader, cppclass = createfunction(filename, comments, orbital_types, N_orbitals, exponents, weigths)
convertfolder() #converts all .txt files in current folder
#saveclass([cppclass],[cppheader])
#filename = "sto3g_all.txt"
#comm, info = extract_m_basisinfo(filename)
#for i in info:
# print i[0]
#print exponents
| CompPhysics/ThesisProjects | doc/MSc/msc_students/former/AudunHansen/Audun/Pythonscripts/TurbomoleConverter/TurbomoleConverter.py | Python | cc0-1.0 | 15,786 | [ "TURBOMOLE" ] | 8d491cb6a67df6c33eb84bb9f720eaf073fe6322c1c3e2d52f24159e31de0b6d |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def initial_data(apps, schema_editor):
TC = apps.get_model('tech', 'TechCategory')
T = apps.get_model('tech', 'Tech')
tc = TC(name='Physics', cost=50, category='PH')
tc.save()
T(name='Laser Cannon', categ=tc, desc="""Highly focused, coherent beams of light which deliver 1-4 points of damage. Modifications: autofire, armor piercing, heavy mount, continuous, point defense, no range dissipation.""").save()
T(name='Laser Rifle', categ=tc, desc="""A hand-held weapon that increases the combat rating of militia, troops, and armor by +5. The bonus is not cumulative with other rifles.""").save()
T(name='Space Scanner', categ=tc, desc="""Detects enemy ships. Base detection range is 1 parsec. Ships in transit can be detected 1 parsec further per size class of ship.""").save()
tc = TC(name='Fusion Physics', cost=150, category='PH')
tc.save()
T(name='Fusion Beam', categ=tc, desc="""Projects a tetrium stream of charged particles, inflicting 2-6 points of damage Modifications: heavy mount, continuous, point defense, enveloping.""").save()
T(name='Fusion Rifle', categ=tc, desc="""A powerful hand-held rifle that increases the combat rating of militia, troops, and armor by +10. The bonus is not cumulative with other rifles.""").save()
tc = TC(name='Tachyon Physics', cost=250, category='PH')
tc.save()
    T(name='Tachyon Communication', categ=tc, desc="""Integrated into star bases, battlestations and star fortresses. Emits signals which penetrate hyperspace. Ships within 3 parsecs can receive your orders. Increases the command points of these bases by +1.""").save()
T(name='Tachyon Scanner', categ=tc, desc="""Detects enemy ships. Has a base detection range of 3 parsecs. Ships in transit can be detected 1 parsec further per size class of ship. Tachyon scanners reduce enemy ship's missile evasion by -20.""").save()
T(name='Battle Scanner', categ=tc, desc="""Increases a ship's chance to hit with beam weapons by +50%. Adds +2 parsecs to galactic scanning range.""").save()
tc = TC(name='Neutrino Physics', cost=900, category='PH')
tc.save()
T(name='Neutron Blaster', categ=tc, desc="""Fires an intense beam of lethal radiation. Inflicts 3-12 points of damage to shields. Any damage that penetrates the shields kills 1 marine unit for every 5 points of internal damage. Modifications: heavy mount, continuous.""").save()
T(name='Neutron Scanner', categ=tc, desc="""Detects enemy ships. Has a base detection range of 5 parsecs. Ships in transit can be detected at 1 parsec per size class of ship greater range. Neutron scanners reduce enemy ship's missile evasion by -40.""").save()
tc = TC(name='Artificial Gravity', cost=1150, category='PH')
tc.save()
T(name='Tractor Beam', categ=tc, desc="""It requires 1 tractor beam per size class of target ship to completely stop the target. Ships not held completely lose a portion of movement. Motionless ships can be boarded and are easy targets to hit.""").save()
T(name='Graviton Beam', categ=tc, desc="""Gravitic waves that tear a target apart. Inflicts 3-15 points of damage. Any damage penetrating the ship's shields inflicts extra structural damage. Modifications: heavy mount, continuous.""").save()
T(name='Planetary Gravity Generator', categ=tc, desc="""Creates artificial gravity to normalize a planet to standard gravity limits. Gravity generators eliminate the negative effects of low and high gravity fields.""").save()
tc = TC(name='Subspace Physics', cost=1500, category='PH')
tc.save()
T(name='Subspace Communication', categ=tc, desc="""Allows you to issue orders to any ship within 6 squares of a star system with a starbase, battlestation, or star fortress. Increases the command points given by a base by +2 points.""").save()
T(name='Jump Gate', categ=tc, desc="""Forms a controlled wormhole between two points, increasing the speed of ships moving through it by 3 parsecs a turn. Once discovered, all colonies will automatically be equipped with one.""").save()
tc = TC(name='Multi-Phased Physics', cost=2000, category='PH')
tc.save()
T(name='Phasor', categ=tc, desc="""Fires a trans-warp beam of phased energy that actually exists in several dimensions simultaneously, inflicting 5-20 points of damage. Modifications: autofire, continuous, heavy mount, point defense, shield piercing.""").save()
T(name='Phasor Rifle', categ=tc, desc="""A hand-held rifle that can almost disintegrate an opponent with one blast, increasing the combat rating of militia, troops and armor by +20. The bonus is not cumulative with other rifles.""").save()
T(name='Multi-Phased Shields', categ=tc, desc="""Allow ships to rapidly change the frequency of their shields, increasing the maximum strength by +50%.""").save()
tc = TC(name='Plasma Physics', cost=3500, category='PH')
tc.save()
    T(name='Plasma Cannon', categ=tc, desc="""Fires a blast of plasma energy inflicting 6-30 points of damage to all 4 shields of the target. Only focuses well over a short range, doubling all range penalties. Modifications: heavy mount, and continuous fire.""").save()
T(name='Plasma Rifle', categ=tc, desc="""The most powerful held weapon, increasing the combat rating of militia, troops, and armor by +30. The bonus is not cumulative with other rifle bonuses.""").save()
    T(name='Plasma Web', categ=tc, desc="""An energy web that envelops a target within a 15 square range. Inflicts 5-25 points of damage per turn. This damage persists, dissipating at a rate of 5 points per turn.""").save()
    tc = TC(name='Multi-Dimensional Physics', cost=4500, category='PH')
tc.save()
T(name='Disruptor Cannon', categ=tc, desc="""Fires intense bolts of energy that inflicts 40 points of damage. Damage from disrupter bolts is not reduced by range. Modifications: autofire, heavy mount.""").save()
T(name='Dimensional Portal', categ=tc, desc="""Allows ships to cross into the dimension of the Antaran home world. To use it, select a fleet in the same system as the portal and press the 'Attack Antarans' button on the fleet pop-up.""").save()
tc = TC(name='Hyper-Dimensional Physics', cost=6000, category='PH')
tc.save()
T(name='Hyperspace Communication', categ=tc, desc="""Allows you to communicate with any ship already in hyperspace so you can change its destination. Increases the command points of star bases, battle stations and star fortresses by +3 points.""").save()
T(name='Sensors', categ=tc, desc="""Detects enemy ships. Has a base detection range of 8 parsecs. Ships in transit can be detected 1 parsec further per size class of ship. Sensors reduce enemy ship's missile evasion by -70.""").save()
T(name='Mauler Device', categ=tc, desc="""Fires a bolt of pure energy that always hits. Inflicts 100 points of damage, but with double the range penalties for dissipation. Modifications: heavy mount.""").save()
tc = TC(name='Temporal Physics', cost=15000, category='PH')
tc.save()
T(name='Time Warp Facilitator', categ=tc, desc="""Allows a ship to shift in and out of the time-space continuum, enabling it to take an additional turn at the end of every combat turn.""").save()
T(name='Stellar Converter', categ=tc, desc="""A plasma cannon that can be mounted on a ship or planet. Automatically inflicts 400 hits to all shields of any ship, regardless of range and defense. Completely destroys a planet when used for orbital bombardment.""").save()
T(name='Star Gate', categ=tc, desc="""Allows instantaneous movement between any of your colonies. All colonies are automatically equipped with one once the technology is researched.""").save()
class Migration(migrations.Migration):
dependencies = [
('tech', '0008_biology_techs'),
]
operations = [
migrations.RunPython(initial_data),
]
# EOF
| dwagon/pymoo | moo/tech/migrations/0009_physics_techs.py | Python | gpl-2.0 | 7,981 | [ "BLAST" ] | e67103871ebf7d93cc0c27126bfe732c3304eed720219e8f17c9cb49690f75c2 |
''' Some tests for filters '''
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from numpy.testing import (assert_equal, assert_raises, assert_allclose,
assert_array_equal, assert_almost_equal,
TestCase, run_module_suite)
import scipy.ndimage as sndi
def test_ticket_701():
# Test generic filter sizes
arr = np.arange(4).reshape((2,2))
func = lambda x: np.min(x)
res = sndi.generic_filter(arr, func, size=(1,1))
# The following raises an error unless ticket 701 is fixed
res2 = sndi.generic_filter(arr, func, size=1)
assert_equal(res, res2)
def test_gh_5430():
# At least one of these raises an error unless gh-5430 is
# fixed. In py2k an int is implemented using a C long, so
# which one fails depends on your system. In py3k there is only
# one arbitrary precision integer type, so both should fail.
sigma = np.int32(1)
out = sndi._ni_support._normalize_sequence(sigma, 1)
assert_equal(out, [sigma])
sigma = np.int64(1)
out = sndi._ni_support._normalize_sequence(sigma, 1)
assert_equal(out, [sigma])
# This worked before; make sure it still works
sigma = 1
out = sndi._ni_support._normalize_sequence(sigma, 1)
assert_equal(out, [sigma])
# This worked before; make sure it still works
sigma = [1, 1]
out = sndi._ni_support._normalize_sequence(sigma, 2)
assert_equal(out, sigma)
# Also include the OPs original example to make sure we fixed the issue
x = np.random.normal(size=(256, 256))
perlin = np.zeros_like(x)
for i in 2**np.arange(6):
perlin += sndi.filters.gaussian_filter(x, i, mode="wrap") * i**2
# This also fixes gh-4106, show that the OPs example now runs.
x = np.int64(21)
sndi._ni_support._normalize_sequence(x, 0)
def test_orders_gauss():
# Check order inputs to Gaussians
arr = np.zeros((1,))
yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=0)
yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=3)
yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, -1
yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, 4
yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0)
yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3)
yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1
yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, 4
def test_valid_origins():
"""Regression test for #1311."""
func = lambda x: np.mean(x)
data = np.array([1,2,3,4,5], dtype=np.float64)
assert_raises(ValueError, sndi.generic_filter, data, func, size=3,
origin=2)
func2 = lambda x, y: np.mean(x + y)
assert_raises(ValueError, sndi.generic_filter1d, data, func,
filter_size=3, origin=2)
assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3,
origin=2)
for filter in [sndi.uniform_filter, sndi.minimum_filter,
sndi.maximum_filter, sndi.maximum_filter1d,
sndi.median_filter, sndi.minimum_filter1d]:
# This should work, since for size == 3, the valid range for origin is
# -1 to 1.
list(filter(data, 3, origin=-1))
list(filter(data, 3, origin=1))
# Just check this raises an error instead of silently accepting or
# segfaulting.
assert_raises(ValueError, filter, data, 3, origin=2)
def test_multiple_modes():
    # Test that the filters with multiple mode capabilities for different
# dimensions give the same result as applying a single mode.
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
mode1 = 'reflect'
mode2 = ['reflect', 'reflect']
assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1),
sndi.gaussian_filter(arr, 1, mode=mode2))
assert_equal(sndi.prewitt(arr, mode=mode1),
sndi.prewitt(arr, mode=mode2))
assert_equal(sndi.sobel(arr, mode=mode1),
sndi.sobel(arr, mode=mode2))
assert_equal(sndi.laplace(arr, mode=mode1),
sndi.laplace(arr, mode=mode2))
assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1),
sndi.gaussian_laplace(arr, 1, mode=mode2))
assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1),
sndi.maximum_filter(arr, size=5, mode=mode2))
assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1),
sndi.minimum_filter(arr, size=5, mode=mode2))
assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1),
sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2))
assert_equal(sndi.uniform_filter(arr, 5, mode=mode1),
sndi.uniform_filter(arr, 5, mode=mode2))
def test_multiple_modes_sequentially():
    # Test that the filters with multiple mode capabilities for different
# dimensions give the same result as applying the filters with
# different modes sequentially
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
modes = ['reflect', 'wrap']
expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
assert_equal(expected,
sndi.gaussian_filter(arr, 1, mode=modes))
expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
assert_equal(expected,
sndi.uniform_filter(arr, 5, mode=modes))
expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1])
assert_equal(expected,
sndi.maximum_filter(arr, size=5, mode=modes))
expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1])
assert_equal(expected,
sndi.minimum_filter(arr, size=5, mode=modes))
def test_multiple_modes_prewitt():
# Test prewitt filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[1., -3., 2.],
[1., -2., 1.],
[1., -1., 0.]])
modes = ['reflect', 'wrap']
assert_equal(expected,
sndi.prewitt(arr, mode=modes))
def test_multiple_modes_sobel():
# Test sobel filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[1., -4., 3.],
[2., -3., 1.],
[1., -1., 0.]])
modes = ['reflect', 'wrap']
assert_equal(expected,
sndi.sobel(arr, mode=modes))
def test_multiple_modes_laplace():
# Test laplace filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[-2., 2., 1.],
[-2., -3., 2.],
[1., 1., 0.]])
modes = ['reflect', 'wrap']
assert_equal(expected,
sndi.laplace(arr, mode=modes))
def test_multiple_modes_gaussian_laplace():
# Test gaussian_laplace filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[-0.28438687, 0.01559809, 0.19773499],
[-0.36630503, -0.20069774, 0.07483620],
[0.15849176, 0.18495566, 0.21934094]])
modes = ['reflect', 'wrap']
assert_almost_equal(expected,
sndi.gaussian_laplace(arr, 1, mode=modes))
def test_multiple_modes_gaussian_gradient_magnitude():
# Test gaussian_gradient_magnitude filter for multiple
# extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[0.04928965, 0.09745625, 0.06405368],
[0.23056905, 0.14025305, 0.04550846],
[0.19894369, 0.14950060, 0.06796850]])
modes = ['reflect', 'wrap']
calculated = sndi.gaussian_gradient_magnitude(arr, 1, mode=modes)
assert_almost_equal(expected, calculated)
def test_multiple_modes_uniform():
# Test uniform filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[0.32, 0.40, 0.48],
[0.20, 0.28, 0.32],
[0.28, 0.32, 0.40]])
modes = ['reflect', 'wrap']
assert_almost_equal(expected,
sndi.uniform_filter(arr, 5, mode=modes))
def test_gaussian_truncate():
# Test that Gaussian filters can be truncated at different widths.
# These tests only check that the result has the expected number
# of nonzero elements.
arr = np.zeros((100, 100), float)
arr[50, 50] = 1
num_nonzeros_2 = (sndi.gaussian_filter(arr, 5, truncate=2) > 0).sum()
assert_equal(num_nonzeros_2, 21**2)
num_nonzeros_5 = (sndi.gaussian_filter(arr, 5, truncate=5) > 0).sum()
assert_equal(num_nonzeros_5, 51**2)
# Test truncate when sigma is a sequence.
f = sndi.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
fpos = f > 0
n0 = fpos.any(axis=0).sum()
# n0 should be 2*int(2.5*3.5 + 0.5) + 1
assert_equal(n0, 19)
n1 = fpos.any(axis=1).sum()
# n1 should be 2*int(0.5*3.5 + 0.5) + 1
assert_equal(n1, 5)
# Test gaussian_filter1d.
x = np.zeros(51)
x[25] = 1
f = sndi.gaussian_filter1d(x, sigma=2, truncate=3.5)
n = (f > 0).sum()
assert_equal(n, 15)
# Test gaussian_laplace
y = sndi.gaussian_laplace(x, sigma=2, truncate=3.5)
nonzero_indices = np.where(y != 0)[0]
n = nonzero_indices.ptp() + 1
assert_equal(n, 15)
# Test gaussian_gradient_magnitude
y = sndi.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
nonzero_indices = np.where(y != 0)[0]
n = nonzero_indices.ptp() + 1
assert_equal(n, 15)
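# Worked numbers for the truncate test above (illustrative, not part of the
# original suite): gaussian_filter1d uses a kernel radius of
# int(truncate * sigma + 0.5), so sigma=5, truncate=2 gives radius 10 and a
# (2*10 + 1)**2 == 21**2 nonzero patch, while truncate=5 gives radius 25 and
# 51**2, matching the assertions.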
class TestThreading(TestCase):
def check_func_thread(self, n, fun, args, out):
from threading import Thread
thrds = [Thread(target=fun, args=args, kwargs={'output': out[x]}) for x in range(n)]
[t.start() for t in thrds]
[t.join() for t in thrds]
def check_func_serial(self, n, fun, args, out):
for i in range(n):
fun(*args, output=out[i])
def test_correlate1d(self):
d = np.random.randn(5000)
os = np.empty((4, d.size))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.correlate1d, (d, np.arange(5)), os)
self.check_func_thread(4, sndi.correlate1d, (d, np.arange(5)), ot)
assert_array_equal(os, ot)
def test_correlate(self):
d = np.random.randn(500, 500)
k = np.random.randn(10, 10)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.correlate, (d, k), os)
self.check_func_thread(4, sndi.correlate, (d, k), ot)
assert_array_equal(os, ot)
def test_median_filter(self):
d = np.random.randn(500, 500)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.median_filter, (d, 3), os)
self.check_func_thread(4, sndi.median_filter, (d, 3), ot)
assert_array_equal(os, ot)
def test_uniform_filter1d(self):
d = np.random.randn(5000)
os = np.empty((4, d.size))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.uniform_filter1d, (d, 5), os)
self.check_func_thread(4, sndi.uniform_filter1d, (d, 5), ot)
assert_array_equal(os, ot)
def test_minmax_filter(self):
d = np.random.randn(500, 500)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.maximum_filter, (d, 3), os)
self.check_func_thread(4, sndi.maximum_filter, (d, 3), ot)
assert_array_equal(os, ot)
self.check_func_serial(4, sndi.minimum_filter, (d, 3), os)
self.check_func_thread(4, sndi.minimum_filter, (d, 3), ot)
assert_array_equal(os, ot)
def test_minmaximum_filter1d():
# Regression gh-3898
in_ = np.arange(10)
out = sndi.minimum_filter1d(in_, 1)
assert_equal(in_, out)
out = sndi.maximum_filter1d(in_, 1)
assert_equal(in_, out)
# Test reflect
out = sndi.minimum_filter1d(in_, 5, mode='reflect')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
out = sndi.maximum_filter1d(in_, 5, mode='reflect')
assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
# Test constant
out = sndi.minimum_filter1d(in_, 5, mode='constant', cval=-1)
assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
out = sndi.maximum_filter1d(in_, 5, mode='constant', cval=10)
assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
# Test nearest
out = sndi.minimum_filter1d(in_, 5, mode='nearest')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
out = sndi.maximum_filter1d(in_, 5, mode='nearest')
assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
# Test wrap
out = sndi.minimum_filter1d(in_, 5, mode='wrap')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
out = sndi.maximum_filter1d(in_, 5, mode='wrap')
assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
def test_footprint_all_zeros():
# regression test for gh-6876: footprint of all zeros segfaults
arr = np.random.randint(0, 100, (100, 100))
kernel = np.zeros((3, 3), bool)
with assert_raises(ValueError):
sndi.maximum_filter(arr, footprint=kernel)
if __name__ == "__main__":
run_module_suite(argv=sys.argv)
| asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/scipy/ndimage/tests/test_filters.py | Python | mit | 14,168 | ["Gaussian"] | d08d63c3a206179a591249dfd700f0d0f112b2cda15847b41c4592f5d2761f37 |
"""
This class brings together a L{solve.Solver} to choose a set of implementations, a
L{fetch.Fetcher} to download additional components, and the user's configuration
settings.
@since: 0.53
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import os
from logging import info, debug
from zeroinstall.injector import arch, model
from zeroinstall.injector.model import network_offline
from zeroinstall.support import tasks
class Driver(object):
"""Chooses a set of implementations based on a policy.
Typical use:
1. Create a Driver object, giving it the requirements about the program to be run.
2. Call L{solve_with_downloads}. If more information is needed, a L{fetch.Fetcher} will be used to download it.
3. When all downloads are complete, the L{solver} contains the chosen versions.
4. Use L{get_uncached_implementations} to find where to get these versions and download them
using L{download_uncached_implementations}.
@ivar target_arch: target architecture for binaries (deprecated)
@type target_arch: L{arch.Architecture}
@ivar solver: solver used to choose a set of implementations
@type solver: L{solve.Solver}
@ivar watchers: callbacks to invoke after solving
"""
__slots__ = ['watchers', 'requirements', 'config', 'target_arch', 'solver']
def __init__(self, config, requirements):
"""
@param config: The configuration settings to use
@type config: L{config.Config}
@param requirements: Details about the program we want to run
@type requirements: L{requirements.Requirements}
@since: 0.53
"""
self.watchers = []
assert config
self.config = config
assert requirements
self.requirements = requirements
self.target_arch = arch.get_architecture(requirements.os, requirements.cpu)
from zeroinstall.injector.solver import DefaultSolver
self.solver = DefaultSolver(self.config)
debug(_("Supported systems: '%s'"), arch.os_ranks)
debug(_("Supported processors: '%s'"), arch.machine_ranks)
if requirements.before or requirements.not_before:
self.solver.extra_restrictions[config.iface_cache.get_interface(requirements.interface_uri)] = [
model.VersionRangeRestriction(model.parse_version(requirements.before),
model.parse_version(requirements.not_before))]
def get_uncached_implementations(self):
"""List all chosen implementations which aren't yet available locally.
@rtype: [(L{model.Interface}, L{model.Implementation})]"""
iface_cache = self.config.iface_cache
stores = self.config.stores
uncached = []
for uri, selection in self.solver.selections.selections.iteritems():
impl = selection.impl
assert impl, self.solver.selections
if not impl.is_available(stores):
uncached.append((iface_cache.get_interface(uri), impl))
return uncached
@tasks.async
def solve_with_downloads(self, force = False, update_local = False):
"""Run the solver, then download any feeds that are missing or
that need to be updated. Each time a new feed is imported into
the cache, the solver is run again, possibly adding new downloads.
@param force: whether to download even if we're already ready to run.
@param update_local: fetch PackageKit feeds even if we're ready to run."""
downloads_finished = set() # Successful or otherwise
downloads_in_progress = {} # URL -> Download
# There are three cases:
# 1. We want to run immediately if possible. If not, download all the information we can.
# (force = False, update_local = False)
# 2. We're in no hurry, but don't want to use the network unnecessarily.
# We should still update local information (from PackageKit).
# (force = False, update_local = True)
# 3. The user explicitly asked us to refresh everything.
# (force = True)
try_quick_exit = not (force or update_local)
while True:
self.solver.solve_for(self.requirements)
for w in self.watchers: w()
if try_quick_exit and self.solver.ready:
break
try_quick_exit = False
if not self.solver.ready:
force = True
for f in self.solver.feeds_used:
if f in downloads_finished or f in downloads_in_progress:
continue
if os.path.isabs(f):
if force:
self.config.iface_cache.get_feed(f, force = True)
downloads_in_progress[f] = tasks.IdleBlocker('Refresh local feed')
continue
elif f.startswith('distribution:'):
if force or update_local:
downloads_in_progress[f] = self.config.fetcher.download_and_import_feed(f, self.config.iface_cache)
elif force and self.config.network_use != network_offline:
downloads_in_progress[f] = self.config.fetcher.download_and_import_feed(f, self.config.iface_cache)
# Once we've started downloading some things,
# we might as well get them all.
force = True
if not downloads_in_progress:
if self.config.network_use == network_offline:
info(_("Can't choose versions and in off-line mode, so aborting"))
break
# Wait for at least one download to finish
blockers = downloads_in_progress.values()
yield blockers
tasks.check(blockers, self.config.handler.report_error)
for f in downloads_in_progress.keys():
if f in downloads_in_progress and downloads_in_progress[f].happened:
del downloads_in_progress[f]
downloads_finished.add(f)
# Need to refetch any "distribution" feed that
# depends on this one
distro_feed_url = 'distribution:' + f
if distro_feed_url in downloads_finished:
downloads_finished.remove(distro_feed_url)
if distro_feed_url in downloads_in_progress:
del downloads_in_progress[distro_feed_url]
@tasks.async
def solve_and_download_impls(self, refresh = False, select_only = False):
"""Run L{solve_with_downloads} and then get the selected implementations too.
@raise SafeException: if we couldn't select a set of implementations
@since: 0.40"""
refreshed = self.solve_with_downloads(refresh)
if refreshed:
yield refreshed
tasks.check(refreshed)
if not self.solver.ready:
raise self.solver.get_failure_reason()
if not select_only:
downloaded = self.download_uncached_implementations()
if downloaded:
yield downloaded
tasks.check(downloaded)
def need_download(self):
"""Decide whether we need to download anything (but don't do it!)
@return: true if we MUST download something (feeds or implementations)
@rtype: bool"""
self.solver.solve_for(self.requirements)
for w in self.watchers: w()
if not self.solver.ready:
return True # Maybe a newer version will work?
if self.get_uncached_implementations():
return True
return False
def download_uncached_implementations(self):
"""Download all implementations chosen by the solver that are missing from the cache."""
assert self.solver.ready, "Solver is not ready!\n%s" % self.solver.selections
stores = self.config.stores
return self.config.fetcher.download_impls([impl for impl in self.solver.selections.values() if not impl.is_available(stores)],
stores)
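# ---------------------------------------------------------------------------
# Editor's note: the sketch below is NOT part of the original driver.py. It is
# a minimal, hedged illustration of the "Typical use" steps listed in the
# Driver docstring, using only names defined or imported in this module. The
# `config` and `requirements` arguments are assumed to be constructed elsewhere
# (e.g. by the injector's config and requirements modules), which this sketch
# does not show or verify.
@tasks.async
def _example_driver_usage(config, requirements):
    driver = Driver(config, requirements)            # step 1: create the driver
    blocker = driver.solve_with_downloads()          # step 2: solve, downloading feeds as needed
    if blocker:
        yield blocker
        tasks.check(blocker)
    if not driver.solver.ready:                      # step 3: the solver now holds the chosen versions
        raise driver.solver.get_failure_reason()
    if driver.get_uncached_implementations():        # step 4: fetch any implementations not yet cached
        fetched = driver.download_uncached_implementations()
        yield fetched
        tasks.check(fetched)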
| dabrahams/zeroinstall | zeroinstall/injector/driver.py | Python | lgpl-2.1 | 7,058 | ["VisIt"] | 00fb21a9a22477aa506d412ac294ca2fe2fd95a322bed847e2bef7386f67dc4a |
# -*- coding: utf-8 -*-
# Copyright (c) 2007, 2008, Benoît Chesneau
# Copyright (c) 2007 Simon Willison, original work on django-openid
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# * notice, this list of conditions and the following disclaimer in the
# * documentation and/or other materials provided with the
# * distribution. Neither the name of the <ORGANIZATION> nor the names
# * of its contributors may be used to endorse or promote products
# * derived from this software without specific prior written
# * permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cgi
import datetime
from django.http import HttpResponseRedirect, Http404
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.template import RequestContext, Context
from django.conf import settings as django_settings
from askbot.conf import settings as askbot_settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate
from django.core.urlresolvers import reverse
from django.forms.util import ErrorList
from django.shortcuts import render
from django.template.loader import get_template
from django.views.decorators import csrf
from django.utils.encoding import smart_unicode
from askbot.utils.functions import generate_random_key
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils import simplejson
from askbot.mail import send_mail
from askbot.utils import decorators as askbot_decorators
from askbot.utils.html import site_url
from recaptcha_works.decorators import fix_recaptcha_remote_ip
from askbot.deps.django_authopenid.ldap_auth import ldap_create_user
from askbot.deps.django_authopenid.ldap_auth import ldap_authenticate
from askbot.utils.loading import load_module
from sanction.client import Client as OAuth2Client
from urlparse import urlparse
from openid.consumer.consumer import Consumer, \
SUCCESS, CANCEL, FAILURE, SETUP_NEEDED
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import sreg
# needed for some linux distributions like debian
try:
from openid.yadis import xri
except ImportError:
from yadis import xri
try:
from xmlrpclib import Fault as WpFault
from wordpress_xmlrpc import Client
from wordpress_xmlrpc.methods.users import GetUserInfo
except ImportError:
pass
import urllib
from askbot import forms as askbot_forms
from askbot.deps.django_authopenid import util
from askbot.deps.django_authopenid import decorators
from askbot.deps.django_authopenid.models import UserAssociation, UserEmailVerifier
from askbot.deps.django_authopenid import forms
from askbot.deps.django_authopenid.backends import AuthBackend
import logging
from askbot.utils.forms import get_next_url
from askbot.utils.http import get_request_info
from askbot.models.signals import user_logged_in, user_registered
def create_authenticated_user_account(
username=None, email=None, password=None,
user_identifier=None, login_provider_name=None
):
"""creates a user account, user association with
the login method and the default email subscriptions
"""
user = User.objects.create_user(username, email)
user_registered.send(None, user=user)
logging.debug('creating new openid user association for %s', username)
if password:
user.set_password(password)
user.save()
else:
UserAssociation(
openid_url = user_identifier,
user = user,
provider_name = login_provider_name,
last_used_timestamp = datetime.datetime.now()
).save()
subscribe_form = askbot_forms.SimpleEmailSubscribeForm({'subscribe': 'y'})
subscribe_form.full_clean()
logging.debug('saving email feed settings')
subscribe_form.save(user)
logging.debug('logging the user in')
user = authenticate(method='force', user_id=user.id)
if user is None:
error_message = 'please make sure that ' + \
'askbot.deps.django_authopenid.backends.AuthBackend' + \
' is in your settings.AUTHENTICATION_BACKENDS'
raise Exception(error_message)
return user
def cleanup_post_register_session(request):
"""delete keys from session after registration is complete"""
keys = (
'user_identifier',
'login_provider_name',
'username',
'email',
'password',
'validation_code'
)
for key in keys:
if key in request.session:
del request.session[key]
#todo: decouple from askbot
def login(request, user):
from django.contrib.auth import login as _login
# get old session key
session_key = request.session.session_key
# login and get new session key
_login(request, user)
# send signal with old session key as argument
logging.debug('logged in user %s with session key %s' % (user.username, session_key))
#todo: move to auth app
user_logged_in.send(
request = request,
user = user,
session_key=session_key,
sender=None
)
#todo: uncouple this from askbot
def logout(request):
from django.contrib.auth import logout as _logout#for login I've added wrapper below - called login
_logout(request)
def logout_page(request):
data = {
'page_class': 'meta',
'have_federated_login_methods': util.have_enabled_federated_login_methods()
}
return render(request, 'authopenid/logout.html', data)
def get_url_host(request):
if request.is_secure():
protocol = 'https'
else:
protocol = 'http'
host = escape(request.get_host())
return '%s://%s' % (protocol, host)
def get_full_url(request):
return get_url_host(request) + request.get_full_path()
def ask_openid(
request,
openid_url,
redirect_to,
on_failure=None,
sreg_request=None
):
""" basic function to ask openid and return response """
on_failure = on_failure or signin_failure
trust_root = getattr(
django_settings, 'OPENID_TRUST_ROOT', get_url_host(request) + '/'
)
if xri.identifierScheme(openid_url) == 'XRI' and getattr(
django_settings, 'OPENID_DISALLOW_INAMES', False
):
msg = _("i-names are not supported")
logging.debug('openid failed because i-names are not supported')
return on_failure(request, msg)
consumer = Consumer(request.session, util.DjangoOpenIDStore())
try:
auth_request = consumer.begin(openid_url)
except DiscoveryFailure:
openid_url = cgi.escape(openid_url)
msg = _(u"OpenID %(openid_url)s is invalid" % {'openid_url':openid_url})
logging.debug(msg)
return on_failure(request, msg)
logging.debug('openid seemed to work')
if sreg_request:
logging.debug('adding sreg (simple registration) request')
auth_request.addExtension(sreg_request)
redirect_url = auth_request.redirectURL(trust_root, redirect_to)
logging.debug('redirecting to %s' % redirect_url)
return HttpResponseRedirect(redirect_url)
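# (Editor's note on the flow: ask_openid() redirects the browser to the user's
# OpenID provider; after authentication the provider redirects back to the
# `redirect_to` URL, which is routed to complete_signin()/complete() below and
# finishes via signin_success() or signin_failure().)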
def complete(request, on_success=None, on_failure=None, return_to=None):
""" complete openid signin """
assert(on_success is not None)
assert(on_failure is not None)
logging.debug('in askbot.deps.django_authopenid.complete')
consumer = Consumer(request.session, util.DjangoOpenIDStore())
# make sure params are encoded in utf8
params = dict((k,smart_unicode(v)) for k, v in request.GET.items())
openid_response = consumer.complete(params, return_to)
try:
logging.debug(u'returned openid parameters were: %s' % unicode(params))
except Exception, e:
logging.critical(u'fix logging statement above ' + unicode(e))
if openid_response.status == SUCCESS:
logging.debug('openid response status is SUCCESS')
return on_success(
request,
openid_response.identity_url,
openid_response
)
elif openid_response.status == CANCEL:
logging.debug('CANCEL')
return on_failure(request, 'The request was canceled')
elif openid_response.status == FAILURE:
logging.debug('FAILURE')
return on_failure(request, openid_response.message)
elif openid_response.status == SETUP_NEEDED:
logging.debug('SETUP NEEDED')
return on_failure(request, 'Setup needed')
else:
logging.debug('BAD OPENID STATUS')
assert False, "Bad openid status: %s" % openid_response.status
def not_authenticated(func):
""" decorator that redirect user to next page if
he/she is already logged in."""
def decorated(request, *args, **kwargs):
if request.user.is_authenticated():
return HttpResponseRedirect(get_next_url(request))
return func(request, *args, **kwargs)
return decorated
def complete_oauth2_signin(request):
if 'next_url' in request.session:
next_url = request.session['next_url']
del request.session['next_url']
else:
next_url = reverse('index')
if 'error' in request.GET:
return HttpResponseRedirect(reverse('index'))
csrf_token = request.GET.get('state', None)
oauth2_csrf_token = request.session.pop('oauth2_csrf_token', None)
if csrf_token is None or csrf_token != oauth2_csrf_token:
return HttpResponseBadRequest()
providers = util.get_enabled_login_providers()
provider_name = request.session.pop('provider_name')
params = providers[provider_name]
assert(params['type'] == 'oauth2')
client_id = getattr(
askbot_settings,
provider_name.upper() + '_KEY',
)
client_secret = getattr(
askbot_settings,
provider_name.upper() + '_SECRET',
)
client = OAuth2Client(
token_endpoint=params['token_endpoint'],
resource_endpoint=params['resource_endpoint'],
redirect_uri=site_url(reverse('user_complete_oauth2_signin')),
client_id=client_id,
client_secret=client_secret,
token_transport=params.get('token_transport', None)
)
client.request_token(
code=request.GET['code'],
parser=params.get('response_parser', None)
)
#todo: possibly set additional parameters here
user_id = params['get_user_id_function'](client)
user = authenticate(
oauth_user_id = user_id,
provider_name = provider_name,
method = 'oauth'
)
logging.debug('finalizing oauth signin')
request.session['email'] = ''#todo: pull from profile
request.session['username'] = ''#todo: pull from profile
if (provider_name == 'facebook'):
profile = client.request("me")
request.session['email'] = profile.get('email', '')
request.session['username'] = profile.get('username', '')
return finalize_generic_signin(
request = request,
user = user,
user_identifier = user_id,
login_provider_name = provider_name,
redirect_url = next_url
)
def complete_oauth_signin(request):
if 'next_url' in request.session:
next_url = request.session['next_url']
del request.session['next_url']
else:
next_url = reverse('index')
if 'denied' in request.GET:
return HttpResponseRedirect(next_url)
if 'oauth_problem' in request.GET:
return HttpResponseRedirect(next_url)
try:
oauth_token = request.GET['oauth_token']
logging.debug('have token %s' % oauth_token)
oauth_verifier = request.GET['oauth_verifier']
logging.debug('have verifier %s' % oauth_verifier)
session_oauth_token = request.session['oauth_token']
logging.debug('have token from session')
assert(oauth_token == session_oauth_token['oauth_token'])
oauth_provider_name = request.session['oauth_provider_name']
logging.debug('have saved provider name')
del request.session['oauth_provider_name']
oauth = util.OAuthConnection(oauth_provider_name)
user_id = oauth.get_user_id(
oauth_token = session_oauth_token,
oauth_verifier = oauth_verifier
)
logging.debug('have %s user id=%s' % (oauth_provider_name, user_id))
user = authenticate(
oauth_user_id = user_id,
provider_name = oauth_provider_name,
method = 'oauth'
)
logging.debug('finalizing oauth signin')
request.session['email'] = ''#todo: pull from profile
request.session['username'] = ''#todo: pull from profile
return finalize_generic_signin(
request = request,
user = user,
user_identifier = user_id,
login_provider_name = oauth_provider_name,
redirect_url = next_url
)
except Exception, e:
logging.critical(e)
msg = _('Sorry, there was some problem '
'connecting to the login provider, please try again '
'or use another login method'
)
request.user.message_set.create(message = msg)
return HttpResponseRedirect(next_url)
#@not_authenticated
@csrf.csrf_protect
def signin(request, template_name='authopenid/signin.html'):
"""
signin page. It manages the legacy authentication (user/password)
and openid authentication
url: /signin/
template : authopenid/signin.html
"""
logging.debug('in signin view')
on_failure = signin_failure
#we need a special priority on where to redirect on successful login
#here:
#1) url parameter "next" - if explicitly set
#2) url from django setting LOGIN_REDIRECT_URL
#3) home page of the forum
login_redirect_url = getattr(django_settings, 'LOGIN_REDIRECT_URL', None)
next_url = get_next_url(request, default = login_redirect_url)
logging.debug('next url is %s' % next_url)
if askbot_settings.ALLOW_ADD_REMOVE_LOGIN_METHODS == False \
and request.user.is_authenticated():
return HttpResponseRedirect(next_url)
if next_url == reverse('user_signin'):
next_url = '%(next)s?next=%(next)s' % {'next': next_url}
login_form = forms.LoginForm(initial = {'next': next_url})
#todo: get next url make it sticky if next is 'user_signin'
if request.method == 'POST':
login_form = forms.LoginForm(request.POST)
if login_form.is_valid():
provider_name = login_form.cleaned_data['login_provider_name']
if login_form.cleaned_data['login_type'] == 'password':
password_action = login_form.cleaned_data['password_action']
if askbot_settings.USE_LDAP_FOR_PASSWORD_LOGIN:
assert(password_action == 'login')
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
user = authenticate(
username=username,
password=password,
method = 'ldap'
)
if user:
login(request, user)
return HttpResponseRedirect(next_url)
else:
#try to login again via LDAP
user_info = ldap_authenticate(username, password)
if user_info['success']:
if askbot_settings.LDAP_AUTOCREATE_USERS:
#create new user or
user = ldap_create_user(user_info).user
user = authenticate(method='force', user_id=user.id)
assert(user is not None)
login(request, user)
return HttpResponseRedirect(next_url)
else:
#continue with proper registration
ldap_username = user_info['ldap_username']
request.session['email'] = user_info['email']
request.session['ldap_user_info'] = user_info
if askbot_settings.AUTOFILL_USER_DATA:
request.session['username'] = ldap_username
request.session['first_name'] = \
user_info['first_name']
request.session['last_name'] = \
user_info['last_name']
return finalize_generic_signin(
request,
login_provider_name = 'ldap',
user_identifier = ldap_username + '@ldap',
redirect_url = next_url
)
else:
auth_fail_func_path = getattr(
django_settings,
'LDAP_AUTHENTICATE_FAILURE_FUNCTION',
None
)
if auth_fail_func_path:
auth_fail_func = load_module(auth_fail_func_path)
auth_fail_func(user_info, login_form)
else:
login_form.set_password_login_error()
#return HttpResponseRedirect(request.path)
else:
if password_action == 'login':
user = authenticate(
username = login_form.cleaned_data['username'],
password = login_form.cleaned_data['password'],
provider_name = provider_name,
method = 'password'
)
if user is None:
login_form.set_password_login_error()
else:
login(request, user)
#todo: here we might need to set cookies
#for external login sites
return HttpResponseRedirect(next_url)
elif password_action == 'change_password':
if request.user.is_authenticated():
new_password = \
login_form.cleaned_data['new_password']
AuthBackend.set_password(
user=request.user,
password=new_password,
provider_name=provider_name
)
request.user.message_set.create(
message = _('Your new password is saved')
)
return HttpResponseRedirect(next_url)
else:
logging.critical(
'unknown password action %s' % password_action
)
raise Http404
elif login_form.cleaned_data['login_type'] == 'mozilla-persona':
assertion = login_form.cleaned_data['persona_assertion']
email = util.mozilla_persona_get_email_from_assertion(assertion)
if email:
user = authenticate(email=email, method='mozilla-persona')
if user is None:
user = authenticate(email=email, method='valid_email')
if user:
#create mozilla persona user association
#because we trust the given email address belongs
#to the same user
UserAssociation(
openid_url=email,
user=user,
provider_name='mozilla-persona',
last_used_timestamp=datetime.datetime.now()
).save()
if user:
login(request, user)
return HttpResponseRedirect(next_url)
#else - create new user account
#pre-fill email address with persona registration
request.session['email'] = email
return finalize_generic_signin(
request,
login_provider_name = 'mozilla-persona',
user_identifier = email,
redirect_url = next_url
)
elif login_form.cleaned_data['login_type'] == 'openid':
#initiate communication process
logging.debug('processing signin with openid submission')
#todo: make a simple-use wrapper for openid protocol
sreg_req = sreg.SRegRequest(optional=['nickname', 'email'])
redirect_to = "%s%s?%s" % (
get_url_host(request),
reverse('user_complete_signin'),
urllib.urlencode({'next':next_url})
)
return ask_openid(
request,
login_form.cleaned_data['openid_url'],
redirect_to,
on_failure=signin_failure,
sreg_request=sreg_req
)
elif login_form.cleaned_data['login_type'] == 'oauth':
try:
#this url may need to have "next" piggybacked onto it
connection = util.OAuthConnection(
provider_name,
callback_url=reverse('user_complete_oauth_signin')
)
connection.start()
request.session['oauth_token'] = connection.get_token()
request.session['oauth_provider_name'] = provider_name
request.session['next_url'] = next_url#special case for oauth
oauth_url = connection.get_auth_url(login_only=True)
return HttpResponseRedirect(oauth_url)
except util.OAuthError, e:
logging.critical(unicode(e))
msg = _('Unfortunately, there was some problem when '
'connecting to %(provider)s, please try again '
'or use another provider'
) % {'provider': provider_name}
request.user.message_set.create(message = msg)
elif login_form.cleaned_data['login_type'] == 'oauth2':
try:
csrf_token = generate_random_key(length=32)
redirect_url = util.get_oauth2_starter_url(provider_name, csrf_token)
request.session['oauth2_csrf_token'] = csrf_token
request.session['provider_name'] = provider_name
return HttpResponseRedirect(redirect_url)
except util.OAuthError, e:
logging.critical(unicode(e))
msg = _('Unfortunately, there was some problem when '
'connecting to %(provider)s, please try again '
'or use another provider'
) % {'provider': provider_name}
request.user.message_set.create(message = msg)
elif login_form.cleaned_data['login_type'] == 'wordpress_site':
#here wordpress_site means for a self hosted wordpress blog not a wordpress.com blog
wp = Client(
askbot_settings.WORDPRESS_SITE_URL,
login_form.cleaned_data['username'],
login_form.cleaned_data['password']
)
try:
wp_user = wp.call(GetUserInfo())
custom_wp_openid_url = '%s?user_id=%s' % (wp.url, wp_user.user_id)
user = authenticate(
method = 'wordpress_site',
wordpress_url = wp.url,
wp_user_id = wp_user.user_id
)
return finalize_generic_signin(
request = request,
user = user,
user_identifier = custom_wp_openid_url,
login_provider_name = provider_name,
redirect_url = next_url
)
except WpFault, e:
logging.critical(unicode(e))
msg = _('The login password combination was not correct')
request.user.message_set.create(message = msg)
else:
#raise 500 error - unknown login type
pass
else:
logging.debug('login form is not valid')
logging.debug(login_form.errors)
logging.debug(request.REQUEST)
if request.method == 'GET' and request.user.is_authenticated():
view_subtype = 'change_openid'
else:
view_subtype = 'default'
return show_signin_view(
request,
login_form = login_form,
view_subtype = view_subtype,
template_name=template_name
)
@csrf.csrf_protect
def show_signin_view(
request,
login_form = None,
account_recovery_form = None,
account_recovery_message = None,
sticky = False,
view_subtype = 'default',
template_name='authopenid/signin.html'
):
"""url-less utility function that populates
context of template 'authopenid/signin.html'
and returns its rendered output
"""
allowed_subtypes = (
'default', 'add_openid',
'email_sent', 'change_openid',
'bad_key'
)
assert(view_subtype in allowed_subtypes)
if sticky:
next_url = reverse('user_signin')
else:
next_url = get_next_url(request)
if login_form is None:
login_form = forms.LoginForm(initial = {'next': next_url})
if account_recovery_form is None:
account_recovery_form = forms.AccountRecoveryForm()#initial = initial_data)
#if request is GET
if request.method == 'GET':
logging.debug('request method was GET')
#todo: this stuff must be executed on some signal
#because askbot should have nothing to do with the login app
from askbot.models import AnonymousQuestion as AQ
session_key = request.session.session_key
logging.debug('retrieving anonymously posted question associated with session %s' % session_key)
qlist = AQ.objects.filter(session_key=session_key).order_by('-added_at')
if len(qlist) > 0:
question = qlist[0]
else:
question = None
from askbot.models import AnonymousAnswer as AA
session_key = request.session.session_key
logging.debug('retrieving posted answer associated with session %s' % session_key)
alist = AA.objects.filter(session_key=session_key).order_by('-added_at')
if len(alist) > 0:
answer = alist[0]
else:
answer = None
if request.user.is_authenticated():
existing_login_methods = UserAssociation.objects.filter(user = request.user)
#annotate objects with extra data
providers = util.get_enabled_login_providers()
for login_method in existing_login_methods:
try:
provider_data = providers[login_method.provider_name]
if provider_data['type'] == 'password':
#only external password logins will not be deletable
#this is because users with those can lose access to their accounts permanently
login_method.is_deletable = provider_data.get('password_changeable', False)
else:
login_method.is_deletable = True
except KeyError:
logging.critical(
'login method %s is no longer available '
'please delete records for this login method '
'from the UserAssociation table',
login_method.provider_name
)
continue
if view_subtype == 'default':
page_title = _('Please click any of the icons below to sign in')
elif view_subtype == 'email_sent':
page_title = _('Account recovery email sent')
elif view_subtype == 'change_openid':
if len(existing_login_methods) == 0:
page_title = _('Please add one or more login methods.')
else:
page_title = _('If you wish, please add, remove or re-validate your login methods')
elif view_subtype == 'add_openid':
page_title = _('Please wait a second! Your account is recovered, but ...')
elif view_subtype == 'bad_key':
page_title = _('Sorry, this account recovery key has expired or is invalid')
logging.debug('showing signin view')
data = {
'page_class': 'openid-signin',
'view_subtype': view_subtype, #add_openid|default
'page_title': page_title,
'question': question,
'answer': answer,
'login_form': login_form,
'use_password_login': util.use_password_login(),
'account_recovery_form': account_recovery_form,
'openid_error_message': request.REQUEST.get('msg',''),
'account_recovery_message': account_recovery_message,
}
major_login_providers = util.get_enabled_major_login_providers()
minor_login_providers = util.get_enabled_minor_login_providers()
#determine if we are only using password login
active_provider_names = [p['name'] for p in major_login_providers.values()]
active_provider_names.extend([p['name'] for p in minor_login_providers.values()])
have_buttons = True
if (len(active_provider_names) == 1 and active_provider_names[0] == 'local'):
if askbot_settings.SIGNIN_ALWAYS_SHOW_LOCAL_LOGIN == True:
#in this case the form is not using javascript, so set initial values
#here
have_buttons = False
login_form.initial['login_provider_name'] = 'local'
if request.user.is_authenticated():
login_form.initial['password_action'] = 'change_password'
else:
login_form.initial['password_action'] = 'login'
data['have_buttons'] = have_buttons
if request.user.is_authenticated():
data['existing_login_methods'] = existing_login_methods
active_provider_names = [
item.provider_name for item in existing_login_methods
]
util.set_login_provider_tooltips(
major_login_providers,
active_provider_names = active_provider_names
)
util.set_login_provider_tooltips(
minor_login_providers,
active_provider_names = active_provider_names
)
data['major_login_providers'] = major_login_providers.values()
data['minor_login_providers'] = minor_login_providers.values()
return render(request, template_name, data)
@csrf.csrf_exempt
@askbot_decorators.post_only
@askbot_decorators.ajax_login_required
def change_password(request):
form = forms.ChangePasswordForm(request.POST)
data = dict()
if form.is_valid():
request.user.set_password(form.cleaned_data['new_password'])
request.user.save()
data['message'] = _('Your new password is saved')
else:
data['errors'] = form.errors
return HttpResponse(simplejson.dumps(data), content_type='application/json')
@login_required
def delete_login_method(request):
if askbot_settings.ALLOW_ADD_REMOVE_LOGIN_METHODS == False:
raise Http404
if request.is_ajax() and request.method == 'POST':
provider_name = request.POST['provider_name']
try:
login_method = UserAssociation.objects.get(
user = request.user,
provider_name = provider_name
)
login_method.delete()
return HttpResponse('', mimetype = 'application/json')
except UserAssociation.DoesNotExist:
#error response
message = _('Login method %(provider_name)s does not exist') % {'provider_name': provider_name}
return HttpResponse(message, status=500, mimetype = 'application/json')
except UserAssociation.MultipleObjectsReturned:
logging.critical(
'have multiple %(provider)s logins for user %(id)s'
% {'provider': provider_name, 'id': request.user.id}
)
message = _('Oops, sorry - there was some error - please try again')
return HttpResponse(message, status=500, mimetype = 'application/json')
else:
raise Http404
def complete_signin(request):
""" in case of complete signin with openid """
logging.debug('')#blank log just for the trace
return complete(
request,
on_success = signin_success,
on_failure = signin_failure,
return_to = get_url_host(request) + reverse('user_complete_signin')
)
def signin_success(request, identity_url, openid_response):
"""
this is not a view, has no url pointing to this
this function is called when OpenID provider returns
successful response to user authentication
Does actual authentication in Django site and
redirects to the registration page, if necessary
or adds another login method.
"""
logging.debug('')
openid_data = util.from_openid_response(openid_response) #create janrain OpenID object
request.session['openid'] = openid_data
openid_url = str(openid_data)
user = authenticate(
openid_url = openid_url,
method = 'openid'
)
next_url = get_next_url(request)
provider_name = util.get_provider_name(openid_url)
request.session['email'] = openid_data.sreg.get('email', '')
request.session['username'] = openid_data.sreg.get('nickname', '')
return finalize_generic_signin(
request = request,
user = user,
user_identifier = openid_url,
login_provider_name = provider_name,
redirect_url = next_url
)
def finalize_generic_signin(
request = None,
user = None,
login_provider_name = None,
user_identifier = None,
redirect_url = None
):
"""non-view function
generic signin, run after all protocol-dependent details
have been resolved
"""
if 'in_recovery' in request.session:
del request.session['in_recovery']
redirect_url = getattr(django_settings, 'LOGIN_REDIRECT_URL', None)
if redirect_url is None:
redirect_url = reverse('questions')
if request.user.is_authenticated():
#this branch is for adding a new association
if user is None:
try:
#see if currently logged in user has login with the given provider
assoc = UserAssociation.objects.get(
user=request.user,
provider_name=login_provider_name
)
logging.info('switching account or open id changed???')
#did openid url change? or we are dealing with a brand new open id?
message = _(
'If you are trying to sign in to another account, '
'please sign out first. Otherwise, please report the incident '
'to the site administrator.'
)
request.user.message_set.create(message=message)
return HttpResponseRedirect(redirect_url)
except UserAssociation.DoesNotExist:
#register new association
UserAssociation(
user=request.user,
provider_name=login_provider_name,
openid_url=user_identifier,
last_used_timestamp=datetime.datetime.now()
).save()
return HttpResponseRedirect(redirect_url)
elif user != request.user:
#prevent theft of account by another pre-existing user
logging.critical(
'possible account theft attempt by %s,%d to %s %d' % \
(
request.user.username,
request.user.id,
user.username,
user.id
)
)
logout(request)#log out current user
login(request, user)#login freshly authenticated user
return HttpResponseRedirect(redirect_url)
else:
#user just checks if another login still works
msg = _('Your %(provider)s login works fine') % \
{'provider': login_provider_name}
request.user.message_set.create(message = msg)
return HttpResponseRedirect(redirect_url)
elif user:
#login branch
login(request, user)
logging.debug('login success')
return HttpResponseRedirect(redirect_url)
else:
#need to register
request.method = 'GET'#this is not a good thing to do
#but necessary at the moment to reuse the register()
#method
return register(
request,
login_provider_name=login_provider_name,
user_identifier=user_identifier
)
@not_authenticated
@csrf.csrf_protect
@fix_recaptcha_remote_ip
def register(request, login_provider_name=None, user_identifier=None):
"""
this function is used via its own url with request.method=POST
or as a simple function call from "finalize_generic_signin"
in which case request.method must be 'GET'
and login_provider_name and user_identifier arguments must not be None
user_identifier will be stored in the UserAssociation as openid_url
login_provider_name - as provider_name
this function may need to be refactored to simplify the usage pattern
template : authopenid/complete.html
"""
logging.debug('')
next_url = get_next_url(request)
user = None
username = request.session.get('username', '')
email = request.session.get('email', '')
logging.debug('request method is %s' % request.method)
form_class = forms.get_registration_form_class()
register_form = form_class(
initial={
'next': next_url,
'username': request.session.get('username', ''),
'email': request.session.get('email', ''),
}
)
if request.method == 'GET':
assert(login_provider_name is not None)
assert(user_identifier is not None)
#store this data into the session
#to persist for the post request
request.session['login_provider_name'] = login_provider_name
request.session['user_identifier'] = user_identifier
elif request.method == 'POST':
if 'login_provider_name' not in request.session \
or 'user_identifier' not in request.session:
logging.critical('illegal attempt to register')
return HttpResponseRedirect(reverse('user_signin'))
#load this data from the session
user_identifier = request.session['user_identifier']
login_provider_name = request.session['login_provider_name']
logging.debug('trying to create new account associated with openid')
form_class = forms.get_registration_form_class()
register_form = form_class(request.POST)
if not register_form.is_valid():
logging.debug('registration form is INVALID')
else:
username = register_form.cleaned_data['username']
email = register_form.cleaned_data['email']
if 'ldap_user_info' in request.session:
user_info = request.session['ldap_user_info']
#we take this info from the user input where
#they can override the default provided by LDAP
user_info['django_username'] = username
user_info['email'] = email
user = ldap_create_user(user_info).user
user = authenticate(user_id=user.id, method='force')
del request.session['ldap_user_info']
login(request, user)
cleanup_post_register_session(request)
return HttpResponseRedirect(next_url)
elif askbot_settings.REQUIRE_VALID_EMAIL_FOR == 'nothing':
user = create_authenticated_user_account(
username=username,
email=email,
user_identifier=user_identifier,
login_provider_name=login_provider_name,
)
login(request, user)
cleanup_post_register_session(request)
return HttpResponseRedirect(next_url)
else:
email_verifier = UserEmailVerifier(key=generate_random_key())
email_verifier.value = {'username': username, 'email': email,
'user_identifier': user_identifier,
'login_provider_name': login_provider_name}
email_verifier.save()
send_email_key(email, email_verifier.key,
handler_url_name='verify_email_and_register')
redirect_url = reverse('verify_email_and_register') + '?next=' + next_url
return HttpResponseRedirect(redirect_url)
providers = {
'yahoo':'<font color="purple">Yahoo!</font>',
'flickr':'<font color="#0063dc">flick</font><font color="#ff0084">r</font>™',
'google':'Google™',
'aol':'<font color="#31658e">AOL</font>',
'myopenid':'MyOpenID',
}
if login_provider_name not in providers:
provider_logo = login_provider_name
else:
provider_logo = providers[login_provider_name]
logging.debug('printing authopenid/complete.html output')
data = {
'openid_register_form': register_form,
'default_form_action': django_settings.LOGIN_URL,
'provider':mark_safe(provider_logo),
'username': username,
'email': email,
'login_type':'openid',
'gravatar_faq_url':reverse('faq') + '#gravatar',
}
return render(request, 'authopenid/complete.html', data)
def signin_failure(request, message):
"""
failure with openid signin. Go back to signin page.
"""
request.user.message_set.create(message = message)
return show_signin_view(request)
@not_authenticated
@csrf.csrf_protect
def verify_email_and_register(request):
"""for POST request - check the validation code,
and if correct - create an account and log in the user
for GET - give a field to paste the activation code
and a button to send another validation email.
"""
presented_code = request.REQUEST.get('validation_code', None)
if presented_code:
try:
#we get here with post if button is pushed
#or with "get" if emailed link is clicked
email_verifier = UserEmailVerifier.objects.get(key=presented_code)
#verifies that the code has not been used already
assert(email_verifier.verified == False)
assert(email_verifier.has_expired() == False)
username = email_verifier.value['username']
email = email_verifier.value['email']
password = email_verifier.value.get('password', None)
user_identifier = email_verifier.value.get('user_identifier', None)
login_provider_name = email_verifier.value.get('login_provider_name', None)
if password:
user = create_authenticated_user_account(
username=username,
email=email,
password=password,
)
elif user_identifier and login_provider_name:
user = create_authenticated_user_account(
username=username,
email=email,
user_identifier=user_identifier,
login_provider_name=login_provider_name,
)
else:
raise NotImplementedError()
login(request, user)
email_verifier.verified = True
email_verifier.save()
cleanup_post_register_session(request)
return HttpResponseRedirect(get_next_url(request))
except Exception, e:
message = _(
'Sorry, registration failed. '
'The token may have already been used or may have expired. Please try again'
)
request.user.message_set.create(message=message)
return HttpResponseRedirect(reverse('index'))
else:
data = {'page_class': 'validate-email-page'}
return render(request, 'authopenid/verify_email.html', data)
@not_authenticated
@decorators.valid_password_login_provider_required
@csrf.csrf_protect
@fix_recaptcha_remote_ip
def signup_with_password(request):
"""Create a password-protected account
template: authopenid/signup_with_password.html
"""
logging.debug(get_request_info(request))
login_form = forms.LoginForm(initial = {'next': get_next_url(request)})
#this is safe because second decorator cleans this field
provider_name = request.REQUEST['login_provider']
if askbot_settings.USE_RECAPTCHA:
RegisterForm = forms.SafeClassicRegisterForm
else:
RegisterForm = forms.ClassicRegisterForm
logging.debug('request method was %s' % request.method)
if request.method == 'POST':
form = RegisterForm(request.POST)
#validation outside if to remember form values
logging.debug('validating classic register form')
form1_is_valid = form.is_valid()
if form1_is_valid:
logging.debug('classic register form validated')
else:
logging.debug('classic register form is not valid')
if form1_is_valid:
logging.debug('both forms are valid')
next = form.cleaned_data['next']
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
email = form.cleaned_data['email']
if askbot_settings.REQUIRE_VALID_EMAIL_FOR == 'nothing':
user = create_authenticated_user_account(
username=username,
email=email,
password=password,
)
login(request, user)
cleanup_post_register_session(request)
return HttpResponseRedirect(get_next_url(request))
else:
email_verifier = UserEmailVerifier(key=generate_random_key())
email_verifier.value = {'username': username,
'login_provider_name': provider_name,
'email': email, 'password': password}
email_verifier.save()
send_email_key(email, email_verifier.key,
handler_url_name='verify_email_and_register')
redirect_url = reverse('verify_email_and_register') + \
'?next=' + get_next_url(request)
return HttpResponseRedirect(redirect_url)
else:
#todo: this can be solved with a decorator, maybe
form.initial['login_provider'] = provider_name
logging.debug('create classic account forms were invalid')
else:
#todo: here we have duplication of get_password_login_provider...
form = RegisterForm(
initial={
'next': get_next_url(request),
'login_provider': provider_name
}
)
logging.debug('printing legacy signup form')
major_login_providers = util.get_enabled_major_login_providers()
minor_login_providers = util.get_enabled_minor_login_providers()
context_data = {
'form': form,
'page_class': 'openid-signin',
'major_login_providers': major_login_providers.values(),
'minor_login_providers': minor_login_providers.values(),
'login_form': login_form
}
return render(
request,
'authopenid/signup_with_password.html',
context_data
)
#what if request is not posted?
@login_required
def signout(request):
"""
signout from the website. Remove openid from session and kill it.
url : /signout/"
"""
logging.debug('')
try:
logging.debug('deleting openid session var')
del request.session['openid']
except KeyError:
logging.debug('failed')
pass
logout(request)
logging.debug('user logged out')
return HttpResponseRedirect(get_next_url(request))
XRDF_TEMPLATE = """<?xml version='1.0' encoding='UTF-8'?>
<xrds:XRDS
xmlns:xrds='xri://$xrds'
xmlns:openid='http://openid.net/xmlns/1.0'
xmlns='xri://$xrd*($v*2.0)'>
<XRD>
<Service>
<Type>http://specs.openid.net/auth/2.0/return_to</Type>
<URI>%(return_to)s</URI>
</Service>
</XRD>
</xrds:XRDS>"""
def xrdf(request):
url_host = get_url_host(request)
return_to = "%s%s" % (url_host, reverse('user_complete_signin'))
return HttpResponse(XRDF_TEMPLATE % {'return_to': return_to})
def set_new_email(user, new_email):
if new_email != user.email:
user.email = new_email
user.email_isvalid = False
user.save()
def send_email_key(email, key, handler_url_name='user_account_recover'):
"""private function. sends email containing validation key
to user's email address
"""
subject = _("Recover your %(site)s account") % \
{'site': askbot_settings.APP_SHORT_NAME}
data = {
'site_name': askbot_settings.APP_SHORT_NAME,
'validation_link': site_url(reverse(handler_url_name)) + \
'?validation_code=' + key
}
template = get_template('authopenid/email_validation.html')
message = template.render(data)#todo: inject language preference
send_mail(subject, message, django_settings.DEFAULT_FROM_EMAIL, [email])
def send_user_new_email_key(user):
user.email_key = generate_random_key()
user.save()
send_email_key(user.email, user.email_key)
def account_recover(request):
"""view similar to send_email_key, except
it allows user to recover an account by entering
his/her email address
this view will both - send the recover link and
process it
url name 'user_account_recover'
"""
if not askbot_settings.ALLOW_ACCOUNT_RECOVERY_BY_EMAIL:
raise Http404
if request.method == 'POST':
form = forms.AccountRecoveryForm(request.POST)
if form.is_valid():
user = form.cleaned_data['user']
send_user_new_email_key(user)
message = _(
'Please check your email and visit the enclosed link.'
)
return show_signin_view(
request,
account_recovery_message = message,
view_subtype = 'email_sent'
)
else:
return show_signin_view(
request,
account_recovery_form = form
)
else:
key = request.GET.get('validation_code', None)
if key is None:
return HttpResponseRedirect(reverse('user_signin'))
user = authenticate(email_key = key, method = 'email')
if user:
if request.user.is_authenticated():
if user != request.user:
logout(request)
login(request, user)
else:
login(request, user)
from askbot.models import greet_new_user
greet_new_user(user)
#need to show "sticky" signin view here
request.session['in_recovery'] = True
return show_signin_view(
request,
view_subtype = 'add_openid',
sticky = True
)
else:
return show_signin_view(request, view_subtype = 'bad_key')
return HttpResponseRedirect(get_next_url(request))
| coffenbacher/askbot-devel | askbot/deps/django_authopenid/views.py | Python | gpl-3.0 | 56,136 | ["VisIt"] | d150af7d91a6b46eafedad91b2cf48dec13315a1971f72c6d60dc303f466e8f9 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Example script, part of MDAnalysis
"""
Example: Comparing a trajectories from different methods
========================================================
Example implementation of Path Similarity Analysis that shows how to read in a
set of trajectories, compute (discrete) Fréchet distances, and plot a heat
map-dendrogram.
This example uses the apo AdK transition between its open and closed crystal
structures as a testbed system (see [Seyler2014]). Trajectories are generated
given the known structural endpoints. A selection of ten different sampling
methods (three transitions each), plus the path of linear interpolation, were
used to generate a total of 31 closed-to-open transition paths. The
(discrete) Fréchet distances are computed (between each unique pair of
trajectories) and stored in a distance matrix. (See [Seyler2015] for further
applications of and information on PSA.)
The distance matrix is stored in a data file `discrete_frechet.dat` and a numpy
file `discrete_frechet.npy`, and the heat map-dendrogram showing Ward
hierarchical clustering of the distance matrix is also written to
`psadata/plots/df_war_psa-short.pdf` (requires :mod:`matplotlib`).
[Seyler2014] S.L. Seyler and O. Beckstein, Sampling large conformational
transitions: adenylate kinase as a testing ground. Mol Simul 40
(2014), 855–877, doi:10.1080/08927022.2014.919497
[Seyler2015] S.L. Seyler, A. Kumar, M.F. Thorpe, and O. Beckstein, Path
Similarity Analysis: a Method for Quantifying Macromolecular
Pathways. `arXiv:1505.04807v1`_ [q-bio.QM], 2015.
.. SeeAlso:: :mod:`MDAnalysis.analysis.psa`
"""
from MDAnalysis import Universe
from MDAnalysis.analysis.psa import PSAnalysis
from pair_id import PairID
if __name__ == '__main__':
print("Building collection of simulations...")
method_names = ['DIMS', 'FRODA', 'GOdMD', 'MDdMD', 'rTMD-F', 'rTMD-S', \
'ANMP', 'iENM', 'MAP', 'MENM-SD', 'MENM-SP', \
'Morph', 'LinInt']
labels = [] # Heat map labels
simulations = [] # List of simulation topology/trajectory filename pairs
universes = [] # List of MDAnalysis Universes representing simulations
# Build list of simulations, each represented by a pair of filenames
# ([topology filename], [trajectory filename]). Generate corresponding label
# list.
for method in method_names:
# Note: DIMS uses the PSF topology format
topname = 'top.psf' if 'DIMS' in method or 'TMD' in method else 'top.pdb'
pathname = 'fitted_psa.dcd'
method_dir = 'methods/{}'.format(method)
if method != 'LinInt':
for run in xrange(1, 4): # 3 runs per method
run_dir = '{}/{:03n}'.format(method_dir, run)
topology = '{}/{}'.format(method_dir, topname)
trajectory = '{}/{}'.format(run_dir, pathname)
labels.append(method + '(' + str(run) + ')')
simulations.append((topology, trajectory))
else: # only one LinInt trajectory
topology = '{}/{}'.format(method_dir, topname)
trajectory = '{}/{}'.format(method_dir, pathname)
labels.append(method)
simulations.append((topology, trajectory))
# Generate simulation list represented as Universes. Each item, sim, in
# simulations is a topology/trajectory filename pair that is unpacked into
# an argument list with the "splat" ("*") operator.
for sim in simulations:
universes.append(Universe(*sim))
print("Initializing Path Similarity Analysis...")
psa_hpa = PSAnalysis(universes, path_select='name CA', labels=labels)
print("Generating Path objects from trajectories...")
psa_hpa.generate_paths()
print("Performing full Hausdorff pairs analysis for all pairs of paths...")
psa_hpa.run_pairs_analysis(neighbors=True, hausdorff_pairs=True)
#------------------------------------------------
# Generate nearest neighbor distance plots
#------------------------------------------------
# Add three runs per method, except for LinInt (only has one)
identifier = PairID()
for name in method_names:
run_ids = [1] if 'LinInt' in name else [1,2,3]
identifier.add_sim(name, run_ids)
# Get the PairID:
# The comparison between a pair of simulations is assigned a unique
# PairID. Given the order in which simulations are added to PSA, the
# comparison between a pair of simulations can be identified by
# (distance) matrix indices. The PairID is the index in the corresponding
# distance vector of a given pair of simulations.
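# (Editor's note, not in the original script: for N simulations there are
# N*(N-1)/2 unique comparisons, so a PairID is simply an index into the
# condensed distance vector of that length.)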
s1, s2, s3 = 'DIMS 1', 'DIMS 2', 'rTMD-F 3'
pid1 = identifier.get_pair_id(s1, s2)
pid2 = identifier.get_pair_id(s2, s3)
print("Plotting nearest neighbors as a function of normalized progress (by" \
+ " frame for:")
print(" 1. comparison {:d}, {} to {}...".format(pid1, s1, s2))
psa_hpa.plot_nearest_neighbors(filename='nn_dims1_dims2.pdf', idx=pid1, \
labels=(s1, s2))
print(" 2. comparison {:d}, {} to {}...".format(pid2, s2, s3))
psa_hpa.plot_nearest_neighbors(filename='nn_dims2_tmds3.pdf', idx=pid2, \
labels=(s2, s3))
| Becksteinlab/PSAnalysisTutorial | psa_hausdorff-pairs.py | Python | gpl-3.0 | 5,431 | ["CRYSTAL", "MDAnalysis"] | 767cc82b4cd05836007fd59c897e6c105a735819c48d916d263b2a11e7639364 |
#! /usr/bin/env python
import os, sys, glob, subprocess
from setuptools import setup, find_packages, Command
PACKAGENAME = 'spiralgalaxygame'
INSTALL_REQUIRES = [
'preconditions >= 0.1',
'twisted >= 14.0',
'txpostgres >= 1.2.0',
'psycopg2 >= 2.5.2',
'mock >= 1.0.1',
]
def main(args = sys.argv[1:]):
setup(
name=PACKAGENAME,
description='Spiral Galaxy Game',
url='https://github.com/nejucomo/{0}'.format(PACKAGENAME),
license='GPLv3',
version='0.1.dev0',
author='Nathan Wilcox',
author_email='[email protected]',
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
entry_points = {
'console_scripts': [
'sgg-{1} = {0}.app.{2}:main'.format(PACKAGENAME, n.replace('_', '-'), n)
for n in [
os.path.basename(n)[:-3]
for n in glob.glob('{0}/app/*.py'.format(PACKAGENAME))
if not n.endswith('__init__.py')
]
],
},
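# (Editor's note / hedged illustration: the comprehension above exposes every
# module under spiralgalaxygame/app/ as a console script; e.g. a hypothetical
# app/foo_bar.py would become the command `sgg-foo-bar`, mapped to
# spiralgalaxygame.app.foo_bar:main.)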
package_data = {
PACKAGENAME: [
'web/static/*',
'sql/*',
]
},
cmdclass={
'test': TestWithCoverageAndTrialInAVirtualEnvCommand,
'test_integration': TestIntegrationCommand,
},
)
class VirtualEnvCommandBase (Command):
"""A base command class for setup subcommands to be run within a virtual env."""
TestToolRequirements = [] # Subclasses should override this with tools they require.
user_options = [
]
def __init__(self, dist):
Command.__init__(self, dist)
join = os.path.join
self.basedir = os.path.dirname(os.path.abspath(__file__))
self.pymod = join(self.basedir, PACKAGENAME)
self.testdir = join(self.basedir, 'build', 'test')
self.venvdir = join(self.testdir, 'venv')
self.bindir = os.path.join(self.venvdir, 'bin')
self.trial = os.path.join(self.bindir, 'trial')
self.pip = os.path.join(self.bindir, 'pip')
self.coverage = os.path.join(self.bindir, 'coverage')
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self._initialize_virtualenv()
self._install_testing_tools()
# Coverage and trial dump things into cwd, so cd:
os.chdir(self.testdir)
self.run_within_virtualenv()
def _initialize_virtualenv(self):
run('virtualenv', '--no-site-packages', self.venvdir)
def _install_testing_tools(self):
reqspath = os.path.join(self.testdir, 'test-tool-requirements.txt')
with file(reqspath, 'w') as f:
for req in INSTALL_REQUIRES + self.TestToolRequirements:
f.write(req + '\n')
run(self.pip, 'install', '--use-mirrors', '--requirement', reqspath)
class TestWithCoverageAndTrialInAVirtualEnvCommand (VirtualEnvCommandBase):
"""Run unit tests with coverage analysis and reporting in a virtualenv.
Note: A design goal of this is that all generated files (except for
.pyc files) will appear under ./build so that .gitignore can contain
only ./build and *.pyc, and a clean operation is merely 'rm -r ./build'.
"""
description = __doc__
# Internal settings:
TestToolRequirements = [
'coverage == 3.7.1',
]
def run_within_virtualenv(self):
self._update_python_path()
try:
run(self.coverage, 'run', '--branch', '--source', self.pymod, self.trial, PACKAGENAME)
finally:
run(self.coverage, 'html')
def _update_python_path(self):
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = '{0}:{1}'.format(self.basedir, os.environ['PYTHONPATH'])
else:
os.environ['PYTHONPATH'] = self.basedir
class TestIntegrationCommand (VirtualEnvCommandBase):
"""Run live git with an installed git-remote-lafs on the commandline against a lafs-giab configuration."""
description = __doc__
def run_within_virtualenv(self):
url = 'lafs::foo-not-yet-implemented'
run(self.pip, 'uninstall', '--yes', 'git-remote-lafs')
run(self.pip, 'install', self.basedir)
os.environ['PATH'] = '{0}:{1}'.format(self.bindir, os.environ['PATH'])
run('git', 'push', url, url)
def run(*args):
print 'Running: {0!r}'.format(args)
try:
subprocess.check_call(args, shell=False)
except subprocess.CalledProcessError, e:
print 'Process exited with {0!r} exit status.'.format(e.returncode)
raise
if __name__ == '__main__':
main()
| nejucomo/sgg | setup.py | Python | agpl-3.0 | 4,730 | [
"Galaxy"
] | a5b0fdc4f70379580296b794c883425b25ee2c0ec453773d82f91d60d59eb34d |
#!/usr/bin/env python
"""
This file reads arguments and generate vacancy and antisite structures
in intermetallics.
"""
__author__ = "Bharat Medasani, Enze Chen"
__date__ = "Aug 10, 2020"
import os
from argparse import ArgumentParser
from pymatgen.io.vasp.sets import MPMetalRelaxSet
from pymatgen.ext.matproj import MPRester
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.periodic_table import Element
from pymatgen.core.sites import Site
from pymatgen.core.structure import Structure
from pymatgen.analysis.defects.core import Vacancy
from pymatgen.io.vasp.inputs import Kpoints
def get_sc_scale(inp_struct, final_site_no):
lengths = inp_struct.lattice.abc
no_sites = inp_struct.num_sites
mult = (final_site_no/no_sites*lengths[0]*lengths[1]*lengths[2]) ** (1./3)
num_mult = [int(round(mult/l)) for l in lengths]
num_mult = [i if i > 0 else 1 for i in num_mult]
return num_mult
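# Worked example (hypothetical numbers): for a 2-atom primitive cell with
# 3 Angstrom lattice lengths and final_site_no = 128,
# mult = (128/2 * 3*3*3) ** (1/3) = 12 and num_mult = [round(12/3)] * 3
# = [4, 4, 4], i.e. a 4x4x4 supercell containing 2 * 4**3 = 128 atoms.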
def vac_antisite_def_struct_gen(mpid, mapi_key, cellmax, struct_file=None):
if not mpid and not struct_file:
print ("============\nERROR: Provide an mpid\n============")
return
# Get primitive structure from the Materials Project DB
if not struct_file:
if not mapi_key:
with MPRester() as mp:
struct = mp.get_structure_by_material_id(mpid)
else:
with MPRester(mapi_key) as mp:
struct = mp.get_structure_by_material_id(mpid)
else:
struct = Structure.from_file(struct_file)
sga = SpacegroupAnalyzer(struct)
prim_struct = sga.find_primitive()
#prim_struct_sites = len(prim_struct.sites)
#conv_struct = sga.get_conventional_standard_structure()
#conv_struct_sites = len(conv_struct.sites)
#conv_prim_ratio = int(conv_struct_sites / prim_struct_sites)
# Default VASP settings
def_vasp_incar_param = {'ISIF':2, 'EDIFF':1e-6, 'EDIFFG':0.001,}
kpoint_den = 15000
# Create bulk structure and associated VASP files
sc_scale = get_sc_scale(inp_struct=prim_struct, final_site_no=cellmax)
blk_sc = prim_struct.copy()
blk_sc.make_supercell(scaling_matrix=sc_scale)
site_no = blk_sc.num_sites
# Rescale if needed
while site_no > cellmax:
max_sc_dim = max(sc_scale)
i = sc_scale.index(max_sc_dim)
sc_scale[i] -= 1
blk_sc = prim_struct.copy()
blk_sc.make_supercell(scaling_matrix=sc_scale)
site_no = blk_sc.num_sites
blk_str_sites = set(blk_sc.sites)
custom_kpoints = Kpoints.automatic_density(blk_sc, kppa=kpoint_den)
mpvis = MPMetalRelaxSet(blk_sc, user_incar_settings=def_vasp_incar_param,
user_kpoints_settings=custom_kpoints)
if mpid:
root_fldr = mpid
else:
root_fldr = struct.composition.reduced_formula
fin_dir = os.path.join(root_fldr, 'bulk')
mpvis.write_input(fin_dir)
if not mpid: # write the input structure if mpid is not used
struct.to(fmt='poscar', filename=os.path.join(fin_dir, 'POSCAR.uc'))
# Create each defect structure and associated VASP files
# First find all unique defect sites
symm_struct = SpacegroupAnalyzer(prim_struct).get_symmetrized_structure()
unique_sites = sorted([site[0] for site in symm_struct.equivalent_sites], \
key=lambda s: s.species_string)
for i, site in enumerate(unique_sites):
vac = Vacancy(structure=prim_struct, defect_site=site)
vac_sc = vac.generate_defect_structure(supercell=sc_scale)
# Get vacancy site information
vac_str_sites = set(vac_sc.sites)
vac_sites = blk_str_sites - vac_str_sites
vac_site = next(iter(vac_sites))
site_mult = vac.get_multiplicity()
vac_site_specie = vac_site.specie
vac_symbol = vac_site_specie.symbol
custom_kpoints = Kpoints.automatic_density(vac_sc, kppa=kpoint_den)
mpvis = MPMetalRelaxSet(vac_sc,
user_incar_settings=def_vasp_incar_param,
user_kpoints_settings=custom_kpoints)
vac_dir = 'vacancy_{}_mult-{}_sitespecie-{}'.format(
str(i+1), site_mult, vac_symbol)
fin_dir = os.path.join(root_fldr, vac_dir)
mpvis.write_input(fin_dir)
# Antisites generation at the vacancy site
struct_species = blk_sc.species
for specie in set(struct_species) - set([vac_site_specie]):
specie_symbol = specie.symbol
anti_sc = vac_sc.copy()
anti_sc.append(specie, vac_site.frac_coords)
mpvis = MPMetalRelaxSet(anti_sc,
user_incar_settings=def_vasp_incar_param,
user_kpoints_settings=custom_kpoints)
anti_dir = 'antisite_{}_mult-{}_sitespecie-{}_subspecie-{}'.format(
str(i+1), site_mult, vac_symbol, specie_symbol)
fin_dir = os.path.join(root_fldr, anti_dir)
mpvis.write_input(fin_dir)
def substitute_def_struct_gen(mpid, solute, mapi_key, cellmax,
struct_file=None):
if not mpid and not struct_file:
print ("============\nERROR: Provide an mpid\n============")
return
if not solute:
print ("============\nERROR: Provide solute atom\n============")
return
# Get primitive structure from the Materials Project DB
if not struct_file:
if not mapi_key:
with MPRester() as mp:
struct = mp.get_structure_by_material_id(mpid)
else:
with MPRester(mapi_key) as mp:
struct = mp.get_structure_by_material_id(mpid)
else:
struct = Structure.from_file(struct_file)
if mpid:
root_fldr = mpid
else:
root_fldr = struct.composition.reduced_formula
sga = SpacegroupAnalyzer(struct)
prim_struct = sga.find_primitive()
#prim_struct_sites = len(prim_struct.sites)
#conv_struct = sga.get_conventional_standard_structure()
#conv_struct_sites = len(conv_struct.sites)
#conv_prim_ratio = int(conv_struct_sites / prim_struct_sites)
# Default VASP settings
def_vasp_incar_param = {'ISIF':2, 'EDIFF':1e-6, 'EDIFFG':0.001,}
kpoint_den = 15000
# Create each substitutional defect structure and associated VASP files
sc_scale = get_sc_scale(inp_struct=prim_struct, final_site_no=cellmax)
blk_sc = prim_struct.copy()
blk_sc.make_supercell(scaling_matrix=sc_scale)
site_no = blk_sc.num_sites
# Rescale if needed
while site_no > cellmax:
max_sc_dim = max(sc_scale)
i = sc_scale.index(max_sc_dim)
sc_scale[i] -= 1
blk_sc = prim_struct.copy()
blk_sc.make_supercell(scaling_matrix=sc_scale)
site_no = blk_sc.num_sites
# Create solute structures at vacancy sites
# First find all unique defect sites
blk_str_sites = set(blk_sc.sites)
symm_struct = SpacegroupAnalyzer(prim_struct).get_symmetrized_structure()
unique_sites = sorted([site[0] for site in symm_struct.equivalent_sites], \
key=lambda s: s.species_string)
for i, site in enumerate(unique_sites):
vac = Vacancy(structure=prim_struct, defect_site=site)
vac_sc = vac.generate_defect_structure(supercell=sc_scale)
# Get vacancy site information
vac_str_sites = set(vac_sc.sites)
vac_sites = blk_str_sites - vac_str_sites
vac_site = next(iter(vac_sites))
vac_specie = vac_site.specie.symbol
site_mult = vac.get_multiplicity()
# Solute substitution defect generation at the vacancy site
solute_struct = vac_sc.copy()
solute_struct.append(solute, vac_site.frac_coords)
custom_kpoints = Kpoints.automatic_density(solute_struct,
kppa=kpoint_den)
mpvis = MPMetalRelaxSet(solute_struct,
user_incar_settings=def_vasp_incar_param,
user_kpoints_settings=custom_kpoints)
# Generate VASP directory
sub_def_dir ='solute_{}_mult-{}_sitespecie-{}_subspecie-{}'.format(
str(i+1), site_mult, vac_specie, solute)
fin_dir = os.path.join(root_fldr, sub_def_dir)
mpvis.write_input(fin_dir)
def im_vac_antisite_def_struct_gen():
m_description = 'Command to generate vacancy and antisite defect ' \
'structures for intermetallics.'
parser = ArgumentParser(description=m_description)
parser.add_argument(
"--mpid",
default=None,
type=str.lower,
help="Materials Project id of the intermetallic structure.\n" \
"For more info on Materials Project, please refer to " \
"www.materialsproject.org")
parser.add_argument(
"--struct",
default=None,
type=str,
help="Filename of the intermetallic structure.")
parser.add_argument(
"--mapi_key",
default=None,
help="Your Materials Project REST API key.\n" \
"For more info, please refer to " \
"www.materialsproject.org/open")
parser.add_argument(
"--cellmax",
type=int,
default=128,
help="Maximum number of atoms in supercell.\n" \
"The default is 128\n" \
"Keep in mind the number of atoms in the supercell" \
"may vary from the provided number including the default.")
args = parser.parse_args()
vac_antisite_def_struct_gen(args.mpid, args.mapi_key, args.cellmax,
struct_file=args.struct)
def im_sol_sub_def_struct_gen():
m_description = 'Command to generate solute substitution defect ' \
'structures for intermetallics.'
parser = ArgumentParser(description=m_description)
parser.add_argument(
"--mpid",
default=None,
type=str.lower,
help="Materials Project id of the intermetallic structure.\n" \
"For more info on Materials Project, please refer to " \
"www.materialsproject.org")
parser.add_argument(
"--struct",
default=None,
type=str,
help="Filename of the intermetallic structure." \
"Supported file types include CIF, POSCAR/CONTCAR," \
"CHGCAR, LOCPOT, vasprun.xml, CSSR, Netcdf, and pymatgen JSONs.")
parser.add_argument(
"--solute",
type=str,
help="Solute Element")
parser.add_argument(
"--mapi_key",
default=None,
help="Your Materials Project REST API key.\n" \
"For more info, please refer to " \
"www.materialsproject.org/open")
parser.add_argument(
"--cellmax",
type=int,
default=128,
help="Maximum number of atoms in supercell.\n" \
"The default is 128\n" \
"Keep in mind the number of atoms in the supercell" \
"may vary from the provided number including the default.")
args = parser.parse_args()
substitute_def_struct_gen(args.mpid, args.solute, args.mapi_key,
args.cellmax, struct_file=args.struct)
if __name__ == '__main__':
im_vac_antisite_def_struct_gen()
# im_sol_sub_def_struct_gen()
| pydii/pydii | pydii/scripts/gen_def_structure.py | Python | mit | 11,604 | [
"NetCDF",
"VASP",
"pymatgen"
] | d8e7ee45e80ea8d0d50302b2b4f8076edb2f751d9a608a53bcbc7ce45e8cb62e |
# Python module
import itertools
# Python extension modules (require extension installations)
import numpy as np
from scipy.stats import futil
from scipy.sparse.csgraph import _validation
from scipy.stats import uniform, norm, triang, lognorm, beta
## Sample generator
## * Generates sample multipliers from the specified distribution or bounds for the:
## * Latin Hypercube sampling method for the following distributions:
## 1. Uniform distribution
## 2. Normal (Gaussian) distribution
## 3. Triangular distribution
## 4. Lognormal distribution
## 5. Beta distribution
## * Random (uniform distribution) sampling method
## * Full factorial of low, mid and high values
## * Applies the multipliers to modify a mapped parameter set
class SampleGenerator :
# Method generates Latin Hypercube sampled multipliers for the selected distribution specified for each parameter (via dictionary)
def generateLatinHypercubeSampledMultipliers(self, specification_map, number_samples) :
# Construct sets of random sampled multipliers from the selected distribution for each parameter
multiplier_sets = {}
for key, specification in specification_map.items() :
# Generate stratified random probability values for distribution generation via inverse CDF
stratified_random_probabilities = ((np.array(range(number_samples)) + np.random.random(number_samples))/number_samples)
# Use stratified random probability values to generate stratified samples from selected distribution via inverse CDF
distribution = specification['distribution']
if distribution == 'uniform' :
lower = specification['settings']['lower']
base = specification['settings']['upper'] - lower
multiplier_sets[key] = uniform.ppf(stratified_random_probabilities, loc=lower, scale=base).tolist()
elif distribution == 'normal' :
mean = specification['settings']['mean']
std_dev = specification['settings']['std_dev']
multiplier_sets[key] = norm.ppf(stratified_random_probabilities, loc=mean, scale=std_dev).tolist()
elif distribution == 'triangular' :
a = specification['settings']['a']
base = specification['settings']['b'] - a
c_std = (specification['settings']['c'] - a)/base
multiplier_sets[key] = triang.ppf(stratified_random_probabilities, c_std, loc=a, scale=base).tolist()
elif distribution == 'lognormal' :
lower = specification['settings']['lower']
scale = specification['settings']['scale']
sigma = specification['settings']['sigma']
multiplier_sets[key] = lognorm.ppf(stratified_random_probabilities, sigma, loc=lower, scale=scale).tolist()
elif distribution == 'beta' :
lower = specification['settings']['lower']
base = specification['settings']['upper'] - lower
a = specification['settings']['alpha']
b = specification['settings']['beta']
multiplier_sets[key] = beta.ppf(stratified_random_probabilities, a, b, loc=lower, scale=base).tolist()
# Randomly select from sampled multiplier sets without replacement to form multipliers (dictionaries)
sampled_multipliers = []
for i in range(number_samples) :
sampled_multiplier = {}
for key, multiplier_set in multiplier_sets.items() :
random_index = np.random.randint(len(multiplier_set))
sampled_multiplier[key] = multiplier_set.pop(random_index)
sampled_multipliers.append(sampled_multiplier)
return sampled_multipliers
# Method generates Random sampled multipliers for specified bounds (dictionary)
def generateRandomSampledMultipliers(self, specification_map, number_samples) :
# Generate samples of random multipliers
sampled_multipliers = []
for i in range(number_samples) :
sampled_multiplier = {}
for key, specification in specification_map.items() :
lower_bound = 1 - specification['bound']
upper_bound = 1 + specification['bound']
sampled_multiplier[key] = np.random.uniform(lower_bound, upper_bound)
sampled_multipliers.append(sampled_multiplier)
return sampled_multipliers
# Method generates Full Factorial multipliers (from lower, mid, upper) for specified bounds (dictionary)
def generateFullFactorialMultipliers(self, specification_map) :
# Construct sets of lower, mid, and upper multipliers
lower_mid_upper_sets = []
key_set = [] # maintains key order
for key, specification in specification_map.items() :
lower_bound = 1 - specification['bound']
upper_bound = 1 + specification['bound']
lower_mid_upper_sets.append([lower_bound, 1, upper_bound])
key_set.append(key)
# Generate the cartesian product of the multiplier sets
cartesian_product = list(itertools.product(*lower_mid_upper_sets))
# Map the multiplier sets back to their parameter keys
factorial_multipliers = []
for multiplier_set in cartesian_product :
key_mapped_multiplier = {}
for index, key in enumerate(key_set) :
key_mapped_multiplier[key] = multiplier_set[index]
factorial_multipliers.append(key_mapped_multiplier)
return factorial_multipliers
# Method calculates the lower threshold value given a tail probability for a specified distribution
def lowerThreshold(self, distribution, specification, tail_probability) :
if distribution == 'normal' :
mean = specification['mean']
std_dev = specification['std_dev']
return norm.ppf(tail_probability, loc=mean, scale=std_dev)
elif distribution == 'lognormal' :
lower = specification['lower']
scale = specification['scale']
sigma = specification['sigma']
return lognorm.ppf(tail_probability, sigma, loc=lower, scale=scale)
# Method calculates the upper threshold value given a tail probability for a specified distribution
def upperThreshold(self, distribution, specification, tail_probability) :
return self.lowerThreshold(distribution, specification, 1-tail_probability)
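    # For example, upperThreshold('normal', spec, 0.05) simply evaluates
    # lowerThreshold('normal', spec, 0.95), i.e. the 95th percentile of the
    # distribution.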
# Method utilises a multiplier to modify parameter values
def multipy(self, parameter_values, multipliers, parameter_data_types={}) :
modified_parameter_values = {}
# Multiply each keyed parameter value by the corresponding multiplier where supplied
for key, multiplier in multipliers.items() :
if type(parameter_values[key]) is dict : # nested
modified_parameter_values[key] = {}
for nested_key, nested_value in parameter_values[key].items() :
modified_parameter_values[key][nested_key] = nested_value*multiplier
                    if key in parameter_data_types :
if parameter_data_types[key] == 'integer' :
modified_parameter_values[key][nested_key] = modified_parameter_values[key][nested_key].round().astype(int)
else :
modified_parameter_values[key] = parameter_values[key]*multiplier
                if key in parameter_data_types :
if parameter_data_types[key] == 'integer' :
modified_parameter_values[key] = modified_parameter_values[key].round().astype(int)
return modified_parameter_values
# Method generates plot values for the selected distribution specified for each parameter (via dictionary)
def generateDistributionPlotValues(self, specification) :
sample_number = 1000
x_values = []
y_values = []
# Generate plot values from selected distribution via PDF
distribution = specification['distribution']
if distribution == 'uniform' :
lower = specification['settings']['lower']
upper = specification['settings']['upper']
base = upper - lower
incr = base/sample_number
for i in range(sample_number) :
x_values.append(lower+i*incr)
y_values = uniform.pdf(x_values, loc=lower, scale=base).tolist()
elif distribution == 'normal' :
mean = specification['settings']['mean']
std_dev = specification['settings']['std_dev']
x_min = mean - 3*std_dev
x_max = mean + 3*std_dev
incr = (x_max - x_min)/sample_number
for i in range(sample_number) :
x_values.append(x_min+i*incr)
y_values = norm.pdf(x_values, loc=mean, scale=std_dev).tolist()
elif distribution == 'triangular' :
a = specification['settings']['a']
base = specification['settings']['b'] - a
c_std = (specification['settings']['c'] - a)/base
incr = base/sample_number
for i in range(sample_number) :
x_values.append(a+i*incr)
y_values = triang.pdf(x_values, c_std, loc=a, scale=base).tolist()
elif distribution == 'lognormal' :
lower = specification['settings']['lower']
scale = specification['settings']['scale']
sigma = specification['settings']['sigma']
x_max = lognorm.isf(0.01, sigma, loc=lower, scale=scale)
incr = (x_max - lower)/sample_number
for i in range(sample_number) :
x_values.append(lower+i*incr)
y_values = lognorm.pdf(x_values, sigma, loc=lower, scale=scale).tolist()
elif distribution == 'beta' :
lower = specification['settings']['lower']
base = specification['settings']['upper'] - lower
incr = base/sample_number
for i in range(sample_number) :
x_values.append(lower+i*incr)
a = specification['settings']['alpha']
b = specification['settings']['beta']
y_values = beta.pdf(x_values, a, b, loc=lower, scale=base).tolist()
# Remove any nan/inf values
remove_indexes = []
for i in range(sample_number) :
if not np.isfinite(y_values[i]) :
remove_indexes.append(i)
        # Remove all flagged indices in one call so positions do not shift
        if remove_indexes :
            x_values = np.delete(x_values, remove_indexes)
            y_values = np.delete(y_values, remove_indexes)
return { 'x_values' : x_values, 'y_values' : y_values }
# END SampleGenerator
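# Minimal usage sketch (added for illustration; the parameter key and bounds
# are hypothetical, but the specification layout matches what
# generateLatinHypercubeSampledMultipliers expects above).
if __name__ == '__main__' :
    generator = SampleGenerator()
    spec_map = {
        'growth_rate' : {
            'distribution' : 'uniform',
            'settings' : { 'lower' : 0.8, 'upper' : 1.2 }
        }
    }
    samples = generator.generateLatinHypercubeSampledMultipliers(spec_map, 10)
    print(len(samples)) # 10 dicts, each mapping 'growth_rate' to a multiplier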
| GlobalEcologyLab/SARDM | SampleGenerator.py | Python | gpl-3.0 | 11,017 | [
"Gaussian"
] | 73a61629b776cd486d71c57ed8014f2833f2c7d10a9c93fa2cd063512958a55c |
from scipy.ndimage import filters
from PIL import Image
#from numpy import *
from pylab import *
def Gausian_response(img,sigma=1):
""" Compute Gaussian response function
for each pixel in a graylevel image. """
# Gausian response
img_sigma = zeros(img.shape)
filters.gaussian_filter(img, (sigma,sigma), (0,0), img_sigma)
return img_sigma
img = array(Image.open('img/graffiti.jpg').convert('L'))
sigma1=1
keypoints=ones(img.shape, dtype=bool)
img_sigma1=Gausian_response(img,sigma1)
for i in range(4):
img_sigma1=Gausian_response(img,sigma1)
sigma2=sigma1*1.414
img_sigma2=Gausian_response(img,sigma2)
keypoints[keypoints]=(img_sigma2[keypoints]-img_sigma1[keypoints])>25
subplot(1,2,1)
set_cmap('gray')
imshow(img)
title('original image')
subplot(1,2,2)
imshow(keypoints)
title('stable keypoints')
show() | wasit7/cs634 | 2016/lab3_find_stable_keypoints.py | Python | bsd-2-clause | 878 | [
"Gaussian"
] | 1de22a443d075f6b62347bb3b715c8e245eb0cb8f4de4f8a2b3ff386bf9174b7 |
# -*- coding: utf-8 -*-
# coding=utf-8
__author__ = 'ben'
import os
from os import walk
import boto.mturk.connection
from boto.s3.connection import S3Connection
from boto.mturk.qualification import LocaleRequirement, Qualifications, Requirement
import datetime
import csv
import yaml
import sys
import datetime
import pandas as pd
import codecs
from pprint import pprint
sandbox_host = 'mechanicalturk.sandbox.amazonaws.com'
real_host = 'mechanicalturk.amazonaws.com'
host = os.environ['MODA_MTURK_HOST']
hosts={
'sandbox':'mechanicalturk.sandbox.amazonaws.com',
'real':'mechanicalturk.amazonaws.com'
}
phasesQualID = {
'sandbox': {
'practice': '3LJ6LLBDMBQTWUTLG75O5EUQMZM6A6',
'phase1': '3OFCXZK7I1YMQQ45Q5LPJ2OOHCHK93'
},
'real': {
'practice': '3EOSKS3N0DQYQTMKNK1E0HHQOWRVU1',
'phase1': '3874R5DF6Q5C7TEUP9O1NNJXLRMPJ6'
}
}
myWorkerID = {
'sandbox': 'A2SI2XQA7HPR8V',
'real': 'A2SI2XQA7HPR8V'
}
testingQual = '35NJKTSSL0Z7GHLPTM145UTQ6PFZXY'
class MturkTools:
"""Tools for mturk"""
def __init__(self):
self.phase = 'phase1experts4'
self.expert_only = True
self.phase_to_save = {'phase1'}
self.date_str = datetime.date.today().isoformat()
self.path = '/mnt/c/Users/bdyet/GoogleDrive/MODA/DownloadUserData/'
if not os.path.exists(self.path+self.phase):
os.makedirs(self.path+self.phase)
self.url = "https://shrouded-plains-8041.herokuapp.com/"
self.mturk = boto.mturk.connection.MTurkConnection(
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
host=hosts[host],
debug=1 # debug = 2 prints out all requests.
)
self.titles_to_remove = ['Find patterns in sleeping brainwaves (Training HIT)', 'Find patterns in sleeping brainwaves']
print("Welcome to mturk tools, your balance is:")
accountBal = self.mturk.get_account_balance() # [$10,000.00]
print(accountBal)
if self.phase=='sandbox' and accountBal[0] != '10,000.00':
print('Error, your meant to be in sandbox but you are not!')
sys.exit()
def get_all_user_data_from_aws(self):
s3 = S3Connection(os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'])
bucket = s3.get_bucket('moss-assets')
bucket_list = bucket.list()
i=0
for l in bucket_list:
key_string = str(l.key)
if self.expert_only:
str2check = 'UserData_expert'
else:
str2check = 'UserData'
if key_string.find(str2check) != -1:
print('Downloading ' + key_string)
l.get_contents_to_filename(self.path+self.phase + '/' +key_string)
i += 1
print("%i user data files downloaded" % i)
def parse_aws_to_csv(self):
mypath = self.path+self.phase + '/'
for (dirpath, dirnames, filenames) in walk(mypath):
break
with open(mypath+'EventLocations' + self.date_str + '.csv', 'w') as event_loc_csv_file:
event_loc_csv_writer = csv.writer(event_loc_csv_file)
event_loc_csv_writer.writerow(['filename',
'phase',
'subID',
'epochNum',
'blockNum',
'annotatorID',
'MODA_batchNum',
'annotatorEventIndex',
'startPercent',
'durationPercent',
'startSecs',
'durationSecs',
'scoreConfidence',
'TimeWindowFirstShown',
'TimeMarkerCreated',
'TimeMarkerLastModified',
'turkHitId',
'turkAssignmentId'])
with open(mypath+'EpochViews' + self.date_str + '.csv', 'w') as epoch_csv_file:
epoch_csv_writer = csv.writer(epoch_csv_file)
epoch_csv_writer.writerow(['filename',
'epochNum',
'blockNum',
'phase',
'annotatorID',
'hitId',
'assignmentId'])
with open(mypath+'UserStats' + self.date_str + '.csv', 'w') as user_stats_csv_file:
user_stats_csv_writer = csv.writer(user_stats_csv_file)
user_stats_csv_writer.writerow(['userName',
'email',
'fname',
'lname',
'userType',
'userSubType',
'totalSetsScored',
'totalEpochsScored',
'totalMarkersScored',
'RPSGT',
'yearsExperience',
'spindleHoursOverLifetime',
'whyQualified',
'otherComments'])
for userFile in filenames: # collate markers, and collate batches
try:
if not (userFile.find('UserData') > -1):
continue
with open(mypath + '/' + userFile) as userFileHandle:
if userFile == "UserData_preview":
continue
user_data = yaml.safe_load(userFileHandle)
try:
if 'userName' not in user_data:
continue
print("working on user %s" % user_data['userName'])
dataExists = False
epochs_complete = 0
markers_complete = 0
except:
print(userFile)
for phase in user_data['batches']:
if phase not in self.phase_to_save:
continue
sets_comp = user_data['setsCompleted'][phase]
print(" Sets completed in {0}: {1}".format(phase, sets_comp))
for batch in user_data['batches'][phase]:
if batch == 'batchMeta':
continue
for img in user_data['batches'][phase][batch]['imgs']:
img_data = user_data['batches'][phase][batch]['imgs'][img]
if len(img_data['markers']) > 0 or img_data['noMarkers'] == 'true' or ('mturkInfo' in img_data):
dataExists = True
epochs_complete += 1
if user_data['userType'] == 'mturker':
assignment_id = img_data['mturkInfo']['assignmentId']
hit_id = img_data['mturkInfo']['hitId']
else:
hit_id = None
assignment_id = None
epoch_csv_writer.writerow([img_data['filename'],
img_data['epoch'],
img_data['batch'],phase,
user_data['userName'],
hit_id,
assignment_id])
for marker in img_data['markers']:
if marker['gs'] == 'true' or marker['deleted'] == 'true':
continue
markers_complete += 1
event_loc_csv_writer.writerow([img_data['filename'],
phase,
img_data['subID'],
img_data['epoch'],
img_data['batch'],
user_data['userName'],
batch,
marker['markerIndex'],
marker['xP'],
marker['wP'],
marker['xSecs'],
marker['wSecs'],
marker['conf'],
marker['imgFirstShown'],
marker['markerCreated'],
marker['timeStamp'],
hit_id,
assignment_id])
if not dataExists:
print("ERROR, %s has a file but did not complete any images. " % user_data['userName'])
except:
print("Massive Error somewhere with {0}".format(user_data['userName']))
if user_data['userType'] == 'mturker':
user_subtype = None
rpsgt = None
email = None
years_experience = None
spindle_hours_over_lifetime = None
why_qualified = None
other_comments = None
else:
email = user_data['registerData']['email']
other_comments = user_data['registerData']['comments']
if 'RPSGTNum' in user_data['registerData']:
user_subtype = 'psgTech'
rpsgt = user_data['registerData']['RPSGTNum']
years_experience = user_data['registerData']['yearsExperience']
spindle_hours_over_lifetime = None
why_qualified = None
elif 'other' in user_data['registerData']:
user_subtype = 'other'
why_qualified = user_data['registerData']['other']
rpsgt = None
years_experience = user_data['registerData']['yearsExperience']
spindle_hours_over_lifetime = user_data['registerData']['timeWorked']
else:
user_subtype = 'researcher'
spindle_hours_over_lifetime = user_data['registerData']['timeWorked']
rpsgt = None
years_experience = user_data['registerData']['yearsExperience']
why_qualified = None
if spindle_hours_over_lifetime is not None:
try:
spindle_hours_over_lifetime = unicode(spindle_hours_over_lifetime.strip(codecs.BOM_UTF8),'utf-8')
except:
spindle_hours_over_lifetime = 'conversion error'
if why_qualified is not None:
try:
why_qualified = unicode(why_qualified.strip(codecs.BOM_UTF8), 'utf-8')
except:
why_qualified = 'conversion error'
if other_comments is not None:
try:
other_comments = unicode(other_comments.strip(codecs.BOM_UTF8), 'utf-8')
except:
other_comments = 'conversion error'
if 'fname' in user_data:
fname = user_data['fname']
else:
fname = 'missing'
if 'lname' in user_data:
lname = user_data['lname']
else:
lname = 'missing'
user_stats_csv_writer.writerow([user_data['userName'],
email,
fname,
lname,
user_data['userType'],
user_subtype,
sets_comp,
epochs_complete,
markers_complete,
rpsgt,
years_experience,
spindle_hours_over_lifetime,
why_qualified,
other_comments])
user_stats_csv_file.close()
epoch_csv_file.close()
event_loc_csv_file.close()
def save_mturk_data(self):
hits = self.get_all_reviewable_hits()
try:
workerResultData = pd.read_csv(self.path+self.phase + "/WorkerResultData.csv", sep=',')
except:
workerResultData = pd.DataFrame(columns={'workerId', 'viewedImgs', 'numViewed', 'numHits', 'browser'})
for hit in hits:
assignments = self.mturk.get_assignments(hit.HITId)
for assignment in assignments:
print("Answers of the worker %s" % assignment.WorkerId)
for answer in assignment.answers:
for idx, ans in enumerate(answer):
if idx == 2:
for viewedImg in ans.fields:
browser = viewedImg
print(browser)
elif idx == 3:
for viewedImg in ans.fields:
print(viewedImg)
viewedImg = viewedImg.split(',')
if len(viewedImg)<1 or viewedImg==None:
print("Missing DATA for {0}".format(assignment.WorkerId))
print(viewedImg)
continue
if assignment.WorkerId not in workerResultData['workerId'].values:
ser = pd.Series([assignment.WorkerId, viewedImg, len(viewedImg), 1, browser], index=['workerId','viewedImgs','numViewed','numHits','browser'])
workerResultData = workerResultData.append(ser, ignore_index=True)
else:
currentData = workerResultData.loc[workerResultData['workerId']==assignment.WorkerId, 'viewedImgs']
currentNumViewed = workerResultData.loc[workerResultData['workerId']==assignment.WorkerId, 'numViewed']
currentNumHits = workerResultData.loc[workerResultData['workerId']==assignment.WorkerId, 'numHits']
if not set(viewedImg).issubset(currentData.values[0]):
currentDataValue = currentData.values[0]
if isinstance(currentDataValue, basestring):
currentDataValue = currentDataValue.split(',')
workerLoc = workerResultData['workerId']==assignment.WorkerId
currentDataValue.extend(viewedImg)
workerResultData.loc[workerLoc, 'viewedImgs'] = [currentDataValue]
workerResultData.loc[workerLoc, 'numViewed'] = currentNumViewed+len(viewedImg)
workerResultData.loc[workerLoc, 'numHits'] = currentNumHits+1
workerResultData.to_csv(self.path+self.phase + "/WorkerResultData.csv")
def get_all_reviewable_hits(self):
page_size = 50
hits = self.mturk.get_reviewable_hits(page_size=page_size)
print("Total results to fetch %s " % hits.TotalNumResults)
print("Request hits page %i" % 1)
total_pages = float(hits.TotalNumResults)/page_size
int_total = int(total_pages)
if total_pages - int_total > 0:
total_pages = int_total+1
else:
total_pages = int_total
pn = 1
while pn < total_pages:
pn += 1
print("Request hits page %i" % pn)
temp_hits = self.mturk.get_reviewable_hits(page_size=page_size,page_number=pn)
hits.extend(temp_hits)
return hits
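    # Worked example of the paging arithmetic above (hypothetical numbers):
    # 123 reviewable HITs with page_size = 50 gives total_pages = 2.46, so
    # int_total = 2 and one extra page is added, i.e. pages 1-3 are fetched.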
def get_all_hits(self):
return self.mturk.get_all_hits()
def approve_hits(self):
reviewable_hits = self.get_all_reviewable_hits()
for hit in reviewable_hits:
assignments = self.mturk.get_assignments(hit.HITId)
for assignment in assignments:
print("Worker %s" % assignment.WorkerId)
try:
self.mturk.approve_assignment(assignment.AssignmentId)
except:
print("already approved")
print("--------------------")
self.mturk.disable_hit(hit.HITId)
def disable_all_hits(self):
allHits = self.mturk.get_all_hits()
for hit in allHits:
if hit.Title in self.titles_to_remove:
print('deleting')
self.mturk.disable_hit(hit.HITId)
def dispose_reviewed_hits(self):
allHits = self.mturk.get_all_hits()
for hit in allHits:
if hit.Title in self.titles_to_remove:
print('disposing')
self.mturk.dispose_hit(hit.HITId)
def expire_remaining_hits(self):
allHits = self.mturk.get_all_hits()
for hit in allHits:
if hit.Title in self.titles_to_remove:
print('expiring {0}'.format(hit.Title))
self.mturk.expire_hit(hit.HITId)
def remove_qualifications(self, phase_type, workers_to_remove='me'):
if workers_to_remove != 'me':
qual_data= self.mturk.get_all_qualifications_for_qual_type(phasesQualID[host][phase_type])
workers = []
for worker in qual_data:
workers.append(worker.SubjectId)
else:
workers = [myWorkerID[host]]
for workerID in workers:
try:
self.mturk.revoke_qualification(workerID, phasesQualID[host][phase_type], reason='Granted in error')
except:
print('worker %s does not have qual' % workerID)
def post_prac_hits(self, num_hits, amount, testing=False):
title = "Find patterns in sleeping brainwaves (Training HIT)"
description = "This is a training hit which will grant you a qualification to complete more HITs." \
"Expected HIT completion time is 12mins (because you have to read instructions etc)," \
" BUT future HITs will be shorter!!!" \
"Your job is to find patterns in recordings of the sleeping brain! Help science understand " \
"sleep and its memory benefits. \n" \
"This project is run by the MODA team at University of California, Riverside." \
"If you would like to find out more about this project please visit our Open Science Project" \
"at https://osf.io/8bma7/ or consider backing our project on " \
"Experiment: https://experiment.com/projects/crowdsourcing-the-analysis-of-sleep-can-the-public-be-sleep-scientists"
keywords = ["sleep", "scoring","spindles","spindle","brainwaves", "MODA", "psych", "annotation"]
frame_height = 800 # the height of the iframe holding the external hit
questionform = boto.mturk.question.ExternalQuestion(self.url + '?currentPhase=practice', frame_height)
quals = Qualifications()
quals.add(Requirement('000000000000000000L0', 'GreaterThanOrEqualTo', '95')) #'Worker_PercentHITsApproved'
quals.add(Requirement(phasesQualID[host]['practice'], 'DoesNotExist'))
quals.add(Requirement(phasesQualID[host]['phase1'], 'DoesNotExist'))
if host != 'sandbox':
if testing:
quals.add(Requirement(testingQual, 'Exists'))
else:
quals.add(Requirement('00000000000000000040', 'GreaterThanOrEqualTo', '100')) #'Worker_NumberHITsApproved'
i=0
for i in range(1, num_hits+1):
self.mturk.create_hit(
title=title,
description=description,
keywords=keywords,
question=questionform,
reward=boto.mturk.price.Price(amount=amount),
lifetime=datetime.timedelta(4),
duration=datetime.timedelta(minutes=30),
qualifications=quals,
response_groups=('Minimal', 'HITDetail'), # I don't know what response groups are
)
print('Posted ' + str(i) + ' practice HITS @ $' + str(amount))
def post_futher_hits(self, num_hits, amount, testing=False):
url = "https://shrouded-plains-8041.herokuapp.com/"
title = "Find patterns in sleeping brainwaves"
description = "Expected HIT completion time is ~3 mins.\n\n" \
"Your job is to find patterns in recordings of the sleeping brain! Help science understand " \
"sleep and its memory benefits. \n" \
"This project is run by the MODA team at University of California, Riverside." \
"If you would like to find out more about this project please visit our Open Science Project" \
"at https://osf.io/8bma7/ or consider backing our project on " \
"Experiment: https://experiment.com/projects/crowdsourcing-the-analysis-of-sleep-can-the-public-be-sleep-scientists"
keywords = ["sleep", "scoring", "spindles", "spindle", "brainwaves", "MODA", "psych", "annotation"]
frame_height = 800 # the height of the iframe holding the external hit
questionform = boto.mturk.question.ExternalQuestion(url + '?currentPhase=phase1', frame_height)
quals = Qualifications()
quals.add(Requirement('000000000000000000L0', 'GreaterThanOrEqualTo', '95')) #'Worker_PercentHITsApproved'
quals.add(Requirement(phasesQualID[host]['practice'], 'Exists'))
quals.add(Requirement(phasesQualID[host]['phase1'], 'DoesNotExist'))
if host != 'sandbox':
if testing:
quals.add(Requirement(testingQual, 'Exists'))
else:
quals.add(Requirement('00000000000000000040', 'GreaterThanOrEqualTo', '100')) #'Worker_NumberHITsApproved'
# quals.add(LocaleRequirement('In', ['US','IN'])) #locale
# quals.add(LocaleRequirement('EqualTo', 'IN')) #locale
i = 0
for i in range(1, num_hits+1):
create_hit_result = self.mturk.create_hit(
title=title,
description=description,
keywords=keywords,
question=questionform,
reward=boto.mturk.price.Price(amount=amount),
lifetime=datetime.timedelta(4),
duration=datetime.timedelta(minutes=30),
qualifications=quals,
response_groups=('Minimal', 'HITDetail'), # I don't know what response groups are
)
print('Posted ' + str(i) + ' further HITS @ $' + str(amount))
mtt = MturkTools()
#mtt.post_prac_hits(100, 0.20)
#mtt.post_futher_hits(100, 0.13)
# mtt.expire_remaining_hits()
# mtt.save_mturk_data()
mtt.get_all_user_data_from_aws()
mtt.parse_aws_to_csv()
#mtt.approve_hits()
#mtt.remove_qualifications('practice')
# mtt.mturk.notify_workers('AR72L0JX4D03W',
# 'Spindle Detection on MODA',
# 'Hi There!,'
# 'Thanks for completing spindle detection HITs. '
# 'Unfortunately the data for you HITs is missing. '
# 'This is most likely an error with the spindle detection program. '
# 'Can you help me debug this by replying with your operating system, browser type and version'
# 'and if you saw any strange behaviour in the spindle detection program.')
| bdyetton/MODA | Tools/MturkTools.py | Python | mit | 27,434 | [
"VisIt"
] | ab927917a1e3f2d3695d11297f243949ebb9de9fa029fe3a5c85b35a8f2d670d |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import sys
sys.path.insert(0, os.path.abspath('../../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx_click.ext',
'sphinx_autodoc_typehints',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pybel-tools'
copyright = '2016-2019, Charles Tapley Hoyt'
author = 'Charles Tapley Hoyt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = '0.9.2-dev'
parsed_version = re.match(
r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?:-(?P<release>[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+(?P<build>[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?',
release
)
version = parsed_version.expand(r'\g<major>.\g<minor>.\g<patch>')
if parsed_version.group('release'):
tags.add('prerelease')
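# Worked example: with release = '0.9.2-dev' the pattern above captures
# major='0', minor='9', patch='2' and release='dev', so version becomes
# '0.9.2' and the 'prerelease' tag is added.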
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pybel-toolsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pybel-tools.tex', 'PyBEL-Tools Documentation',
'Charles Tapley Hoyt', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pybel-tools', 'PyBEL-Tools Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pybel-tools', 'PyBEL-Tools Documentation',
author, 'pybel-tools', 'Algorithms and tools for PyBEL.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'pybel': ('https://pybel.readthedocs.io/en/latest/', None),
'pandas': ('https://pandas-docs.github.io/pandas-docs-travis/', None)
}
autodoc_member_order = 'bysource'
autoclass_content = 'both'
if os.environ.get('READTHEDOCS', None):
tags.add('readthedocs')
| pybel/pybel-tools | docs/source/conf.py | Python | mit | 4,949 | [
"Pybel"
] | 26bcdd0dd51573997bf9662db84c88476628f77f6aa796400db1f19f48139cc7 |
# Module adapting imagen classes for use with boxflow
#
#
from __future__ import absolute_import
import os
import base64
from PIL import Image
from io import BytesIO
import imagen
from imagen import PatternGenerator, Gaussian
from imagen import image
from imagen import random
from imagen.random import RandomGenerator
from imagen.transferfn import TransferFn
import numpy as np
import copy
import param
import fractions
from numbergen import TimeAware
param.Dynamic.time_fn(val=0.0, time_type=fractions.Fraction)
param.Dynamic.time_dependent = True
TimeAware.time_dependent = True # Why can't I set it on RandomGenerator?
from .inventory import Inventory, BoxType
class Viewport(PatternGenerator):
"""
Trivial wrapper around a pattern generator used to define a viewport
node.
"""
input = param.ClassSelector(class_=PatternGenerator,
default=imagen.Constant(), precedence=1)
x = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=-1)
y = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=-1)
orientation = param.Number(default=0.0,precedence=-1)
size = param.Number(default=1.0, precedence=-1)
scale = param.Number(default=1.0, precedence=-1)
offset = param.Number(default=0.0,precedence=-1)
output_fns = param.HookList(default=[], precedence=-1)
mask_shape = param.ClassSelector(param.Parameterized, default=None, precedence=-1)
def function(self,p):
return p.input()
class BinaryOp(PatternGenerator):
lhs = param.ClassSelector(class_=PatternGenerator,
default=imagen.Constant(), precedence=1)
rhs = param.ClassSelector(class_=PatternGenerator,
default=imagen.Constant(), precedence=1)
x = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=-1)
y = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=-1)
orientation = param.Number(default=0.0,precedence=-1)
size = param.Number(default=1.0, precedence=-1)
scale = param.Number(default=1.0, precedence=-1)
offset = param.Number(default=0.0,precedence=-1)
output_fns = param.HookList(default=[], precedence=-1)
mask_shape = param.ClassSelector(param.Parameterized, default=None, precedence=-1)
class Add(BinaryOp):
def function(self,p):
return (p.lhs + p.rhs)()
class Sub(BinaryOp):
def function(self,p):
return (p.lhs - p.rhs)()
class Mul(BinaryOp):
def function(self,p):
return (p.lhs * p.rhs)()
def image_to_base64(arr):
im = Image.fromarray((arr * 255))
buff = BytesIO()
im.convert('RGBA').save(buff, format='png')
buff.seek(0)
b64 = base64.b64encode(buff.read())
return 'data:image/png;base64,' + b64.decode('utf8')
def imagen_display(instance):
"""
Similar to a display hook. Returns a dictionary of extra content if
applicable.
"""
return {'b64':image_to_base64(instance())}
fpath, _ = os.path.split(__file__)
manhattan_path = os.path.abspath(os.path.join(fpath, '..',
'assets', 'manhattan.png'))
class FileImage(image.FileImage):
def __init__(self, *args, **kwargs):
super(FileImage, self).__init__(*args, **dict(kwargs,
filename=manhattan_path))
class Convolve(TransferFn):
"""
Imagen transfer function adapted to work without need sheet coordinates.
"""
kernel_pattern = param.ClassSelector(PatternGenerator,
default=Gaussian(size=0.05,aspect_ratio=1.0), doc="""
The kernel pattern used in the convolution. The default kernel
results in an isotropic Gaussian blur.""")
init_keys = param.List(default=[], constant=True)
def __init__(self, **params):
super(Convolve,self).__init__(**params)
def initialize(self, kernel_xdensity, kernel_ydensity, **kwargs):
super(Convolve, self).initialize(**kwargs)
pattern_copy = copy.deepcopy(self.kernel_pattern)
pattern_copy.set_matrix_dimensions(self.kernel_pattern.bounds,
kernel_xdensity,
kernel_ydensity)
self.kernel = pattern_copy()
def __call__(self, x):
if not hasattr(self, 'kernel'):
raise Exception("Convolve must be initialized before being called.")
fft1 = np.fft.fft2(x)
fft2 = np.fft.fft2(self.kernel, s=x.shape)
convolved_raw = np.fft.ifft2( fft1 * fft2).real
k_rows, k_cols = self.kernel.shape # ORIGINAL
rolled = np.roll(np.roll(convolved_raw, -(k_cols//2), axis=-1), -(k_rows//2), axis=-2)
convolved = rolled / float(self.kernel.sum())
x.fill(0.0)
x+=convolved
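# Minimal sketch (not part of the original module) of the frequency-domain
# trick used by Convolve above: multiplying the FFTs of an image and a kernel
# and inverting the result is equivalent to a spatial convolution. The helper
# name is hypothetical and exists purely for illustration.
def _fft_convolve_sketch(arr, kernel):
    """Return `arr` convolved with `kernel` (unnormalised, uncentred)."""
    fft_arr = np.fft.fft2(arr)
    fft_kernel = np.fft.fft2(kernel, s=arr.shape)
    return np.fft.ifft2(fft_arr * fft_kernel).real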
class Blur(PatternGenerator):
"""
Trivial wrapper around a pattern generator used to define a viewport
node.
"""
input = param.ClassSelector(class_=PatternGenerator,
default=imagen.Constant(), precedence=1)
blur_amount = param.Integer(default=10,softbounds=(10, 1000),precedence=1)
kernel = param.ClassSelector(PatternGenerator,
default=Gaussian(size=0.05,aspect_ratio=1.0), precedence=-1,
doc="""
The kernel pattern used in the convolution. The default kernel
results in an isotropic Gaussian blur.""")
x = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=-1)
y = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=-1)
orientation = param.Number(default=0.0,precedence=-1)
size = param.Number(default=1.0, precedence=-1)
scale = param.Number(default=1.0, precedence=-1)
offset = param.Number(default=0.0,precedence=-1)
output_fns = param.HookList(default=[], precedence=-1)
mask_shape = param.ClassSelector(param.Parameterized, default=None, precedence=-1)
def function(self,p):
arr = p.input()
conv = Convolve(kernel_pattern=p.kernel)
conv.initialize(p.blur_amount, p.blur_amount, kernel_pattern=p.kernel)
conv(arr)
return arr
class Invert(PatternGenerator):
"""
Trivial wrapper around a pattern generator used to define a viewport
node.
"""
input = param.ClassSelector(class_=PatternGenerator,
default=imagen.Constant(), precedence=1)
x = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=-1)
y = param.Number(default=0.0,softbounds=(-1.0,1.0),precedence=-1)
orientation = param.Number(default=0.0,precedence=-1)
size = param.Number(default=1.0, precedence=-1)
scale = param.Number(default=1.0, precedence=-1)
offset = param.Number(default=0.0,precedence=-1)
output_fns = param.HookList(default=[], precedence=-1)
mask_shape = param.ClassSelector(param.Parameterized, default=None, precedence=-1)
def function(self,p):
arr = p.input()
mina, maxa = arr.min(), arr.max()
zeros = np.zeros(arr.shape)
return (zeros - arr) + maxa
binary_ops = [ BoxType(Sub, untyped=['lhs','rhs']),
BoxType(Mul, untyped = ['lhs','rhs'])]
patterngenerators = [imagen.Disk, imagen.Gaussian, imagen.Line,
imagen.Spiral, imagen.Gabor, imagen.SineGrating,
imagen.ConcentricRings, imagen.Asterisk, FileImage,
imagen.random.GaussianRandom, imagen.random.GaussianCloud,
imagen.random.UniformRandom, imagen.random.UniformRandomInt]
vanilla_classes = [ BoxType(patgen,
nodetype='ImageNode',
display_fn=imagen_display)
for patgen in patterngenerators ]
imageops = [BoxType(Blur, nodetype='ImageNode',
untyped=['input'],
display_fn=imagen_display),
BoxType(Invert,
untyped=['input'])]
def load_imagen():
Inventory.add('imagen', vanilla_classes + binary_ops + imageops )
Inventory.add('imagen', BoxType(Viewport,
nodetype='Viewport',
untyped=['input'],
display_fn=imagen_display))
| ioam/boxflow | boxflow/interface/imagen.py | Python | bsd-3-clause | 8,311 | [
"Gaussian"
] | c963bc611605176c6ab0600b2ae8eb1da236734d12f2853103773a24c23a8190 |
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import shutil
import sys
import tarfile
from utils import logger
try:
if hasattr(sys, '_run_from_cmdl') is True:
raise ImportError
from pycompss.api.parameter import FILE_IN, FILE_OUT
from pycompss.api.task import task
# from pycompss.api.api import compss_wait_on
except ImportError:
logger.warn("[Warning] Cannot import \"pycompss\" API packages.")
logger.warn(" Using mock decorators.")
from utils.dummy_pycompss import FILE_IN, FILE_OUT # pylint: disable=ungrouped-imports
from utils.dummy_pycompss import task # pylint: disable=ungrouped-imports
# from utils.dummy_pycompss import compss_wait_on # pylint: disable=ungrouped-imports
from basic_modules.tool import Tool
from basic_modules.metadata import Metadata
from tool.aligner_utils import alignerUtils
from tool.common import common
# ------------------------------------------------------------------------------
class bwaIndexerTool(Tool):
"""
Tool for running indexers over a genome FASTA file
"""
def __init__(self, configuration=None):
"""
Initialise the tool with its configuration.
Parameters
----------
configuration : dict
a dictionary containing parameters that define how the operation
should be carried out, which are specific to each Tool.
"""
logger.info("BWA Indexer")
Tool.__init__(self)
if configuration is None:
configuration = {}
self.configuration.update(configuration)
@task(file_loc=FILE_IN, idx_out=FILE_OUT)
def bwa_indexer(self, file_loc, idx_out): # pylint: disable=no-self-use
"""
BWA Indexer
Parameters
----------
file_loc : str
            Location of the genome assembly FASTA file
idx_out : str
Location of the output index file
Returns
-------
bool
"""
au_handler = alignerUtils()
amb_loc, ann_loc, bwt_loc, pac_loc, sa_loc = au_handler.bwa_index_genome(file_loc)
try:
# tar.gz the index
logger.info("BWA - idx_out", idx_out, idx_out.replace('.tar.gz', ''))
idx_out_pregz = idx_out.replace('.tar.gz', '.tar')
index_dir = idx_out.replace('.tar.gz', '')
os.mkdir(index_dir)
shutil.move(amb_loc, index_dir)
shutil.move(ann_loc, index_dir)
shutil.move(bwt_loc, index_dir)
shutil.move(pac_loc, index_dir)
shutil.move(sa_loc, index_dir)
tar = tarfile.open(idx_out_pregz, "w")
tar.add(index_dir, arcname=os.path.split(index_dir)[1])
tar.close()
except (IOError, OSError) as msg:
logger.fatal("I/O error({0}) - BWA INDEXER: {1}".format(
msg.errno, msg.strerror))
return False
common.zip_file(idx_out_pregz)
shutil.rmtree(index_dir)
return True
def run(self, input_files, input_metadata, output_files):
"""
Function to run the BWA over a genome assembly FASTA file to generate
the matching index for use with the aligner
Parameters
----------
input_files : dict
List containing the location of the genome assembly FASTA file
meta_data : dict
output_files : dict
            List of output files generated
Returns
-------
output_files : dict
index : str
Location of the index file defined in the input parameters
output_metadata : dict
index : Metadata
Metadata relating to the index file
"""
self.bwa_indexer(
input_files["genome"],
output_files["index"]
)
output_metadata = {
"index": Metadata(
data_type="sequence_mapping_index_bwa",
file_type="TAR",
file_path=output_files["index"],
sources=[input_metadata["genome"].file_path],
taxon_id=input_metadata["genome"].taxon_id,
meta_data={
"assembly": input_metadata["genome"].meta_data["assembly"],
"tool": "bwa_indexer"
}
)
}
return (output_files, output_metadata)
# ------------------------------------------------------------------------------
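# Minimal, hypothetical usage sketch (the FASTA path, output archive name and
# assembly label below are placeholders, not part of the original tool; the
# Metadata fields mirror those produced in run() above). Running it for real
# requires bwa to be installed, so this only illustrates the call shape.
if __name__ == "__main__":
    GENOME_FA = "genome.fasta"
    INDEX_TAR = "genome.fasta.bwa.tar.gz"
    genome_meta = Metadata(
        data_type="assembly", file_type="FASTA", file_path=GENOME_FA,
        sources=[], taxon_id=9606, meta_data={"assembly": "GRCh38"})
    indexer = bwaIndexerTool()
    files_out, meta_out = indexer.run(
        {"genome": GENOME_FA}, {"genome": genome_meta}, {"index": INDEX_TAR})
    print(files_out)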
| Multiscale-Genomics/mg-process-fastq | tool/bwa_indexer.py | Python | apache-2.0 | 5,186 | [
"BWA"
] | 12bca1721455d88dbfe99c3c5bedc6c03015942d582a3c78ea7846b64b1e8645 |
########################################################################
# $HeadURL $
# File: RemoveFile.py
# Author: [email protected]
# Date: 2013/03/25 07:44:19
########################################################################
""" :mod: RemoveFile
================
.. module: RemoveFile
:synopsis: removeFile operation handler
.. moduleauthor:: [email protected]
removeFile operation handler
"""
__RCSID__ = "$Id $"
# #
# @file RemoveFile.py
# @author [email protected]
# @date 2013/03/25 07:44:27
# @brief Definition of RemoveFile class.
# # imports
import os
import re
# # from DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
########################################################################
class RemoveFile( DMSRequestOperationsBase ):
"""
.. class:: RemoveFile
remove file operation handler
"""
def __init__( self, operation = None, csPath = None ):
"""c'tor
:param self: self reference
:param Operation operation: Operation to execute
:param str csPath: CS path for this handler
"""
# # call base class ctor
DMSRequestOperationsBase.__init__( self, operation, csPath )
# # gMOnitor stuff goes here
gMonitor.registerActivity( "RemoveFileAtt", "File removals attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RemoveFileOK", "Successful file removals",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RemoveFileFail", "Failed file removals",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
# # re pattern for not existing files
self.reNotExisting = re.compile( r"(no|not) such file.*", re.IGNORECASE )
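    # e.g. backend errors such as "No such file or directory" match this
    # pattern (case-insensitive) and are later treated as already-removed files.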
def __call__( self ):
""" action for 'removeFile' operation """
# # get waiting files
waitingFiles = self.getWaitingFilesList()
fc = FileCatalog( self.operation.catalogList )
res = fc.getReplicas( [wf.LFN for wf in waitingFiles] )
if not res['OK']:
gMonitor.addMark( "RemoveFileAtt" )
gMonitor.addMark( "RemoveFileFail" )
return res
# We check the status of the SE from the LFN that are successful
# No idea what to do with the others...
succ = res['Value']['Successful']
targetSEs = set( [se for lfn in succ for se in succ[lfn] ] )
if targetSEs:
bannedTargets = self.checkSEsRSS( targetSEs, access = 'RemoveAccess' )
if not bannedTargets['OK']:
gMonitor.addMark( "RemoveFileAtt" )
gMonitor.addMark( "RemoveFileFail" )
return bannedTargets
if bannedTargets['Value']:
return S_OK( "%s targets are banned for removal" % ",".join( bannedTargets['Value'] ) )
# # prepare waiting file dict
toRemoveDict = dict( [ ( opFile.LFN, opFile ) for opFile in waitingFiles ] )
gMonitor.addMark( "RemoveFileAtt", len( toRemoveDict ) )
# # 1st step - bulk removal
self.log.debug( "bulk removal of %s files" % len( toRemoveDict ) )
bulkRemoval = self.bulkRemoval( toRemoveDict )
if not bulkRemoval["OK"]:
self.log.error( "Bulk file removal failed", bulkRemoval["Message"] )
else:
gMonitor.addMark( "RemoveFileOK", len( toRemoveDict ) - len( bulkRemoval["Value"] ) )
toRemoveDict = bulkRemoval["Value"]
# # 2nd step - single file removal
for lfn, opFile in toRemoveDict.items():
self.log.info( "removing single file %s" % lfn )
singleRemoval = self.singleRemoval( opFile )
if not singleRemoval["OK"]:
self.log.error( 'Error removing single file', singleRemoval["Message"] )
gMonitor.addMark( "RemoveFileFail", 1 )
else:
self.log.info( "file %s has been removed" % lfn )
gMonitor.addMark( "RemoveFileOK", 1 )
# # set
failedFiles = [ ( lfn, opFile ) for ( lfn, opFile ) in toRemoveDict.items()
if opFile.Status in ( "Failed", "Waiting" ) ]
if failedFiles:
self.operation.Error = "failed to remove %d files" % len( failedFiles )
return S_OK()
def bulkRemoval( self, toRemoveDict ):
""" bulk removal using request owner DN
:param dict toRemoveDict: { lfn: opFile, ... }
:return: S_ERROR or S_OK( { lfn: opFile, ... } ) -- dict with files still waiting to be removed
"""
bulkRemoval = self.dm.removeFile( toRemoveDict.keys(), force = True )
if not bulkRemoval["OK"]:
error = bulkRemoval["Message"]
self.log.error( "Bulk file removal failed", error )
self.operation.Error = error
for opFile in self.operation:
opFile.Error = error
return bulkRemoval
bulkRemoval = bulkRemoval["Value"]
# # filter results
for lfn, opFile in toRemoveDict.items():
if lfn in bulkRemoval["Successful"]:
opFile.Status = "Done"
elif lfn in bulkRemoval["Failed"]:
error = bulkRemoval["Failed"][lfn]
if type( error ) == dict:
error = ";".join( [ "%s-%s" % ( k, v ) for k, v in error.items() ] )
opFile.Error = error
if self.reNotExisting.search( opFile.Error ):
opFile.Status = "Done"
# # return files still waiting
toRemoveDict = dict( [ ( opFile.LFN, opFile ) for opFile in self.operation if opFile.Status == "Waiting" ] )
return S_OK( toRemoveDict )
def singleRemoval( self, opFile ):
""" remove single file
:param opFile: File instance
"""
# # try to remove with owner proxy
proxyFile = None
if "Write access not permitted for this credential" in opFile.Error:
if "DataManager" in self.shifter:
# # you're a data manager - get proxy for LFN and retry
saveProxy = os.environ["X509_USER_PROXY"]
try:
fileProxy = self.getProxyForLFN( opFile.LFN )
if not fileProxy["OK"]:
opFile.Error = "Error getting owner's proxy : %s" % fileProxy['Message']
else:
proxyFile = fileProxy["Value"]
self.log.info( "Trying to remove file with owner's proxy (file %s)" % proxyFile )
removeFile = self.dm.removeFile( opFile.LFN, force = True )
self.log.always( str( removeFile ) )
if not removeFile["OK"]:
opFile.Error = str( removeFile["Message"] )
if self.reNotExisting.search( str( removeFile["Message"] ).lower() ):
opFile.Status = "Done"
else:
removeFile = removeFile["Value"]
if opFile.LFN in removeFile["Failed"]:
error = removeFile["Failed"][opFile.LFN]
if type( error ) == dict:
error = ";".join( [ "%s-%s" % ( k, v ) for k, v in error.items() ] )
if self.reNotExisting.search( error ):
# This should never happen due to the "force" flag
opFile.Status = "Done"
else:
opFile.Error = error
else:
opFile.Status = "Done"
finally:
if proxyFile:
os.unlink( proxyFile )
# # put back request owner proxy to env
os.environ["X509_USER_PROXY"] = saveProxy
# # file removed? update its status to 'Done'
if opFile.Status == "Done":
return S_OK()
return S_ERROR( opFile.Error )
| vmendez/DIRAC | DataManagementSystem/Agent/RequestOperations/RemoveFile.py | Python | gpl-3.0 | 7,595 | [
"DIRAC"
] | 3f45b997f7d8f0ee48940e4e8235a32659e455f83366efa10e24bb781b7803d2 |
#!/usr/bin/env python3
# encoding: utf-8
from qcl.ccdata_xyz import ccData_xyz
from qcl.stretch import stretch
from cclib.parser import ccopen
def main():
stretch('product.out')
rfiles = ['r1.out', 'r2.out']
reactants = []
for rfile in rfiles:
tmp = ccopen(rfile).parse()
reactants.append(ccData_xyz(tmp.getattributes(), ccdataconvert=True))
for reactant in reactants:
reactant.build_zmatrix()
reactant.print_gzmat()
if __name__ == '__main__':
main()
| ben-albrecht/qcl | test/tree/a.py | Python | mit | 514 | [
"cclib"
] | 935adf2ad82db1426985e45c4b4aac0397a847807d4783aa2e40b8cdbc82bd8e |
# (c) 2013-2014, Michael DeHaan <[email protected]>
# (c) 2015 Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import json
import os
import shlex
import zipfile
import re
import pkgutil
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.module_utils.compat.importlib import import_module
from ansible.plugins.loader import module_utils_loader
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
from ansible.utils.display import Display
try:
import importlib.util
import importlib.machinery
imp = None
except ImportError:
import imp
# if we're on a Python that doesn't have FNFError, redefine it as IOError (since that's what we'll see)
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
display = Display()
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
_ANSIBALLZ_WRAPPER = True # For test-module.py script to tell this is an ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _ansiballz_main():
%(rlimit)s
import os
import os.path
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read (realpath calls abspath)
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import runpy
import shutil
import tempfile
import zipfile
if sys.version_info < (3,):
PY3 = False
else:
PY3 = True
ZIPDATA = """%(zipdata)s"""
# Note: temp_path isn't needed once we switch to zipimport
def invoke_module(modlib_path, temp_path, json_params):
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(modlib_path, mode='a')
# py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
z.writestr(zinfo, sitecustomize)
z.close()
# Put the zipped up module_utils we got from the controller first in the python path so that we
# can monkeypatch the right basic
sys.path.insert(0, modlib_path)
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
%(coverage)s
# Run the module! By importing it as '__main__', it thinks it is executing as a script
runpy.run_module(mod_name='%(module_fqn)s', init_globals=None, run_name='__main__', alter_sys=True)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
if command == 'excommunicate':
print('The excommunicate debug command is deprecated and will be removed in 2.11. Use execute instead.')
command = 'execute'
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# Set pythonpath to the debug dir
sys.path.insert(0, basedir)
# read in the args file which the user may have modified
with open(args_path, 'rb') as f:
json_params = f.read()
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
# Run the module! By importing it as '__main__', it thinks it is executing as a script
runpy.run_module(mod_name='%(module_fqn)s', init_globals=None, run_name='__main__', alter_sys=True)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
# Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
# (this helps ansible-test produce coverage stats)
temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
with open(zipped_mod, 'wb') as modlib:
modlib.write(base64.b64decode(ZIPDATA))
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
# Note: temp_path isn't needed once we switch to zipimport
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except (NameError, OSError):
# tempdir creation probably failed
pass
sys.exit(exitcode)
if __name__ == '__main__':
_ansiballz_main()
'''
ANSIBALLZ_COVERAGE_TEMPLATE = '''
# Access to the working directory is required by coverage.
# Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
try:
os.getcwd()
except OSError:
os.chdir('/')
os.environ['COVERAGE_FILE'] = '%(coverage_output)s'
import atexit
try:
import coverage
except ImportError:
print('{"msg": "Could not import `coverage` module.", "failed": true}')
sys.exit(1)
cov = coverage.Coverage(config_file='%(coverage_config)s')
def atexit_coverage():
cov.stop()
cov.save()
atexit.register(atexit_coverage)
cov.start()
'''
ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = '''
try:
if PY3:
import importlib.util
if importlib.util.find_spec('coverage') is None:
raise ImportError
else:
import imp
imp.find_module('coverage')
except ImportError:
print('{"msg": "Could not find `coverage` module.", "failed": true}')
sys.exit(1)
'''
ANSIBALLZ_RLIMIT_TEMPLATE = '''
import resource
existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# adjust soft limit subject to existing hard limit
requested_soft = min(existing_hard, %(rlimit_nofile)d)
if requested_soft != existing_soft:
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard))
except ValueError:
# some platforms (eg macOS) lie about their hard limit
pass
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
if C.DEFAULT_KEEP_REMOTE_FILES:
# Keep comments when KEEP_REMOTE_FILES is set. That way users will see
# the comments with some nice usage instructions
ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
# ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
# dirname(dirname(dirname(site-packages/ansible/executor/module_common.py))) == site-packages
# Do this instead of getting site-packages from distutils.sysconfig so we work when we
# haven't been installed
site_packages = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
CORE_LIBRARY_PATH_RE = re.compile(r'%s/(?P<path>ansible/modules/.*)\.(py|ps1)$' % site_packages)
COLLECTION_PATH_RE = re.compile(r'/(?P<path>ansible_collections/[^/]+/[^/]+/plugins/modules/.*)\.(py|ps1)$')
# Detect new-style Python modules by looking for required imports:
# import ansible_collections.[my_ns.my_col.plugins.module_utils.my_module_util]
# from ansible_collections.[my_ns.my_col.plugins.module_utils import my_module_util]
# import ansible.module_utils[.basic]
# from ansible.module_utils[ import basic]
# from ansible.module_utils[.basic import AnsibleModule]
# from ..module_utils[ import basic]
# from ..module_utils[.basic import AnsibleModule]
NEW_STYLE_PYTHON_MODULE_RE = re.compile(
# Relative imports
br'(?:from +\.{2,} *module_utils.* +import |'
# Collection absolute imports:
br'from +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.* +import |'
br'import +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.*|'
# Core absolute imports
br'from +ansible\.module_utils.* +import |'
br'import +ansible\.module_utils\.)'
)
class ModuleDepFinder(ast.NodeVisitor):
def __init__(self, module_fqn, *args, **kwargs):
"""
Walk the ast tree for the python module.
:arg module_fqn: The fully qualified name to reach this module in dotted notation.
example: ansible.module_utils.basic
Save submodule[.submoduleN][.identifier] into self.submodules
when they are from ansible.module_utils or ansible_collections packages
self.submodules will end up with tuples like:
- ('ansible', 'module_utils', 'basic',)
- ('ansible', 'module_utils', 'urls', 'fetch_url')
- ('ansible', 'module_utils', 'database', 'postgres')
- ('ansible', 'module_utils', 'database', 'postgres', 'quote')
- ('ansible', 'module_utils', 'database', 'postgres', 'quote')
- ('ansible_collections', 'my_ns', 'my_col', 'plugins', 'module_utils', 'foo')
It's up to calling code to determine whether the final element of the
tuple are module names or something else (function, class, or variable names)
.. seealso:: :python3:class:`ast.NodeVisitor`
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self.submodules = set()
self.module_fqn = module_fqn
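    # Illustrative usage (mirrors how recursive_finder() below drives this
    # visitor; the module source and FQN here are placeholders):
    #     tree = ast.parse(module_source)
    #     finder = ModuleDepFinder('ansible.module_utils.basic')
    #     finder.visit(tree)
    #     # finder.submodules now holds tuples such as
    #     # ('ansible', 'module_utils', 'six')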
def visit_Import(self, node):
"""
Handle import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
We save these as interesting submodules when the imported library is in ansible.module_utils
or ansible.collections
"""
for alias in node.names:
if (alias.name.startswith('ansible.module_utils.') or
alias.name.startswith('ansible_collections.')):
py_mod = tuple(alias.name.split('.'))
self.submodules.add(py_mod)
self.generic_visit(node)
def visit_ImportFrom(self, node):
"""
Handle from ansible.module_utils.MODLIB import [.MODLIBn] [as asname]
Also has to handle relative imports
We save these as interesting submodules when the imported library is in ansible.module_utils
or ansible.collections
"""
# FIXME: These should all get skipped:
# from ansible.executor import module_common
# from ...executor import module_common
# from ... import executor (Currently it gives a non-helpful error)
if node.level > 0:
if self.module_fqn:
parts = tuple(self.module_fqn.split('.'))
if node.module:
# relative import: from .module import x
node_module = '.'.join(parts[:-node.level] + (node.module,))
else:
# relative import: from . import x
node_module = '.'.join(parts[:-node.level])
else:
# fall back to an absolute import
node_module = node.module
else:
# absolute import: from module import x
node_module = node.module
# Specialcase: six is a special case because of its
# import logic
py_mod = None
if node.names[0].name == '_six':
self.submodules.add(('_six',))
elif node_module.startswith('ansible.module_utils'):
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
# from ansible.module_utils import MODULE1 [,MODULEn] [as asname]
py_mod = tuple(node_module.split('.'))
elif node_module.startswith('ansible_collections.'):
if node_module.endswith('plugins.module_utils') or '.plugins.module_utils.' in node_module:
# from ansible_collections.ns.coll.plugins.module_utils import MODULE [as aname] [,MODULE2] [as aname]
# from ansible_collections.ns.coll.plugins.module_utils.MODULE import IDENTIFIER [as aname]
# FIXME: Unhandled cornercase (needs to be ignored):
# from ansible_collections.ns.coll.plugins.[!module_utils].[FOO].plugins.module_utils import IDENTIFIER
py_mod = tuple(node_module.split('.'))
else:
# Not from module_utils so ignore. for instance:
# from ansible_collections.ns.coll.plugins.lookup import IDENTIFIER
pass
if py_mod:
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
self.generic_visit(node)
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
with open(path, 'rb') as fd:
data = fd.read()
return data
def _get_shebang(interpreter, task_vars, templar, args=tuple()):
"""
Note not stellar API:
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_name = os.path.basename(interpreter).strip()
# FUTURE: add logical equivalence for python3 in the case of py3-only modules
# check for first-class interpreter config
interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper()
if C.config.get_configuration_definitions().get(interpreter_config_key):
# a config def exists for this interpreter type; consult config for the value
interpreter_out = C.config.get_config_value(interpreter_config_key, variables=task_vars)
discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name
interpreter_out = templar.template(interpreter_out.strip())
facts_from_task_vars = task_vars.get('ansible_facts', {})
# handle interpreter discovery if requested
if interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
if discovered_interpreter_config not in facts_from_task_vars:
# interpreter discovery is desired, but has not been run for this host
raise InterpreterDiscoveryRequiredError("interpreter discovery needed",
interpreter_name=interpreter_name,
discovery_mode=interpreter_out)
else:
interpreter_out = facts_from_task_vars[discovered_interpreter_config]
else:
# a config def does not exist for this interpreter type; consult vars for a possible direct override
interpreter_config = u'ansible_%s_interpreter' % interpreter_name
if interpreter_config not in task_vars:
return None, interpreter
interpreter_out = templar.template(task_vars[interpreter_config].strip())
shebang = u'#!' + interpreter_out
if args:
shebang = shebang + u' ' + u' '.join(args)
return shebang, interpreter_out
class ModuleInfo:
def __init__(self, name, paths):
self.py_src = False
self.pkg_dir = False
path = None
if imp is None:
self._info = info = importlib.machinery.PathFinder.find_spec(name, paths)
if info is not None:
self.py_src = os.path.splitext(info.origin)[1] in importlib.machinery.SOURCE_SUFFIXES
self.pkg_dir = info.origin.endswith('/__init__.py')
path = info.origin
else:
raise ImportError("No module named '%s'" % name)
else:
self._info = info = imp.find_module(name, paths)
self.py_src = info[2][2] == imp.PY_SOURCE
self.pkg_dir = info[2][2] == imp.PKG_DIRECTORY
if self.pkg_dir:
path = os.path.join(info[1], '__init__.py')
else:
path = info[1]
self.path = path
def get_source(self):
if imp and self.py_src:
try:
return self._info[0].read()
finally:
self._info[0].close()
return _slurp(self.path)
def __repr__(self):
return 'ModuleInfo: py_src=%s, pkg_dir=%s, path=%s' % (self.py_src, self.pkg_dir, self.path)
class CollectionModuleInfo(ModuleInfo):
def __init__(self, name, paths):
self._mod_name = name
self.py_src = True
# FIXME: Implement pkg_dir so that we can place __init__.py files
self.pkg_dir = False
for path in paths:
self._package_name = '.'.join(path.split('/'))
try:
self.get_source()
except FileNotFoundError:
pass
else:
self.path = os.path.join(path, self._mod_name) + '.py'
break
else:
# FIXME (nitz): implement package fallback code
raise ImportError('unable to load collection-hosted module_util'
' {0}.{1}'.format(to_native(self._package_name),
to_native(name)))
def get_source(self):
# FIXME (nitz): need this in py2 for some reason TBD, but we shouldn't (get_data delegates
# to wrong loader without it)
pkg = import_module(self._package_name)
data = pkgutil.get_data(to_native(self._package_name), to_native(self._mod_name + '.py'))
return data
def recursive_finder(name, module_fqn, data, py_module_names, py_module_cache, zf):
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
the module and its module_utils files needs.
:arg name: Name of the python module we're examining
:arg module_fqn: Fully qualified name of the python module we're scanning
    :arg py_module_names: set of the fully qualified module names (represented as a tuple of their
        FQN with __init__ appended if the module is also a python package). Presence of a FQN in
this set means that we've already examined it for module_util deps.
:arg py_module_cache: map python module names (represented as a tuple of their FQN with __init__
appended if the module is also a python package) to a tuple of the code in the module and
the pathname the module would have inside of a Python toplevel (like site-packages)
:arg zf: An open :python:class:`zipfile.ZipFile` object that holds the Ansible module payload
which we're assembling
"""
# Parse the module and find the imports of ansible.module_utils
try:
tree = ast.parse(data)
except (SyntaxError, IndentationError) as e:
raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))
finder = ModuleDepFinder(module_fqn)
finder.visit(tree)
#
    # Determine which of the imports we've found are modules (vs. class, function,
    # or variable names) for packages
#
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
    # FIXME: Do we still need this?  It feels like module_utils_loader should include
# _MODULE_UTILS_PATH
module_utils_paths.append(_MODULE_UTILS_PATH)
normalized_modules = set()
# Loop through the imports that we've found to normalize them
# Exclude paths that match with paths we've already processed
# (Have to exclude them a second time once the paths are processed)
for py_module_name in finder.submodules.difference(py_module_names):
module_info = None
if py_module_name[0:3] == ('ansible', 'module_utils', 'six'):
# Special case the python six library because it messes with the
# import process in an incompatible way
module_info = ModuleInfo('six', module_utils_paths)
py_module_name = ('ansible', 'module_utils', 'six')
idx = 0
elif py_module_name[0:3] == ('ansible', 'module_utils', '_six'):
# Special case the python six library because it messes with the
# import process in an incompatible way
module_info = ModuleInfo('_six', [os.path.join(p, 'six') for p in module_utils_paths])
py_module_name = ('ansible', 'module_utils', 'six', '_six')
idx = 0
elif py_module_name[0] == 'ansible_collections':
# FIXME (nitz): replicate module name resolution like below for granular imports
for idx in (1, 2):
if len(py_module_name) < idx:
break
try:
# this is a collection-hosted MU; look it up with pkgutil.get_data()
module_info = CollectionModuleInfo(py_module_name[-idx],
[os.path.join(*py_module_name[:-idx])])
break
except ImportError:
continue
elif py_module_name[0:2] == ('ansible', 'module_utils'):
# Need to remove ansible.module_utils because PluginLoader may find different paths
# for us to look in
relative_module_utils_dir = py_module_name[2:]
# Check whether either the last or the second to last identifier is
# a module name
for idx in (1, 2):
if len(relative_module_utils_dir) < idx:
break
try:
module_info = ModuleInfo(py_module_name[-idx],
[os.path.join(p, *relative_module_utils_dir[:-idx]) for p in module_utils_paths])
break
except ImportError:
continue
else:
# If we get here, it's because of a bug in ModuleDepFinder. If we get a reproducer we
# should then fix ModuleDepFinder
display.warning('ModuleDepFinder improperly found a non-module_utils import %s'
% [py_module_name])
continue
# Could not find the module. Construct a helpful error message.
if module_info is None:
msg = ['Could not find imported module support code for %s. Looked for' % (name,)]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if isinstance(module_info, CollectionModuleInfo):
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# HACK: maybe surface collection dirs in here and use existing find_module code?
normalized_name = py_module_name
normalized_data = module_info.get_source()
normalized_path = os.path.join(*py_module_name)
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
# HACK: walk back up the package hierarchy to pick up package inits; this won't do the right thing
# for actual packages yet...
accumulated_pkg_name = []
for pkg in py_module_name[:-1]:
accumulated_pkg_name.append(pkg) # we're accumulating this across iterations
normalized_name = tuple(accumulated_pkg_name[:] + ['__init__']) # extra machinations to get a hashable type (list is not)
if normalized_name not in py_module_cache:
normalized_path = os.path.join(*accumulated_pkg_name)
# HACK: possibly preserve some of the actual package file contents; problematic for extend_paths and others though?
normalized_data = ''
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
else:
# Found a byte compiled file rather than source. We cannot send byte
# compiled over the wire as the python version might be different.
# imp.find_module seems to prefer to return source packages so we just
# error out if imp.find_module returns byte compiled files (This is
# fragile as it depends on undocumented imp.find_module behaviour)
if not module_info.pkg_dir and not module_info.py_src:
msg = ['Could not find python source for imported module support code for %s. Looked for' % name]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# If not already processed then we've got work to do
# If not in the cache, then read the file into the cache
# We already have a file handle for the module open so it makes
# sense to read it now
if py_module_name not in py_module_cache:
if module_info.pkg_dir:
# Read the __init__.py instead of the module file as this is
# a python package
normalized_name = py_module_name + ('__init__',)
if normalized_name not in py_module_names:
normalized_data = module_info.get_source()
py_module_cache[normalized_name] = (normalized_data, module_info.path)
normalized_modules.add(normalized_name)
else:
normalized_name = py_module_name
if normalized_name not in py_module_names:
normalized_data = module_info.get_source()
py_module_cache[normalized_name] = (normalized_data, module_info.path)
normalized_modules.add(normalized_name)
#
# Make sure that all the packages that this module is a part of
# are also added
#
for i in range(1, len(py_module_name)):
py_pkg_name = py_module_name[:-i] + ('__init__',)
if py_pkg_name not in py_module_names:
# Need to remove ansible.module_utils because PluginLoader may find
# different paths for us to look in
relative_module_utils = py_pkg_name[2:]
pkg_dir_info = ModuleInfo(relative_module_utils[-1],
[os.path.join(p, *relative_module_utils[:-1]) for p in module_utils_paths])
normalized_modules.add(py_pkg_name)
py_module_cache[py_pkg_name] = (pkg_dir_info.get_source(), pkg_dir_info.path)
# FIXME: Currently the AnsiBallZ wrapper monkeypatches module args into a global
# variable in basic.py. If a module doesn't import basic.py, then the AnsiBallZ wrapper will
    # traceback when it tries to monkeypatch.  So, for now, we have to unconditionally include
# basic.py.
#
# In the future we need to change the wrapper to monkeypatch the args into a global variable in
# their own, separate python module. That way we won't require basic.py. Modules which don't
# want basic.py can import that instead. AnsibleModule will need to change to import the vars
# from the separate python module and mirror the args into its global variable for backwards
# compatibility.
if ('ansible', 'module_utils', 'basic',) not in py_module_names:
pkg_dir_info = ModuleInfo('basic', module_utils_paths)
normalized_modules.add(('ansible', 'module_utils', 'basic',))
py_module_cache[('ansible', 'module_utils', 'basic',)] = (pkg_dir_info.get_source(), pkg_dir_info.path)
# End of AnsiballZ hack
#
# iterate through all of the ansible.module_utils* imports that we haven't
# already checked for new imports
#
# set of modules that we haven't added to the zipfile
unprocessed_py_module_names = normalized_modules.difference(py_module_names)
for py_module_name in unprocessed_py_module_names:
py_module_path = os.path.join(*py_module_name)
py_module_file_name = '%s.py' % py_module_path
zf.writestr(py_module_file_name, py_module_cache[py_module_name][0])
display.vvvvv("Using module_utils file %s" % py_module_cache[py_module_name][1])
# Add the names of the files we're scheduling to examine in the loop to
# py_module_names so that we don't re-examine them in the next pass
# through recursive_finder()
py_module_names.update(unprocessed_py_module_names)
for py_module_file in unprocessed_py_module_names:
next_fqn = '.'.join(py_module_file)
recursive_finder(py_module_file[-1], next_fqn, py_module_cache[py_module_file][0],
py_module_names, py_module_cache, zf)
# Save memory; the file won't have to be read again for this ansible module.
del py_module_cache[py_module_file]
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
def _get_ansible_module_fqn(module_path):
"""
Get the fully qualified name for an ansible module based on its pathname
remote_module_fqn is the fully qualified name. Like ansible.modules.system.ping
Or ansible_collections.Namespace.Collection_name.plugins.modules.ping
.. warning:: This function is for ansible modules only. It won't work for other things
(non-module plugins, etc)
"""
remote_module_fqn = None
# Is this a core module?
match = CORE_LIBRARY_PATH_RE.search(module_path)
if not match:
# Is this a module in a collection?
match = COLLECTION_PATH_RE.search(module_path)
# We can tell the FQN for core modules and collection modules
if match:
path = match.group('path')
if '.' in path:
# FQNs must be valid as python identifiers. This sanity check has failed.
# we could check other things as well
raise ValueError('Module name (or path) was not a valid python identifier')
remote_module_fqn = '.'.join(path.split('/'))
else:
# Currently we do not handle modules in roles so we can end up here for that reason
raise ValueError("Unable to determine module's fully qualified name")
return remote_module_fqn
def _add_module_to_zip(zf, remote_module_fqn, b_module_data):
"""Add a module from ansible or from an ansible collection into the module zip"""
module_path_parts = remote_module_fqn.split('.')
# Write the module
module_path = '/'.join(module_path_parts) + '.py'
zf.writestr(module_path, b_module_data)
# Write the __init__.py's necessary to get there
if module_path_parts[0] == 'ansible':
# The ansible namespace is setup as part of the module_utils setup...
start = 2
existing_paths = frozenset()
else:
# ... but ansible_collections and other toplevels are not
start = 1
existing_paths = frozenset(zf.namelist())
for idx in range(start, len(module_path_parts)):
package_path = '/'.join(module_path_parts[:idx]) + '/__init__.py'
# If a collections module uses module_utils from a collection then most packages will have already been added by recursive_finder.
if package_path in existing_paths:
continue
# Note: We don't want to include more than one ansible module in a payload at this time
# so no need to fill the __init__.py with namespace code
zf.writestr(package_path, b'')
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
become_method, become_user, become_password, become_flags, environment):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ansiballz to format the module itself.
if _is_binary(b_module_data):
module_substyle = module_style = 'binary'
elif REPLACER in b_module_data:
# Do REPLACER before from ansible.module_utils because we need make sure
# we substitute "from ansible.module_utils basic" for REPLACER
module_style = 'new'
module_substyle = 'python'
b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif NEW_STYLE_PYTHON_MODULE_RE.search(b_module_data):
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \
or re.search(b'#AnsibleRequires -Powershell', b_module_data, re.IGNORECASE) \
or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE):
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in b_module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return b_module_data, module_style, shebang
output = BytesIO()
py_module_names = set()
try:
remote_module_fqn = _get_ansible_module_fqn(module_path)
except ValueError:
# Modules in roles currently are not found by the fqn heuristic so we
# fallback to this. This means that relative imports inside a module from
# a role may fail. Absolute imports should be used for future-proofness.
# People should start writing collections instead of modules in roles so we
# may never fix this
display.debug('ANSIBALLZ: Could not determine module FQN')
remote_module_fqn = 'ansible.modules.%s' % module_name
if module_substyle == 'python':
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
try:
python_repred_params = repr(json.dumps(params))
except TypeError as e:
raise AnsibleError("Unable to pass options to module, they must be JSON serializable: %s" % to_native(e))
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
with open(cached_module_filename, 'rb') as module_data:
zipdata = module_data.read()
else:
if module_name in action_write_locks.action_write_locks:
display.debug('ANSIBALLZ: Using lock for %s' % module_name)
lock = action_write_locks.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
# Lock specifically for this module. Use the "unexpected
# module" lock instead
display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
lock = action_write_locks.action_write_locks[None]
display.debug('ANSIBALLZ: Acquiring lock')
with lock:
display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# py_module_cache maps python module names to a tuple of the code in the module
# and the pathname to the module. See the recursive_finder() documentation for
# more info.
# Here we pre-load it with modules which we create without bothering to
# read from actual files (In some cases, these need to differ from what ansible
# ships because they're namespace packages in the module)
py_module_cache = {
('ansible', '__init__',): (
b'from pkgutil import extend_path\n'
b'__path__=extend_path(__path__,__name__)\n'
b'__version__="' + to_bytes(__version__) +
b'"\n__author__="' + to_bytes(__author__) + b'"\n',
'ansible/__init__.py'),
('ansible', 'module_utils', '__init__',): (
b'from pkgutil import extend_path\n'
b'__path__=extend_path(__path__,__name__)\n',
'ansible/module_utils/__init__.py')}
for (py_module_name, (file_data, filename)) in py_module_cache.items():
zf.writestr(filename, file_data)
# py_module_names keeps track of which modules we've already scanned for
# module_util dependencies
py_module_names.add(py_module_name)
# Returning the ast tree is a temporary hack. We need to know if the module has
# a main() function or not as we are deprecating new-style modules without
# main(). Because parsing the ast is expensive, return it from recursive_finder
# instead of reparsing. Once the deprecation is over and we remove that code,
# also remove returning of the ast tree.
recursive_finder(module_name, remote_module_fqn, b_module_data, py_module_names,
py_module_cache, zf)
display.debug('ANSIBALLZ: Writing module into payload')
_add_module_to_zip(zf, remote_module_fqn, b_module_data)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.makedirs(lookup_path)
display.debug('ANSIBALLZ: Writing module')
with open(cached_module_filename + '-part', 'wb') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ANSIBALLZ: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
if zipdata is None:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
with open(cached_module_filename, 'rb') as f:
zipdata = f.read()
except IOError:
raise AnsibleError('A different worker process failed to create module file. '
'Look at traceback for that process for debugging information.')
zipdata = to_text(zipdata, errors='surrogate_or_strict')
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar)
if shebang is None:
shebang = u'#!/usr/bin/python'
# FUTURE: the module cache entry should be invalidated if we got this value from a host-dependent source
rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars)
if not isinstance(rlimit_nofile, int):
rlimit_nofile = int(templar.template(rlimit_nofile))
if rlimit_nofile:
rlimit = ANSIBALLZ_RLIMIT_TEMPLATE % dict(
rlimit_nofile=rlimit_nofile,
)
else:
rlimit = ''
coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
if coverage_config:
coverage_output = os.environ['_ANSIBLE_COVERAGE_OUTPUT']
if coverage_output:
# Enable code coverage analysis of the module.
# This feature is for internal testing and may change without notice.
coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
coverage_config=coverage_config,
coverage_output=coverage_output,
)
else:
# Verify coverage is available without importing it.
# This will detect when a module would fail with coverage enabled with minimal overhead.
coverage = ANSIBALLZ_COVERAGE_CHECK_TEMPLATE
else:
coverage = ''
now = datetime.datetime.utcnow()
output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
module_fqn=remote_module_fqn,
params=python_repred_params,
shebang=shebang,
coding=ENCODING_STRING,
year=now.year,
month=now.month,
day=now.day,
hour=now.hour,
minute=now.minute,
second=now.second,
coverage=coverage,
rlimit=rlimit,
)))
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
# it can fail in the presence of the UTF8 BOM commonly added by
# Windows text editors
shebang = u'#!powershell'
# create the common exec wrapper payload and set that as the module_data
# bytes
b_module_data = ps_manifest._create_powershell_wrapper(
b_module_data, module_path, module_args, environment,
async_timeout, become, become_method, become_user, become_password,
become_flags, module_substyle, task_vars, remote_module_fqn
)
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
# ansiballz) If we remove them from jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
return (b_module_data, module_style, shebang)
def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
become_method=None, become_user=None, become_password=None, become_flags=None, environment=None):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
properties not available here.
"""
task_vars = {} if task_vars is None else task_vars
environment = {} if environment is None else environment
with open(module_path, 'rb') as f:
# read in the module source
b_module_data = f.read()
(b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
async_timeout=async_timeout, become=become, become_method=become_method,
become_user=become_user, become_password=become_password, become_flags=become_flags,
environment=environment)
if module_style == 'binary':
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
elif shebang is None:
b_lines = b_module_data.split(b"\n", 1)
if b_lines[0].startswith(b"#!"):
b_shebang = b_lines[0].strip()
# shlex.split on python-2.6 needs bytes. On python-3.x it needs text
args = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))
# _get_shebang() takes text strings
args = [to_text(a, errors='surrogate_or_strict') for a in args]
interpreter = args[0]
b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:])[0],
errors='surrogate_or_strict', nonstring='passthru')
if b_new_shebang:
b_lines[0] = b_shebang = b_new_shebang
if os.path.basename(interpreter).startswith(u'python'):
b_lines.insert(1, b_ENCODING_STRING)
shebang = to_text(b_shebang, nonstring='passthru', errors='surrogate_or_strict')
else:
# No shebang, assume a binary module?
pass
b_module_data = b"\n".join(b_lines)
return (b_module_data, module_style, shebang)
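# Descriptive note on get_action_args_with_defaults() below: it merges templated
# module_defaults -- group-level entries (group/<name>) first, then
# action-specific entries -- beneath the explicitly supplied task args, which
# always take precedence.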
def get_action_args_with_defaults(action, args, defaults, templar):
tmp_args = {}
module_defaults = {}
# Merge latest defaults into dict, since they are a list of dicts
if isinstance(defaults, list):
for default in defaults:
module_defaults.update(default)
# if I actually have defaults, template and merge
if module_defaults:
module_defaults = templar.template(module_defaults)
# deal with configured group defaults first
if action in C.config.module_defaults_groups:
for group in C.config.module_defaults_groups.get(action, []):
tmp_args.update((module_defaults.get('group/{0}'.format(group)) or {}).copy())
# handle specific action defaults
if action in module_defaults:
tmp_args.update(module_defaults[action].copy())
# direct args override all
tmp_args.update(args)
return tmp_args
| ilpianista/ansible | lib/ansible/executor/module_common.py | Python | gpl-3.0 | 60,557 | [
"VisIt"
] | 9b8818ef015edf7c45884b92840dc1cd6da1e71dc40e648a53fec777ff104b3f |
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by XXX.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
[email protected]
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
[email protected]
Updated versions can be found at http://cars.uchicago.edu/software
Sergey Koposov converted the Mark's Python version from Numeric to numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
[email protected]
Updated versions can be found at http://code.google.com/p/astrolibpy/source/browse/trunk/
Bug Fixes:
2011-08-26 NPMKuin (MSSL/UCL) some clarification in the documentation.
2013-11-19 NPMKuin (MSSL/UCL) changed import scipy.lib.blas[deprecated] to scipy.linalg.blas
changed trace of array in qrsolve() to a copy since it needs to be writeable.
Known bugs:
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
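For instance (an illustrative numpy sketch, not part of MPFIT itself; the
model function F and the arrays x, y and err are assumed to exist already):
import numpy
deviates = (y - F(x)) / err      # one weighted deviate per data point
chisq = numpy.sum(deviates**2)   # i.e. TOTAL( DEVIATES^2 )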
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with default
# flag.
model = F(x, p) # put here the function for the model.
#
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
# y(x) are the measured values, and err(x) are the errors in y.
#
return [status, (y-model)/err]
See below for applications with analytical derivatives.
Here 'x', 'y' and 'err' are the variables of the problem in the example above.
Their names can be changed as a passed parameter to mpfit. So they are
suggestive but not required. Any set of variables can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Parameters of the problem which
need optimization are then passed using the parameter list 'p'.
Use MPFITFUN and MPFITEXPR if you need ideas on how to do that.
The function *must* accept a parameter list, 'p'.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional numpy array of type float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
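As an illustrative sketch (the name myfunct_guarded and the toy model are
arbitrary), a user function can report such a fatal condition by returning
a negative status when, say, the model becomes non-finite:

import numpy

def myfunct_guarded(p, fjac=None, x=None, y=None, err=None):
    model = p[0] + p[1]*x                      # an assumed toy model
    if not numpy.all(numpy.isfinite(model)):
        return [-1, numpy.zeros_like(y)]       # negative status: MPFIT stops
    return [0, (y - model)/err]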
To call the user function, you will need something like:
import mpfit
#import numpy.oldnumeric as Numeric
#
#... define your parameters
par = (p1,p2,p3,...)
#
#... get your data to define
xx = (ordinate)
yy = (measurements for each x)
e_yy = (errors in each y)
f = {'x':xx,'y':yy,'err':e_yy}
#
Z = mpfit.mpfit(myfunct, par, functkw=f, quiet=True)
results are returned in Z.status, Z.params, Z.perror, etc.
And if you want to limit the parameters, add a list of dictionaries
in the parinfo keyword with the limits, etcetera.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
if fjac is not None:
pderiv = numpy.zeros([len(x), len(p)], dtype=float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return [status, (y-model)/err, pderiv]
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives the parameter FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
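As a concrete sketch for the one-dimensional case (the straight-line model
and the name myfunct_analytic are illustrative assumptions, not part of MPFIT):

import numpy

def myfunct_analytic(p, fjac=None, x=None, y=None, err=None):
    model = p[0] + p[1]*x
    status = 0
    pderiv = None
    if fjac is not None:
        # m x n array of model derivatives: d(model)/dp[j] goes in column j
        pderiv = numpy.zeros([len(x), len(p)], dtype=float)
        if fjac[0]:                  # parameter 0 is free
            pderiv[:, 0] = 1.0
        if fjac[1]:                  # parameter 1 is free
            pderiv[:, 1] = x
    return [status, (y - model)/err, pderiv]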
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will chose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo[2]['tied'] = '2 * p[1]'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example with 5 parameters :
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]},\
{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]},\
{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]},\
{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]},\
{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
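Building on that example, an illustrative extension (the tie expression and
the step cap are arbitrary choices, not defaults) could constrain parameter 2
to track parameter 1 and limit how fast parameter 3 may move:

parinfo[2]['tied'] = '2. * p[1]'   # parameter 2 is always twice parameter 1
parinfo[3]['mpmaxstep'] = 10.      # parameter 3 changes by at most 10 per iteration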
EXAMPLE
import mpfit
import numpy
x = numpy.arange(1, 101, dtype=float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = ( p0[0] + p0[1]*x + p0[2]*x**2 + p0[3]*numpy.sqrt(x) +
p0[4]*numpy.log(x) )
err = numpy.ones(len(x))   # illustrative unit uncertainties
fa = {'x':x, 'y':y, 'err':err}
m = mpfit.mpfit(myfunct, p0, functkw=fa)
print('status = ', m.status)
if (m.status <= 0): print('error message = ', m.errmsg)
print('parameters = ', m.params)
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
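A self-contained variant of this example is sketched below; the function name
myfunct, the unit errors and the synthetic noise are illustrative assumptions,
not part of MPFIT:

import numpy
import mpfit

def myfunct(p, fjac=None, x=None, y=None, err=None):
    # weighted deviates for the model used in the example above
    model = (p[0] + p[1]*x + p[2]*x**2 + p[3]*numpy.sqrt(x) +
             p[4]*numpy.log(x))
    return [0, (y - model) / err]

x = numpy.arange(1, 101, dtype=float)
p_true = [5.7, 2.2, 500., 1.5, 2000.]
err = numpy.ones(len(x))                       # assumed unit uncertainties
y = (p_true[0] + p_true[1]*x + p_true[2]*x**2 +
     p_true[3]*numpy.sqrt(x) + p_true[4]*numpy.log(x) +
     numpy.random.normal(0., 1., len(x)))

p0 = [1., 1., 1., 1., 1.]                      # deliberately poor starting guess
m = mpfit.mpfit(myfunct, p0, functkw={'x': x, 'y': y, 'err': err}, quiet=1)
print('status =', m.status)
if m.status <= 0:
    print('error message =', m.errmsg)
print('parameters =', m.params)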
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be Taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)    (1)
where the three terms are the 0th-, 1st- and 2nd-order contributions.
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm), using Newton's method, arriving
at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
a symmetric n x n matrix, and should be positive definite.
The Levenberg - Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, if even by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
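As a purely illustrative numpy sketch of eqn (2a) combined with eqn (5)
(MPFIT itself does not solve the normal equations this way; jac, resid and
nu are assumed inputs):

import numpy

def lm_step(jac, resid, nu):
    # Solve (J^T J + nu I) dx = -J^T h for the trial step dx
    n = jac.shape[1]
    lhs = jac.T.dot(jac) + nu * numpy.eye(n)
    rhs = -jac.T.dot(resid)
    return numpy.linalg.solve(lhs, rhs)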
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h', with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
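For comparison, an illustrative numpy version of eqn (6) without pivoting
(MPFIT uses its own MPFIT_QRFAC/MPFIT_QRSOLV routines, so this is a sketch only):

import numpy

def qr_step(jac, resid):
    # h' = Q . R, then solve R . dx = - Q^T . h
    q, r = numpy.linalg.qr(jac)
    return numpy.linalg.solve(r, -q.T.dot(resid))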
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
Converted from Numeric to numpy (Sergey Koposov, July 2008)
"""
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
import numpy
import types
import scipy.linalg.blas
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit(object):
blas_enorm32, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float32))
blas_enorm64, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float64))
def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,
damp=0., maxiter=200, factor=100., nprint=1,
iterfunct='default', iterkw={}, nocovar=0,
rescale=0, autoderivative=1, quiet=0,
diag=None, epsfcn=None, debug=0):
"""
Inputs:
fcn:
The function to be minimized. The function should return the weighted
deviations between the model and the data, as described above.
xall:
An array of starting values for each of the parameters of the model.
The number of parameters should be fewer than the number of measurements.
This parameter is optional if the parinfo keyword is used (but see
parinfo). The parinfo keyword provides a mechanism to fix or constrain
individual parameters.
Keywords:
autoderivative:
If this is set, derivatives of the function will be computed
automatically via a finite differencing procedure. If not set, then
fcn must provide the (analytical) derivatives.
Default: set (=1)
NOTE: to supply your own analytical derivatives,
explicitly pass autoderivative=0
ftol:
A nonnegative input variable. Termination occurs when both the actual
and predicted relative reductions in the sum of squares are at most
ftol (and status is accordingly set to 1 or 3). Therefore, ftol
measures the relative error desired in the sum of squares.
Default: 1E-10
functkw:
A dictionary which contains the parameters to be passed to the
user-supplied function specified by fcn via the standard Python
keyword dictionary mechanism. This is the way you can pass additional
data to your user-supplied function without using global variables.
Consider the following example:
if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
'errval':[1.,1.,1.] }
then the user supplied function should be declared like this:
def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
Default: {} No extra parameters are passed to the user-supplied
function.
gtol:
A nonnegative input variable. Termination occurs when the cosine of
the angle between fvec and any column of the jacobian is at most gtol
in absolute value (and status is accordingly set to 4). Therefore,
gtol measures the orthogonality desired between the function vector
and the columns of the jacobian.
Default: 1e-10
iterkw:
The keyword arguments to be passed to iterfunct via the dictionary
keyword mechanism. This should be a dictionary and is similar in
operation to FUNCTKW.
Default: {} No arguments are passed.
iterfunct:
The name of a function to be called upon each NPRINT iteration of the
MPFIT routine. It should be declared in the following way:
def iterfunct(myfunct, p, iter, fnorm, functkw=None,
parinfo=None, quiet=0, dof=None, [iterkw keywords here])
# perform custom iteration update
iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
and QUIET).
myfunct: The user-supplied function to be minimized,
p: The current set of model parameters
iter: The iteration number
functkw: The arguments to be passed to myfunct.
fnorm: The chi-squared value.
quiet: Set when no textual output should be printed.
dof: The number of degrees of freedom, normally the number of points
less the number of free parameters.
See below for documentation of parinfo.
In implementation, iterfunct can perform updates to the terminal or
graphical user interface, to provide feedback while the fit proceeds.
If the fit is to be stopped for any reason, then iterfunct should return a
status value between -15 and -1. Otherwise it should return None
(e.g. no return statement) or 0.
In principle, iterfunct should probably not modify the parameter values,
because it may interfere with the algorithm's stability. In practice it
is allowed.
Default: an internal routine is used to print the parameter values.
Set iterfunct=None if there is no user-defined routine and you don't
want the internal default routine to be called.
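As one possible sketch (the name my_iterfunct and the printed quantity are
illustrative, not defaults), such a routine could report the reduced
chi-square and let the fit continue:

def my_iterfunct(myfunct, p, iter, fnorm, functkw=None,
                 parinfo=None, quiet=0, dof=None, **iterkw):
    if not quiet and dof:
        print('iteration %3d: chi-square/dof = %.6g' % (iter, fnorm/dof))
    return 0          # a value between -15 and -1 would stop the fit

# m = mpfit.mpfit(myfunct, p0, functkw=fa, iterfunct=my_iterfunct)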
maxiter:
The maximum number of iterations to perform. If the number is exceeded,
then the status value is set to 5 and MPFIT returns.
Default: 200 iterations
nocovar:
Set this keyword to prevent the calculation of the covariance matrix
before returning (see COVAR)
Default: clear (=0) The covariance matrix is returned
nprint:
The frequency with which iterfunct is called. A value of 1 indicates
that iterfunct is called with every iteration, while 2 indicates every
other iteration, etc. Note that several Levenberg-Marquardt attempts
can be made in a single iteration.
Default value: 1
parinfo:
Provides a mechanism for more sophisticated constraints to be placed on
parameter values. When parinfo is not passed, then it is assumed that
all parameters are free and unconstrained. Values in parinfo are never
modified during a call to MPFIT.
See description above for the structure of PARINFO.
Default value: None All parameters are free and unconstrained.
quiet:
Set this keyword when no textual output should be printed by MPFIT
damp:
A scalar number, indicating the cut-off value of residuals where
"damping" will occur. Residuals with magnitudes greater than this
number will be replaced by their hyperbolic tangent. This partially
mitigates the so-called large residual problem inherent in
least-squares solvers (as for the test problem CURVI,
http://www.maxthis.com/curviex.htm).
A value of 0 indicates no damping.
Default: 0
Note: DAMP doesn't work with autoderivative=0
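Illustratively, the damping amounts to the following transformation of the
residual vector (mirroring what mpfit.call() applies internally when damp > 0):

import numpy

def damp_residuals(f, damp):
    # residuals are mapped through tanh(f/damp); values with |f| >> damp
    # saturate near +/-1, so single wild points cannot dominate the fit
    return numpy.tanh(f / damp)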
xtol:
A nonnegative input variable. Termination occurs when the relative error
between two consecutive iterates is at most xtol (and status is
accordingly set to 2 or 3). Therefore, xtol measures the relative error
desired in the approximate solution.
Default: 1E-10
Outputs:
Returns an object of type mpfit. The results are attributes of this class,
e.g. mpfit.status, mpfit.errmsg, mpfit.params, mpfit.niter, mpfit.covar.
.status
An integer status code is returned. All values greater than zero can
represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
.fnorm
The value of the summed squared residuals for the returned parameter
values. (chi-square)
.covar
The covariance matrix for the set of parameters returned by MPFIT.
The matrix is NxN where N is the number of parameters. The square root
of the diagonal elements gives the formal 1-sigma statistical errors on
the parameters if errors were treated "properly" in fcn.
Parameter errors are also returned in .perror.
To compute the correlation matrix, pcor, use this example:
cov = mpfit.covar
pcor = cov * 0.
for i in range(n):
for j in range(n):
pcor[i,j] = cov[i,j]/sqrt(cov[i,i]*cov[j,j])
If nocovar is set or MPFIT terminated abnormally, then .covar is set to
a scalar with value None.
.errmsg
A string error or warning message is returned.
.nfev
The number of calls to MYFUNCT performed.
.niter
The number of iterations completed.
.perror
The formal 1-sigma errors in each parameter, computed from the
covariance matrix. If a parameter is held fixed, or if it touches a
boundary, then the error is reported as zero.
If the fit is unweighted (i.e. no errors were given, or the weights
were uniformly set to unity), then .perror will probably not represent
the true parameter uncertainties.
*If* you can assume that the true reduced chi-squared value is unity --
meaning that the fit is implicitly assumed to be of good quality --
then the estimated parameter uncertainties can be computed by scaling
.perror by the square root of the measured reduced chi-squared value:
dof = len(x) - len(mpfit.params) # deg of freedom
# scaled uncertainties
pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)
"""
self.niter = 0
self.params = None
self.covar = None
self.perror = None
self.status = 0 # Invalid input flag set while we check inputs
self.debug = debug
self.errmsg = ''
self.nfev = 0
self.damp = damp
self.dof=0
if fcn is None:
self.errmsg = "Usage: parms = mpfit('myfunct', ... )"
return
if iterfunct == 'default':
iterfunct = self.defiter
# Parameter damping doesn't work when user is providing their own
# gradients.
if (self.damp != 0) and (autoderivative == 0):
self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
return
# Parameters can either be stored in parinfo, or x. x takes precedence if it exists
if (xall is None) and (parinfo is None):
self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
return
# Be sure that PARINFO is of the right type
if parinfo is not None:
if type(parinfo) != list:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
else:
if type(parinfo[0]) != dict:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if ((xall is not None) and (len(xall) != len(parinfo))):
self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
return
# If the parameters were not specified at the command line, then
# extract them from PARINFO
if xall is None:
xall = self.parinfo(parinfo, 'value')
if xall is None:
self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
return
# Make sure parameters are numpy arrays
xall = numpy.asarray(xall)
# If xall is not a float array, or is float with fewer than 64 bits
# per element, convert it to double precision
if xall.dtype.kind != 'f' or xall.dtype.itemsize<=4:
xall = xall.astype(float)
npar = len(xall)
self.fnorm = -1.
fnorm1 = -1.
# TIED parameters?
ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
self.qanytied = 0
for i in range(npar):
ptied[i] = ptied[i].strip()
if ptied[i] != '':
self.qanytied = 1
self.ptied = ptied
# FIXED parameters ?
pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
pfixed = (pfixed == 1)
for i in range(npar):
pfixed[i] = pfixed[i] or (ptied[i] != '') # Tied parameters are also effectively fixed
# Finite differencing step, absolute and relative, and sidedness of deriv.
step = self.parinfo(parinfo, 'step', default=0., n=npar)
dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
# Maximum and minimum steps allowed to be taken in one iteration
maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
qmin = minstep != 0
qmin[:] = False # Remove minstep for now!!
qmax = maxstep != 0
if numpy.any(qmin & qmax & (maxstep<minstep)):
self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
return
wh = (numpy.nonzero((qmin!=0.) | (qmax!=0.)))[0]
qminmax = len(wh) > 0
# Finish up the free parameters
ifree = (numpy.nonzero(pfixed != 1))[0]
nfree = len(ifree)
if nfree == 0:
self.errmsg = 'ERROR: no free parameters'
return
# Compose only VARYING parameters
self.params = xall.copy() # self.params is the set of parameters to be returned
x = self.params[ifree] # x is the set of free parameters
# LIMITED parameters ?
limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar)
limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar)
if (limited is not None) and (limits is not None):
# Error checking on limits in parinfo
if numpy.any((limited[:,0] & (xall < limits[:,0])) |
(limited[:,1] & (xall > limits[:,1]))):
self.errmsg = 'ERROR: parameters are not within PARINFO limits'
return
if numpy.any((limited[:,0] & limited[:,1]) &
(limits[:,0] >= limits[:,1]) &
(pfixed == 0)):
self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
return
# Transfer structure values to local variables
qulim = (limited[:,1])[ifree]
ulim = (limits [:,1])[ifree]
qllim = (limited[:,0])[ifree]
llim = (limits [:,0])[ifree]
if numpy.any((qulim!=0.) | (qllim!=0.)):
qanylim = 1
else:
qanylim = 0
else:
# Fill in local variables with dummy values
qulim = numpy.zeros(nfree)
ulim = x * 0.
qllim = qulim
llim = x * 0.
qanylim = 0
n = len(x)
# Check input parameters for errors
if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \
or (maxiter < 0) or (factor <= 0):
self.errmsg = 'ERROR: input keywords are inconsistent'
return
if rescale != 0:
self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
if len(diag) < n:
return
if numpy.any(diag <= 0):
return
self.errmsg = ''
[self.status, fvec] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
return
# If the returned fvec has an itemsize of more than four bytes I assume
# that we have double precision
# It is important that the machar is determined by the precision of
# the returned value, not by the precision of the input array
if numpy.array([fvec]).dtype.itemsize>4:
self.machar = machar(double=1)
self.blas_enorm = mpfit.blas_enorm64
else:
self.machar = machar(double=0)
self.blas_enorm = mpfit.blas_enorm32
machep = self.machar.machep
m = len(fvec)
if m < n:
self.errmsg = 'ERROR: number of parameters must not exceed data'
return
self.dof = m-nfree
self.fnorm = self.enorm(fvec)
# Initialize the Levenberg-Marquardt parameter and iteration counter
par = 0.
self.niter = 1
qtf = x * 0.
self.status = 0
# Beginning of the outer loop
while(1):
# If requested, call fcn to enable printing of iterates
self.params[ifree] = x
if self.qanytied:
self.params = self.tie(self.params, ptied)
if (nprint > 0) and (iterfunct is not None):
if ((self.niter-1) % nprint) == 0:
mperr = 0
xnew0 = self.params.copy()
dof = numpy.max([len(fvec) - len(x), 0])
status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
functkw=functkw, parinfo=parinfo, quiet=quiet,
dof=dof, **iterkw)
if status is not None:
self.status = status
# Check for user termination
if self.status < 0:
self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
return
# If parameters were changed (grrr..) then re-tie
if numpy.max(numpy.abs(xnew0-self.params)) > 0:
if self.qanytied:
self.params = self.tie(self.params, ptied)
x = self.params[ifree]
# Calculate the jacobian matrix
self.status = 2
catch_msg = 'calling MPFIT_FDJAC2'
fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
epsfcn=epsfcn,
autoderivative=autoderivative, dstep=dstep,
functkw=functkw, ifree=ifree, xall=self.params)
if fjac is None:
self.errmsg = 'WARNING: premature termination by FDJAC2'
return
# Determine if any of the parameters are pegged at the limits
if qanylim:
catch_msg = 'zeroing derivatives of pegged parameters'
whlpeg = (numpy.nonzero(qllim & (x == llim)))[0]
nlpeg = len(whlpeg)
whupeg = (numpy.nonzero(qulim & (x == ulim)))[0]
nupeg = len(whupeg)
# See if any "pegged" values should keep their derivatives
if nlpeg > 0:
# Total derivative of sum wrt lower pegged parameters
for i in range(nlpeg):
sum0 = sum(fvec * fjac[:,whlpeg[i]])
if sum0 > 0:
fjac[:,whlpeg[i]] = 0
if nupeg > 0:
# Total derivative of sum wrt upper pegged parameters
for i in range(nupeg):
sum0 = sum(fvec * fjac[:,whupeg[i]])
if sum0 < 0:
fjac[:,whupeg[i]] = 0
# Compute the QR factorization of the jacobian
[fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
# On the first iteration if "diag" is unspecified, scale
# according to the norms of the columns of the initial jacobian
catch_msg = 'rescaling diagonal elements'
if self.niter == 1:
if (rescale==0) or (len(diag) < n):
diag = wa2.copy()
diag[diag == 0] = 1.
# On the first iteration, calculate the norm of the scaled x
# and initialize the step bound delta
wa3 = diag * x
xnorm = self.enorm(wa3)
delta = factor*xnorm
if delta == 0.:
delta = factor
# Form (q transpose)*fvec and store the first n components in qtf
catch_msg = 'forming (q transpose)*fvec'
wa4 = fvec.copy()
for j in range(n):
lj = ipvt[j]
temp3 = fjac[j,lj]
if temp3 != 0:
fj = fjac[j:,lj]
wj = wa4[j:]
# *** optimization wa4(j:*)
wa4[j:] = wj - fj * sum(fj*wj) / temp3
fjac[j,lj] = wa1[j]
qtf[j] = wa4[j]
# From this point on, only the square matrix, consisting of the
# triangle of R, is needed.
fjac = fjac[0:n, 0:n]
fjac.shape = [n, n]
temp = fjac.copy()
for i in range(n):
temp[:,i] = fjac[:, ipvt[i]]
fjac = temp.copy()
# Check for overflow. This should be a cheap test here since FJAC
# has been reduced to a (small) square matrix, and the test is
# O(N^2).
#wh = where(finite(fjac) EQ 0, ct)
#if ct GT 0 then goto, FAIL_OVERFLOW
# Compute the norm of the scaled gradient
catch_msg = 'computing the scaled gradient'
gnorm = 0.
if self.fnorm != 0:
for j in range(n):
l = ipvt[j]
if wa2[l] != 0:
sum0 = old_div(sum(fjac[0:j+1,j]*qtf[0:j+1]),self.fnorm)
gnorm = numpy.max([gnorm,numpy.abs(old_div(sum0,wa2[l]))])
# Test for convergence of the gradient norm
if gnorm <= gtol:
self.status = 4
break
if maxiter == 0:
self.status = 5
break
# Rescale if necessary
if rescale == 0:
diag = numpy.choose(diag>wa2, (wa2, diag))
# Beginning of the inner loop
while(1):
# Determine the levenberg-marquardt parameter
catch_msg = 'calculating LM parameter (MPFIT_)'
[fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf, delta, wa1, wa2, par=par)
# Store the direction p and x+p. Calculate the norm of p
wa1 = -wa1
if (qanylim == 0) and (qminmax == 0):
# No parameter limits, so just move to new position WA2
alpha = 1.
wa2 = x + wa1
else:
# Respect the limits. If a step were to go out of bounds, then
# we should take a step in the same direction but shorter distance.
# The step should take us right to the limit in that case.
alpha = 1.
if qanylim:
# Do not allow any steps out of bounds
catch_msg = 'checking for a step out of bounds'
if nlpeg > 0:
wa1[whlpeg] = numpy.clip( wa1[whlpeg], 0., numpy.max(wa1))
if nupeg > 0:
wa1[whupeg] = numpy.clip(wa1[whupeg], numpy.min(wa1), 0.)
dwa1 = numpy.abs(wa1) > machep
whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)))[0]
if len(whl) > 0:
t = (old_div((llim[whl] - x[whl]),
wa1[whl]))
alpha = numpy.min([alpha, numpy.min(t)])
whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)))[0]
if len(whu) > 0:
t = (old_div((ulim[whu] - x[whu]),
wa1[whu]))
alpha = numpy.min([alpha, numpy.min(t)])
# Obey any max step values.
if qminmax:
nwa1 = wa1 * alpha
whmax = (numpy.nonzero((qmax != 0.) & (maxstep > 0)))[0]
if len(whmax) > 0:
mrat = numpy.max(old_div(numpy.abs(nwa1[whmax]),
numpy.abs(maxstep[ifree[whmax]])))
if mrat > 1:
alpha = old_div(alpha, mrat)
# Scale the resulting vector
wa1 = wa1 * alpha
wa2 = x + wa1
# Adjust the final output values. If the step put us exactly
# on a boundary, make sure it is exact.
sgnu = (ulim >= 0) * 2. - 1.
sgnl = (llim >= 0) * 2. - 1.
# Handles case of
# ... nonzero *LIM ... ...zero * LIM
ulim1 = ulim * (1 - sgnu * machep) - (ulim == 0) * machep
llim1 = llim * (1 + sgnl * machep) + (llim == 0) * machep
wh = (numpy.nonzero((qulim!=0) & (wa2 >= ulim1)))[0]
if len(wh) > 0:
wa2[wh] = ulim[wh]
wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim1)))[0]
if len(wh) > 0:
wa2[wh] = llim[wh]
# endelse
wa3 = diag * wa1
pnorm = self.enorm(wa3)
# On the first iteration, adjust the initial step bound
if self.niter == 1:
delta = numpy.min([delta,pnorm])
self.params[ifree] = wa2
# Evaluate the function at x+p and calculate its norm
mperr = 0
catch_msg = 'calling '+str(fcn)
[self.status, wa4] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'WARNING: premature termination by "'+str(fcn)+'"'
return
fnorm1 = self.enorm(wa4)
# Compute the scaled actual reduction
catch_msg = 'computing convergence criteria'
actred = -1.
if (0.1 * fnorm1) < self.fnorm:
actred = - (old_div(fnorm1,self.fnorm))**2 + 1.
# Compute the scaled predicted reduction and the scaled directional
# derivative
for j in range(n):
wa3[j] = 0
wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
# Remember, alpha is the fraction of the full LM step actually
# taken
temp1 = old_div(self.enorm(alpha*wa3),self.fnorm)
temp2 = old_div((numpy.sqrt(alpha*par)*pnorm),self.fnorm)
prered = temp1*temp1 + old_div((temp2*temp2),0.5)
dirder = -(temp1*temp1 + temp2*temp2)
# Compute the ratio of the actual to the predicted reduction.
ratio = 0.
if prered != 0:
ratio = old_div(actred,prered)
# Update the step bound
if ratio <= 0.25:
if actred >= 0:
temp = .5
else:
temp = .5*dirder/(dirder + .5*actred)
if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1):
temp = 0.1
delta = temp*numpy.min([delta,old_div(pnorm,0.1)])
par = old_div(par,temp)
else:
if (par == 0) or (ratio >= 0.75):
delta = old_div(pnorm,.5)
par = .5*par
# Test for successful iteration
if ratio >= 0.0001:
# Successful iteration. Update x, fvec, and their norms
x = wa2
wa2 = diag * x
fvec = wa4
xnorm = self.enorm(wa2)
self.fnorm = fnorm1
self.niter = self.niter + 1
# Tests for convergence
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1):
self.status = 1
if delta <= xtol*xnorm:
self.status = 2
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1) and (self.status == 2):
self.status = 3
if self.status != 0:
break
# Tests for termination and stringent tolerances
if self.niter >= maxiter:
self.status = 5
if (numpy.abs(actred) <= machep) and (prered <= machep) \
and (0.5*ratio <= 1):
self.status = 6
if delta <= machep*xnorm:
self.status = 7
if gnorm <= machep:
self.status = 8
if self.status != 0:
break
# End of inner loop. Repeat if iteration unsuccessful
if ratio >= 0.0001:
break
# Check for over/underflow
if ~numpy.all(numpy.isfinite(wa1) & numpy.isfinite(wa2) & \
numpy.isfinite(x)) or ~numpy.isfinite(ratio):
self.errmsg = ('ERROR: parameter or function value(s) have become '
'infinite; check model function for over- and underflow')
self.status = -16
break
#wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
#if ct GT 0 OR finite(ratio) EQ 0 then begin
if self.status != 0:
break
# End of outer loop.
catch_msg = 'in the termination phase'
# Termination, either normal or user imposed.
if len(self.params) == 0:
return
if nfree == 0:
self.params = xall.copy()
else:
self.params[ifree] = x
if (nprint > 0) and (self.status > 0):
catch_msg = 'calling ' + str(fcn)
[status, fvec] = self.call(fcn, self.params, functkw)
catch_msg = 'in the termination phase'
self.fnorm = self.enorm(fvec)
if (self.fnorm is not None) and (fnorm1 is not None):
self.fnorm = numpy.max([self.fnorm, fnorm1])
self.fnorm = self.fnorm**2.
self.covar = None
self.perror = None
# (very carefully) set the covariance matrix COVAR
if (self.status > 0) and (nocovar==0) and (n is not None) \
and (fjac is not None) and (ipvt is not None):
sz = fjac.shape
if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \
and (len(ipvt) >= n):
catch_msg = 'computing the covariance matrix'
cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
cv.shape = [n, n]
nn = len(xall)
# Fill in actual covariance matrix, accounting for fixed
# parameters.
self.covar = numpy.zeros([nn, nn], dtype=float)
for i in range(n):
self.covar[ifree,ifree[i]] = cv[:,i]
# Compute errors in parameters
catch_msg = 'computing parameter errors'
self.perror = numpy.zeros(nn, dtype=float)
d = numpy.diagonal(self.covar).copy()
wh = (numpy.nonzero(d >= 0))[0]
if len(wh) > 0:
self.perror[wh] = numpy.sqrt(d[wh])
return
def __str__(self):
return {'params': self.params,
'niter': self.niter,
'covar': self.covar,
'perror': self.perror,
'status': self.status,
'debug': self.debug,
'errmsg': self.errmsg,
'nfev': self.nfev,
'damp': self.damp
#,'machar':self.machar
}.__str__()
# Default procedure to be called every iteration. It simply prints
# the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
quiet=0, iterstop=None, parinfo=None,
format=None, pformat='%.10g', dof=1):
if self.debug:
print('Entering defiter...')
if quiet:
return
if fnorm is None:
[status, fvec] = self.call(fcn, x, functkw)
fnorm = self.enorm(fvec)**2
# Determine which parameters to print
nprint = len(x)
print("Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof))
for i in range(nprint):
if (parinfo is not None) and ('parname' in parinfo[i]):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo is not None) and ('mpprint' in parinfo[i]):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if iprint:
print(p + (pformat % x[i]) + ' ')
return 0
# DO_ITERSTOP:
# if keyword_set(iterstop) then begin
# k = get_kbrd(0)
# if k EQ string(byte(7)) then begin
# message, 'WARNING: minimization not complete', /info
# print, 'Do you want to terminate this procedure? (y/n)', $
# format='(A,$)'
# k = ''
# read, k
# if strupcase(strmid(k,0,1)) EQ 'Y' then begin
# message, 'WARNING: Procedure is terminating.', /info
# mperr = -1
# endif
# endif
# endif
# Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
if self.debug:
print('Entering parinfo...')
if (n == 0) and (parinfo is not None):
n = len(parinfo)
if n == 0:
values = default
return values
values = []
for i in range(n):
if (parinfo is not None) and (key in parinfo[i]):
values.append(parinfo[i][key])
else:
values.append(default)
# Convert to numeric arrays if possible
test = default
if type(default) == list:
test=default[0]
if isinstance(test, int):
values = numpy.asarray(values, int)
elif isinstance(test, float):
values = numpy.asarray(values, float)
return values
# Call user function or procedure, with _EXTRA or not, with
# derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
if self.debug:
print('Entering call...')
if self.qanytied:
x = self.tie(x, self.ptied)
self.nfev = self.nfev + 1
if fjac is None:
[status, f] = fcn(x, fjac=fjac, **functkw)
if self.damp > 0:
# Apply the damping if requested. This replaces the residuals
# with their hyperbolic tangent. Thus residuals larger than
# DAMP are essentially clipped.
f = numpy.tanh(old_div(f,self.damp))
return [status, f]
else:
return fcn(x, fjac=fjac, **functkw)
def enorm(self, vec):
ans = self.blas_enorm(vec)
return ans
def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
epsfcn=None, autoderivative=1,
functkw=None, xall=None, ifree=None, dstep=None):
if self.debug:
print('Entering fdjac2...')
machep = self.machar.machep
if epsfcn is None:
epsfcn = machep
if xall is None:
xall = x
if ifree is None:
ifree = numpy.arange(len(xall))
if step is None:
step = x * 0.
nall = len(xall)
eps = numpy.sqrt(numpy.max([epsfcn, machep]))
m = len(fvec)
n = len(x)
# Compute analytical derivative if requested
if autoderivative == 0:
mperr = 0
fjac = numpy.zeros(nall, dtype=float)
fjac[ifree] = 1.0 # Specify which parameters need derivatives
[status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
if len(fjac) != m*nall:
print('ERROR: Derivative matrix was not computed properly.')
return None
# This definition is consistent with CURVEFIT
# Sign error found (thanks Jesus Fernandez <[email protected]>)
fjac.shape = [m,nall]
fjac = -fjac
# Select only the free parameters
if len(ifree) < nall:
fjac = fjac[:,ifree]
fjac.shape = [m, n]
return fjac
fjac = numpy.zeros([m, n], dtype=float)
h = eps * numpy.abs(x)
# if STEP is given, use that
# STEP includes the fixed parameters
if step is not None:
stepi = step[ifree]
wh = (numpy.nonzero(stepi > 0))[0]
if len(wh) > 0:
h[wh] = stepi[wh]
# if relative step is given, use that
# DSTEP includes the fixed parameters
if len(dstep) > 0:
dstepi = dstep[ifree]
wh = (numpy.nonzero(dstepi > 0))[0]
if len(wh) > 0:
h[wh] = numpy.abs(dstepi[wh]*x[wh])
# In case any of the step values are zero
h[h == 0] = eps
# Reverse the sign of the step if we are up against the parameter
# limit, or if the user requested it.
# DSIDE includes the fixed parameters (ULIMITED/ULIMIT have only
# varying ones)
mask = dside[ifree] == -1
if len(ulimited) > 0 and len(ulimit) > 0:
mask = (mask | ((ulimited!=0) & (x > ulimit-h)))
wh = (numpy.nonzero(mask))[0]
if len(wh) > 0:
h[wh] = - h[wh]
# Loop through parameters, computing the derivative for each
for j in range(n):
xp = xall.copy()
xp[ifree[j]] = xp[ifree[j]] + h[j]
[status, fp] = self.call(fcn, xp, functkw)
if status < 0:
return None
if numpy.abs(dside[ifree[j]]) <= 1:
# COMPUTE THE ONE-SIDED DERIVATIVE
# Note optimization fjac(0:*,j)
fjac[0:,j] = old_div((fp-fvec),h[j])
else:
# COMPUTE THE TWO-SIDED DERIVATIVE
xp[ifree[j]] = xall[ifree[j]] - h[j]
mperr = 0
[status, fm] = self.call(fcn, xp, functkw)
if status < 0:
return None
# Note optimization fjac(0:*,j)
fjac[0:,j] = old_div((fp-fm),(2*h[j]))
return fjac
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
#
# PIVOTING / PERMUTING:
#
# Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in
# permuted order.
#
# RDIAG is in permuted order.
# ACNORM is in standard parameter order.
#
#
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident # identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector
# hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix
# endfor
#
# Test the result:
# IDL> print, hh # transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
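#
# An illustrative numpy helper for the reflector formula used above
# (not part of MPFIT; shown only to make the reconstruction concrete):
#
#   import numpy
#   def householder_matrix(v):
#       # I - 2 * outer(v, v) / (v . v)
#       v = numpy.asarray(v, dtype=float)
#       return numpy.eye(len(v)) - 2.0 * numpy.outer(v, v) / numpy.dot(v, v)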
def qrfac(self, a, pivot=0):
if self.debug: print('Entering qrfac...')
machep = self.machar.machep
sz = a.shape
m = sz[0]
n = sz[1]
# Compute the initial column norms and initialize arrays
acnorm = numpy.zeros(n, dtype=float)
for j in range(n):
acnorm[j] = self.enorm(a[:,j])
rdiag = acnorm.copy()
wa = rdiag.copy()
ipvt = numpy.arange(n)
# Reduce a to r with householder transformations
minmn = numpy.min([m,n])
for j in range(minmn):
if pivot != 0:
# Bring the column of largest norm into the pivot position
rmax = numpy.max(rdiag[j:])
kmax = (numpy.nonzero(rdiag[j:] == rmax))[0]
ct = len(kmax)
kmax = kmax + j
if ct > 0:
kmax = kmax[0]
# Exchange rows via the pivot only. Avoid actually exchanging
# the rows, in case there is lots of memory transfer. The
# exchange occurs later, within the body of MPFIT, after the
# extraneous columns of the matrix have been shed.
if kmax != j:
temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp
rdiag[kmax] = rdiag[j]
wa[kmax] = wa[j]
# Compute the householder transformation to reduce the jth
# column of A to a multiple of the jth unit vector
lj = ipvt[j]
ajj = a[j:,lj]
ajnorm = self.enorm(ajj)
if ajnorm == 0:
break
if a[j,lj] < 0:
ajnorm = -ajnorm
ajj = old_div(ajj, ajnorm)
ajj[0] = ajj[0] + 1
# *** Note optimization a(j:*,j)
a[j:,lj] = ajj
# Apply the transformation to the remaining columns
# and update the norms
# NOTE to SELF: tried to optimize this by removing the loop,
# but it actually got slower. Reverted to "for" loop to keep
# it simple.
if j+1 < n:
for k in range(j+1, n):
lk = ipvt[k]
ajk = a[j:,lk]
# *** Note optimization a(j:*,lk)
# (corrected 20 Jul 2000)
if a[j,lj] != 0:
a[j:,lk] = ajk - ajj * sum(ajk*ajj)/a[j,lj]
if (pivot != 0) and (rdiag[k] != 0):
temp = old_div(a[j,lk],rdiag[k])
rdiag[k] = rdiag[k] * numpy.sqrt(numpy.max([(1.-temp**2), 0.]))
temp = old_div(rdiag[k],wa[k])
if (0.05*temp*temp) <= machep:
rdiag[k] = self.enorm(a[j+1:,lk])
wa[k] = rdiag[k]
rdiag[j] = -ajnorm
return [a, ipvt, rdiag, acnorm]
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
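# A hedged dense-algebra cross-check (illustrative only, not used by MPFIT):
# with a, d and b denoting the full, unpermuted quantities described above,
# the x returned by qrsolv minimizes || [a; d] x - [b; 0] ||, which could be
# verified with plain numpy along the lines of
#
#   big_a = numpy.vstack([a, numpy.diag(d)])
#   big_b = numpy.concatenate([b, numpy.zeros(len(d))])
#   x_ref = numpy.linalg.lstsq(big_a, big_b, rcond=None)[0]
#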
def qrsolv(self, r, ipvt, diag, qtb, sdiag):
if self.debug:
print('Entering qrsolv...')
sz = r.shape
m = sz[0]
n = sz[1]
# copy r and (q transpose)*b to preserve input and initialize s.
# in particular, save the diagonal elements of r in x.
for j in range(n):
r[j:n,j] = r[j,j:n]
x = numpy.diagonal(r).copy()
wa = qtb.copy()
# Eliminate the diagonal matrix d using a givens rotation
for j in range(n):
l = ipvt[j]
if diag[l] == 0:
break
sdiag[j:] = 0
sdiag[j] = diag[l]
# The transformations to eliminate the row of d modify only a
# single element of (q transpose)*b beyond the first n, which
# is initially zero.
qtbpj = 0.
for k in range(j,n):
if sdiag[k] == 0:
break
if numpy.abs(r[k,k]) < numpy.abs(sdiag[k]):
cotan = old_div(r[k,k],sdiag[k])
sine = old_div(0.5,numpy.sqrt(.25 + .25*cotan*cotan))
cosine = sine*cotan
else:
tang = old_div(sdiag[k],r[k,k])
cosine = old_div(0.5,numpy.sqrt(.25 + .25*tang*tang))
sine = cosine*tang
# Compute the modified diagonal element of r and the
# modified element of ((q transpose)*b,0).
r[k,k] = cosine*r[k,k] + sine*sdiag[k]
temp = cosine*wa[k] + sine*qtbpj
qtbpj = -sine*wa[k] + cosine*qtbpj
wa[k] = temp
# Accumulate the transformation in the row of s
if n > k+1:
temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
r[k+1:n,k] = temp
sdiag[j] = r[j,j]
r[j,j] = x[j]
# Solve the triangular system for z. If the system is singular
# then obtain a least squares solution
nsing = n
wh = (numpy.nonzero(sdiag == 0))[0]
if len(wh) > 0:
nsing = wh[0]
wa[nsing:] = 0
if nsing >= 1:
wa[nsing-1] = old_div(wa[nsing-1],sdiag[nsing-1]) # Degenerate case
# *** Reverse loop ***
for j in range(nsing-2,-1,-1):
sum0 = sum(r[j+1:nsing,j]*wa[j+1:nsing])
wa[j] = old_div((wa[j]-sum0),sdiag[j])
# Permute the components of z back to components of x
x[ipvt] = wa
return (r, x, sdiag)
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
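# A hedged illustration (not how the code below actually computes the step):
# for a fixed trial value of par, the x described above satisfies the damped
# normal equations of the original, unpermuted problem, roughly
#
#   x = numpy.linalg.solve(a.T.dot(a) + par * numpy.diag(diag**2), a.T.dot(b))
#
# and lmpar adjusts par until enorm(diag * x) lies within 10 percent of delta.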
def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
if self.debug:
print('Entering lmpar...')
dwarf = self.machar.minnum
machep = self.machar.machep
sz = r.shape
m = sz[0]
n = sz[1]
# Compute and store in x the gauss-newton direction. If the
# jacobian is rank-deficient, obtain a least-squares solution
nsing = n
wa1 = qtb.copy()
rthresh = numpy.max(numpy.abs(numpy.diagonal(r).copy())) * machep
wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r).copy()) < rthresh))[0]
if len(wh) > 0:
nsing = wh[0]
wa1[wh[0]:] = 0
if nsing >= 1:
# *** Reverse loop ***
for j in range(nsing-1,-1,-1):
wa1[j] = old_div(wa1[j],r[j,j])
if j-1 >= 0:
wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]
# Note: ipvt here is a permutation array
x[ipvt] = wa1
# Initialize the iteration counter. Evaluate the function at the
# origin, and test for acceptance of the gauss-newton direction
iter = 0
wa2 = diag * x
dxnorm = self.enorm(wa2)
fp = dxnorm - delta
if fp <= 0.1*delta:
return [r, 0., x, sdiag]
# If the jacobian is not rank deficient, the newton step provides a
# lower bound, parl, for the zero of the function. Otherwise set
# this bound to zero.
parl = 0.
if nsing >= n:
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
wa1[0] = old_div(wa1[0], r[0,0]) # Degenerate case
for j in range(1,n): # Note "1" here, not zero
sum0 = sum(r[0:j,j]*wa1[0:j])
wa1[j] = old_div((wa1[j] - sum0),r[j,j])
temp = self.enorm(wa1)
parl = old_div((old_div((old_div(fp,delta)),temp)),temp)
# Calculate an upper bound, paru, for the zero of the function
for j in range(n):
sum0 = sum(r[0:j+1,j]*qtb[0:j+1])
wa1[j] = old_div(sum0,diag[ipvt[j]])
gnorm = self.enorm(wa1)
paru = old_div(gnorm,delta)
if paru == 0:
paru = old_div(dwarf,numpy.min([delta,0.1]))
# If the input par lies outside of the interval (parl,paru), set
# par to the closer endpoint
par = numpy.max([par,parl])
par = numpy.min([par,paru])
if par == 0:
par = old_div(gnorm,dxnorm)
# Beginning of an iteration
while(1):
iter = iter + 1
# Evaluate the function at the current value of par
if par == 0:
par = numpy.max([dwarf, paru*0.001])
temp = numpy.sqrt(par)
wa1 = temp * diag
[r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
wa2 = diag*x
dxnorm = self.enorm(wa2)
temp = fp
fp = dxnorm - delta
if (numpy.abs(fp) <= 0.1*delta) or \
((parl == 0) and (fp <= temp) and (temp < 0)) or \
(iter == 10):
break
# Compute the newton correction
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
for j in range(n-1):
wa1[j] = old_div(wa1[j],sdiag[j])
wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
wa1[n-1] = old_div(wa1[n-1],sdiag[n-1]) # Degenerate case
temp = self.enorm(wa1)
parc = old_div((old_div((old_div(fp,delta)),temp)),temp)
# Depending on the sign of the function, update parl or paru
if fp > 0:
parl = numpy.max([parl,par])
if fp < 0:
paru = numpy.min([paru,par])
# Compute an improved estimate for par
par = numpy.max([parl, par+parc])
# End of an iteration
# Termination
return [r, par, x, sdiag]
# Procedure to tie one parameter to another.
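# A hedged usage sketch (the expressions below are invented for illustration):
# each non-empty entry of `ptied` is a python expression evaluated with the
# current parameter vector `p` in scope, so e.g.
#
#   ptied = ['', 'p[0] * 2.0', '']
#
# leaves p[0] and p[2] untouched and forces p[1] to track twice p[0].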
def tie(self, p, ptied=None):
if self.debug:
print('Entering tie...')
if ptied is None:
return
for i in range(len(ptied)):
if ptied[i] == '':
continue
cmd = 'p[' + str(i) + '] = ' + ptied[i]
exec(cmd)
return p
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
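# A hedged dense cross-check (illustrative only): when the m x n jacobian `a`
# has full column rank, the matrix returned below should agree with
#
#   cov_ref = numpy.linalg.inv(a.T.dot(a))
#
# where the pivoted QR factors (r, ipvt) fed to calc_covar come from qrfac(a).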
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
if self.debug:
print('Entering calc_covar...')
if numpy.array(rr).ndim != 2:
print('ERROR: r must be a two-dimensional matrix')
return -1
s = rr.shape
n = s[0]
if s[0] != s[1]:
print('ERROR: r must be a square matrix')
return -1
if ipvt is None:
ipvt = numpy.arange(n)
r = rr.copy()
r.shape = [n,n]
# Form the inverse of r in the full upper triangle of r
l = -1
tolr = tol * numpy.abs(r[0,0])
for k in range(n):
if numpy.abs(r[k,k]) <= tolr:
break
r[k,k] = old_div(1.,r[k,k])
for j in range(k):
temp = r[k,k] * r[j,k]
r[j,k] = 0.
r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
l = k
# Form the full upper triangle of the inverse of (r transpose)*r
# in the full upper triangle of r
if l >= 0:
for k in range(l+1):
for j in range(k):
temp = r[j,k]
r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
temp = r[k,k]
r[0:k+1,k] = temp * r[0:k+1,k]
# Form the full lower triangle of the covariance matrix
# in the strict lower triangle of r and in wa
wa = numpy.repeat([r[0,0]], n)
for j in range(n):
jj = ipvt[j]
sing = j > l
for i in range(j+1):
if sing:
r[i,j] = 0.
ii = ipvt[i]
if ii > jj:
r[ii,jj] = r[i,j]
if ii < jj:
r[jj,ii] = r[i,j]
wa[jj] = r[j,j]
# Symmetrize the covariance matrix in r
for j in range(n):
r[0:j+1,j] = r[j,0:j+1]
r[j,j] = wa[j]
return r
class machar(object):
def __init__(self, double=1):
if double == 0:
info = numpy.finfo(numpy.float32)
else:
info = numpy.finfo(numpy.float64)
self.machep = info.eps
self.maxnum = info.max
self.minnum = info.tiny
self.maxlog = numpy.log(self.maxnum)
self.minlog = numpy.log(self.minnum)
self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10
self.rgiant = numpy.sqrt(self.maxnum) * 0.1
| bmazin/ARCONS-pipeline | util/mpfit.py | Python | gpl-2.0 | 80,276 | [
"Gaussian"
] | 6315585e7fb3e3eadf6877166b15758799e24c17a9640e413845b7a73a1dc2e7 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import copy
import subprocess
import logging
import mooseutils
import moosetree
import pyhit
LOG = logging.getLogger(__name__)
from .SQARequirementReport import SQARequirementReport, SQARequirementDiffReport
from .SQADocumentReport import SQADocumentReport
from .SQAMooseAppReport import SQAMooseAppReport
def get_sqa_reports(config_file, app_report=True, doc_report=True, req_report=True):
"""
Generate reports regarding SQA content.
Input:
config_file[str|dict]: A YAML file to load or loaded YAML dict object
app/doc/req_report [bool]: Flags for controlling the creation of the various reports
Output:
doc_reports: List of SQADocumentReport objects for the existence of SQA documents links/files
req_reports: List of SQARequirementReport objects for test requirement information
app_reports: List of SQAMooseAppReport objects for class documentation
See moose/scripts/check_sqa.py for usage.
"""
config = mooseutils.yaml_load(config_file) if isinstance(config_file, str) else config_file
doc_reports = _get_sqa_document_reports(config) if doc_report else None
req_reports = _get_sqa_requirement_reports(config) if req_report else None
app_reports = _get_sqa_app_reports(config) if app_report else None
return doc_reports, req_reports, app_reports
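# A hedged usage sketch (the YAML file name below is illustrative, not part of
# this repository):
#
#   doc_reports, req_reports, app_reports = get_sqa_reports('sqa_reports.yml')
#   for reports in (doc_reports, req_reports, app_reports):
#       for report in (reports or []):
#           print(report)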
def _get_sqa_document_reports(config):
"""Helper function for building SQADocumentReport objects"""
if 'Documents' not in config:
return None
kwargs = config['Documents']
kwargs.setdefault('title', 'Documents')
doc_report = SQADocumentReport(**kwargs)
return [doc_report]
def _get_sqa_requirement_reports(config):
"""Helper for building the SQARequirementReport objects"""
if 'Requirements' not in config:
return None
req_config = config['Requirements']
reports = list()
# Local reports
diff_report = req_config.pop('create_diff_report', False)
for name, kwargs in req_config.items():
kwargs.setdefault('title', name)
local = SQARequirementReport(**kwargs)
reports.append(local)
# Local/global difference report
if diff_report:
diff = SQARequirementDiffReport(title='Missing Tests from Reports', reports=copy.copy(reports))
reports.append(diff)
return reports
def _get_sqa_app_reports(config):
"""Helper for building the SQAMooseAppReport objects"""
if 'Applications' not in config:
return None
app_configs = config['Applications']
reports = list()
for name, kwargs in app_configs.items():
kwargs.setdefault('title', name)
reports.append(SQAMooseAppReport(**kwargs))
return reports
| harterj/moose | python/moosesqa/get_sqa_reports.py | Python | lgpl-2.1 | 3,041 | [
"MOOSE"
] | 569e6a7017c34eef199cd3c2dfecd8e7c694d3e7c815c08611491e9e8ba06717 |
"""Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import pytest
import numpy as np
from inspect import signature
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation, CompoundKernel)
from sklearn.base import clone
from sklearn.utils._testing import (assert_almost_equal, assert_array_equal,
assert_array_almost_equal,
assert_allclose,
assert_raise_message,
fails_if_pypy)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_rbf_plus_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_rbf_plus_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2,
RBF(length_scale=[2.0]), Matern(length_scale=[2.0])]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
# Numerical precision errors in PyPy
@fails_if_pypy
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_gradient(kernel):
# Compare analytic and numeric gradient of kernels.
K, K_gradient = kernel(X, eval_gradient=True)
assert K_gradient.shape[0] == X.shape[0]
assert K_gradient.shape[1] == X.shape[0]
assert K_gradient.shape[2] == kernel.theta.shape[0]
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
@pytest.mark.parametrize(
'kernel',
[kernel for kernel in kernels
# skip non-basic kernels
if not (isinstance(kernel, KernelOperator)
or isinstance(kernel, Exponentiation))])
def test_kernel_theta(kernel):
# Check that parameter vector theta of kernel is set correctly.
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s[0:-len("_bounds")],
filter(lambda s: s.endswith("_bounds"), args))
assert (
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters) ==
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
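# For instance (a hedged illustration, not an extra assertion):
# RBF(length_scale=2.0).theta is approximately np.array([np.log(2.0)]),
# because theta stores log-transformed hyperparameter values.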
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert (theta[i] == np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert theta.shape[0] == new_kernel.theta.shape[0] + 1
assert K_gradient.shape[2] == K_gradient_new.shape[2] + 1
if i > 0:
assert theta[:i] == new_kernel.theta[:i]
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert theta[i + 1:] == new_kernel.theta[i:]
assert_array_equal(K_gradient[..., i + 1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
@pytest.mark.parametrize('kernel',
[kernel for kernel in kernels
# Identity is not satisfied on diagonal
if kernel != kernel_rbf_plus_white])
def test_auto_vs_cross(kernel):
# Auto-correlation and cross-correlation should be consistent.
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_diag(kernel):
# Test that diag method of kernel returns consistent results.
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
# Adding kernels and multiplying kernels should be commutative.
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
# Anisotropic kernel should be consistent with isotropic kernels.
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
@pytest.mark.parametrize('kernel',
[kernel for kernel in kernels
if kernel.is_stationary()])
def test_kernel_stationary(kernel):
# Test stationarity of kernels.
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_input_type(kernel):
# Test whether the kernel is for vectors or structured data
if isinstance(kernel, Exponentiation):
assert(kernel.requires_vector_input ==
kernel.kernel.requires_vector_input)
if isinstance(kernel, KernelOperator):
assert(kernel.requires_vector_input ==
(kernel.k1.requires_vector_input or
kernel.k2.requires_vector_input))
def test_compound_kernel_input_type():
kernel = CompoundKernel([WhiteKernel(noise_level=3.0)])
assert not kernel.requires_vector_input
kernel = CompoundKernel([WhiteKernel(noise_level=3.0),
RBF(length_scale=2.0)])
assert kernel.requires_vector_input
def check_hyperparameters_equal(kernel1, kernel2):
# Check that hyperparameters of two kernels are equal
for attr in set(dir(kernel1) + dir(kernel2)):
if attr.startswith("hyperparameter_"):
attr_value1 = getattr(kernel1, attr)
attr_value2 = getattr(kernel2, attr)
assert attr_value1 == attr_value2
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_clone(kernel):
# Test that sklearn's clone works correctly on kernels.
kernel_cloned = clone(kernel)
# XXX: Should this be fixed?
# This differs from sklearn's estimator equality check.
assert kernel == kernel_cloned
assert id(kernel) != id(kernel_cloned)
# Check that all constructor parameters are equal.
assert kernel.get_params() == kernel_cloned.get_params()
# Check that all hyperparameters are equal.
check_hyperparameters_equal(kernel, kernel_cloned)
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_clone_after_set_params(kernel):
# This test is to verify that using set_params does not
# break clone on kernels.
# This used to break because in kernels such as the RBF, non-trivial
# logic that modified the length scale used to be in the constructor
# See https://github.com/scikit-learn/scikit-learn/issues/6961
# for more details.
bounds = (1e-5, 1e5)
kernel_cloned = clone(kernel)
params = kernel.get_params()
# RationalQuadratic kernel is isotropic.
isotropic_kernels = (ExpSineSquared, RationalQuadratic)
if 'length_scale' in params and not isinstance(kernel,
isotropic_kernels):
length_scale = params['length_scale']
if np.iterable(length_scale):
# XXX unreached code as of v0.22
params['length_scale'] = length_scale[0]
params['length_scale_bounds'] = bounds
else:
params['length_scale'] = [length_scale] * 2
params['length_scale_bounds'] = bounds * 2
kernel_cloned.set_params(**params)
kernel_cloned_clone = clone(kernel_cloned)
assert (kernel_cloned_clone.get_params() == kernel_cloned.get_params())
assert id(kernel_cloned_clone) != id(kernel_cloned)
check_hyperparameters_equal(kernel_cloned, kernel_cloned_clone)
def test_matern_kernel():
# Test consistency of Matern kernel for special values of nu.
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for nu==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# matern kernel with nu==inf is equal to RBF kernel
K_rbf = RBF(length_scale=1.0)(X)
K = Matern(nu=np.inf, length_scale=1.0)(X)
assert_array_almost_equal(K, K_rbf)
assert_allclose(K, K_rbf)
# test that special cases of the matern kernel (nu in [0.5, 1.5, 2.5])
# give nearly identical results to the general case for nu in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
# test that nu==large is close to RBF
large = 100
K1 = Matern(nu=large, length_scale=1.0)(X)
K2 = RBF(length_scale=1.0)(X)
assert_array_almost_equal(K1, K2, decimal=2)
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_versus_pairwise(kernel):
# Check that GP kernels can also be used as pairwise kernels.
# Test auto-kernel
if kernel != kernel_rbf_plus_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
@pytest.mark.parametrize("kernel", kernels)
def test_set_get_params(kernel):
# Check that set_params()/get_params() is consistent with kernel.theta.
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if isinstance("string", type(hyperparameter.bounds)):
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if isinstance("string", type(hyperparameter.bounds)):
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value] * size})
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
[value] * size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
@pytest.mark.parametrize("kernel", kernels)
def test_repr_kernels(kernel):
# Smoke-test for repr in kernels.
repr(kernel)
def test_rational_quadratic_kernel():
kernel = RationalQuadratic(length_scale=[1., 1.])
assert_raise_message(AttributeError,
"RationalQuadratic kernel only supports isotropic "
"version, please use a single "
"scalar for length_scale", kernel, X)
| anntzer/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | Python | bsd-3-clause | 14,237 | [
"Gaussian"
] | 402e9de70dd84ae4a9ad9de0afa78a3f637d2a82eac7befa12c3e781284cf597 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
****************************
**espresso.analysis.Energy**
****************************
"""
import espresso
class EnergyPot():
'Potential energy of the system.'
def __init__(self, system, per_atom=False):
self.system = system
self.per_atom = per_atom
def compute(self):
EPot = 0.0
for k in range(self.system.getNumberOfInteractions()):
EPot += self.system.getInteraction(k).computeEnergy()
if self.per_atom:
NPart = espresso.analysis.NPart(self.system).compute()
return EPot / NPart
else:
return EPot
class EnergyKin():
'Kinetic energy of the system.'
def __init__(self, system, per_atom=False):
self.system = system
self.per_atom = per_atom
def compute(self):
NPart = espresso.analysis.NPart(self.system).compute()
T = espresso.analysis.Temperature(self.system).compute()
EKin = (3.0/2.0) * NPart * T
if self.per_atom:
return EKin / NPart
else:
return EKin
class EnergyTot():
'Total energy (EKin + EPot) of the system.'
def __init__(self, system, per_atom=False):
self.system = system
self.per_atom = per_atom
def compute(self):
NPart = espresso.analysis.NPart(self.system).compute()
T = espresso.analysis.Temperature(self.system).compute()
EKin = (3.0/2.0) * NPart * T
EPot = 0.0
for k in range(self.system.getNumberOfInteractions()):
EPot += self.system.getInteraction(k).computeEnergy()
if self.per_atom:
return (EPot + EKin) / NPart
else:
return (EPot + EKin)
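# A hedged usage sketch (assumes `system` is an already configured espresso
# System with particles and interactions; not part of this module):
#
#   e_pot = EnergyPot(system, per_atom=True).compute()
#   e_kin = EnergyKin(system).compute()
#   e_tot = EnergyTot(system).compute()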
| BackupTheBerlios/espressopp | src/analysis/Energy.py | Python | gpl-3.0 | 2,570 | [
"ESPResSo"
] | 78f0914b5380d2801a1fefb14d3f729fdbcb9a9db97fd4c5d6f8d1e4438df379 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/BMED_8813_HAP/Data')
from data import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 41:
j=0
while j < 90:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the eigenvectors (projection matrix), eigenvalues (variance), the mean, the centered data and its covariance
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Edge-1']*30 + ['Surface']*30 + ['Edge-2']*30
PCA_chunk_1 = ['Can-Edge-1']*5 + ['Book-Edge-1']*5 + ['Brown-Cardboard-Box-Edge-1']*5 + ['Cinder-Block-Edge-1']*5 + ['Tin-Box-Edge-1']*5 + ['White-Cardboard-Box-Edge-1']*5 + ['Can-Surface']*5 + ['Book-Surface']*5 + ['Brown-Cardboard-Box-Surface']*5 + ['Cinder-Block-Surface']*5 + ['Tin-Box-Surface']*5 + ['White-Cardboard-Box-Surface']*5 + ['Can-Edge-2']*5 + ['Book-Edge-2']*5 + ['Brown-Cardboard-Box-Edge-2']*5 + ['Cinder-Block-Edge-2']*5 + ['Tin-Box-Edge-2']*5 + ['White-Cardboard-Box-Edge-2']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
#Projected Data:
Y = (W.T)*B
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original[0:41,:]
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
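# Hedged illustration (not executed in the original flow): the cumulative
# explained-variance curve mentioned above could be inspected with, e.g.
#   plot(np.arange(len(perc_total)), perc_total, '-o')
#   xlabel('Number of eigenvalues'); ylabel('Cumulative variance fraction')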
num_PC=1
while num_PC <=20:
Proj = np.zeros((90,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
| tapomayukh/projects_in_python | sandbox_tapo/src/skin_related/BMED_8813_HAP/Features/single_feature/best_kNN_PC/cross_validate_categories_kNN_PC_BMED_8813_HAP_scaled_method_II_shape.py | Python | mit | 4,446 | [
"Mayavi"
] | 4ab2168370e8212873ab6639ff2ce37df4e1ec7ff80cc00f0fccabf63498bd88 |
from distutils.core import setup
setup(name="bikeshed",
version="1.0",
description="bikeshed REST API",
author_email="[email protected]",
url="http://www.bikeshed.io",
keywords="bikeshed",
py_modules=["bikeshed"],
)
| briancurtin/bikeshed | setup.py | Python | mit | 258 | [
"Brian"
] | cf8080315a0717e812f785d79fa508197833d380c2f71a9938fe1e2466cf2b3e |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import os
from unittest.mock import patch
import MDAnalysis as mda
import MDAnalysis.analysis.gnm
from numpy.testing import assert_almost_equal
import numpy as np
import pytest
from MDAnalysisTests.datafiles import GRO, XTC
@pytest.fixture()
def universe():
return mda.Universe(GRO, XTC)
def test_gnm(universe, tmpdir):
output = os.path.join(str(tmpdir), 'output.txt')
gnm = mda.analysis.gnm.GNMAnalysis(universe, ReportVector=output)
gnm.run()
result = gnm.results
assert len(result.times) == 10
assert_almost_equal(gnm.results.times, np.arange(0, 1000, 100), decimal=4)
assert_almost_equal(gnm.results.eigenvalues,
[2.0287113e-15, 4.1471575e-15, 1.8539533e-15, 4.3810359e-15,
3.9607304e-15, 4.1289113e-15, 2.5501084e-15, 4.0498182e-15,
4.2058769e-15, 3.9839431e-15])
def test_gnm_run_step(universe):
gnm = mda.analysis.gnm.GNMAnalysis(universe)
gnm.run(step=3)
result = gnm.results
assert len(result.times) == 4
assert_almost_equal(gnm.results.times, np.arange(0, 1200, 300), decimal=4)
assert_almost_equal(gnm.results.eigenvalues,
[2.0287113e-15, 4.3810359e-15, 2.5501084e-15, 3.9839431e-15])
def test_generate_kirchoff(universe):
gnm = mda.analysis.gnm.GNMAnalysis(universe)
gen = gnm.generate_kirchoff()
assert_almost_equal(gen[0],
[7,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_gnm_SVD_fail(universe):
with patch.object(np.linalg, "svd") as np_load_mock:
np_load_mock.side_effect = np.linalg.LinAlgError
msg = "SVD with cutoff 7.0 failed to converge. "
msg += "Skip frame at 0.0."
with pytest.warns(UserWarning, match=msg):
mda.analysis.gnm.GNMAnalysis(universe).run(stop=1)
def test_closeContactGNMAnalysis(universe):
gnm = mda.analysis.gnm.closeContactGNMAnalysis(universe, weights="size")
gnm.run(stop=2)
result = gnm.results
assert len(result.times) == 2
assert_almost_equal(gnm.results.times, (0, 100), decimal=4)
assert_almost_equal(gnm.results.eigenvalues, [0.1502614, 0.1426407])
gen = gnm.generate_kirchoff()
assert_almost_equal(gen[0],
[16.326744128018923, -2.716098853586913, -1.94736842105263, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, -0.05263157894736842, 0.0, 0.0, 0.0, -3.3541953679557905, 0.0, -1.4210526315789465, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, -1.0423368771244421, -1.3006649542861801, -0.30779350562554625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.927172649945531, -0.7509392614826383,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, -2.263157894736841, -0.24333213169614382])
def test_closeContactGNMAnalysis_weights_None(universe):
gnm = mda.analysis.gnm.closeContactGNMAnalysis(universe, weights=None)
gnm.run(stop=2)
result = gnm.results
assert len(result.times) == 2
assert_almost_equal(gnm.results.times, (0, 100), decimal=4)
assert_almost_equal(gnm.results.eigenvalues, [2.4328739, 2.2967251])
gen = gnm.generate_kirchoff()
assert_almost_equal(gen[0],
[303.0, -58.0, -37.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0,
0.0, 0.0, 0.0, -67.0, 0.0, -27.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -17.0, -15.0,
-6.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, -14.0, -15.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -43.0, -3.0])
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/analysis/test_gnm.py | Python | gpl-2.0 | 6,875 | [
"MDAnalysis"
] | 0cec2cc8103f68934b64beddf9a4b62d04a043521c182b691589f18235485e9f |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2005 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
The core of the Gramps plugin system. This module provides capability to load
plugins from specified directories and provide information about the loaded
plugins.
Plugins are divided into several categories. These are: reports, tools,
importers, exporters, quick reports, and document generators.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
import os
import sys
import re
import logging
LOG = logging.getLogger('._manager')
LOG.propagate = True
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..config import config
from . import PluginRegister, ImportPlugin, ExportPlugin, DocGenPlugin
from ..constfunc import win
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_UNAVAILABLE = _("No description was provided")
#-------------------------------------------------------------------------
#
# BasePluginManager
#
#-------------------------------------------------------------------------
class BasePluginManager:
""" unique singleton storage class for a :class:`.PluginManager`. """
__instance = None
def get_instance():
"""
Use this function to get the instance of the :class:`.PluginManager`
"""
if BasePluginManager.__instance is None:
BasePluginManager.__instance = 1 # Set to 1 for __init__()
BasePluginManager.__instance = BasePluginManager()
return BasePluginManager.__instance
get_instance = staticmethod(get_instance)
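# A hedged usage sketch (the plugin directory below is illustrative):
#   pmgr = BasePluginManager.get_instance()
#   pmgr.reg_plugins('/path/to/plugin/dir', dbstate=None, uistate=None)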
def __init__(self):
""" This function should only be run once by get_instance() """
if BasePluginManager.__instance != 1:
raise Exception("This class is a singleton. "
"Use the get_instance() method")
self.__import_plugins = []
self.__export_plugins = []
self.__docgen_plugins = []
self.__attempt_list = []
self.__failmsg_list = []
self.__external_opt_dict = {}
self.__success_list = []
self.__docgen_names = []
self.__mod2text = {}
self.__modules = {}
self.__pgr = PluginRegister.get_instance()
self.__loaded_plugins = {}
self.__scanned_dirs = []
def reg_plugins(self, direct, dbstate=None, uistate=None,
load_on_reg=False):
"""
Searches the specified directory, and registers python plugin that
are being defined in gpr.py files.
If a relationship calculator for env var LANG is present, it is
immediately loaded so it is available for all.
"""
# if we've already scanned this directory or if the directory does not
# exist, we are done. Should only happen in tests.
# LOG.warning("\nPlugin manager registration: %s, load_on_reg=%s,"
# " been_here=%s, pahte exists:%s", direct, load_on_reg,
# direct in self.__scanned_dirs, os.path.isdir(direct))
if os.path.isdir(direct) and direct not in self.__scanned_dirs:
self.__scanned_dirs.append(direct)
for (dirpath, dirnames, filenames) in os.walk(direct,
topdown=True):
for dirname in dirnames[:]:
# Skip hidden and system directories:
if dirname.startswith(".") or dirname in ["po", "locale",
"__pycache__"]:
dirnames.remove(dirname)
# LOG.warning("Plugin dir scanned: %s", dirpath)
self.__pgr.scan_dir(dirpath, filenames, uistate=uistate)
if load_on_reg:
# Run plugins that request to be loaded on startup and
# have a load_on_reg callable.
# first, remove hidden
plugins_to_load = []
for plugin in self.__pgr.filter_load_on_reg():
# LOG.warning("\nFound %s at registration", plugin.id)
if plugin.id in config.get("plugin.hiddenplugins"):
continue
plugins_to_load.append(plugin)
# next, sort on dependencies
# Probably a more efficient method to get dependency graph:
plugins_sorted = []
count = 0
max_count = len(plugins_to_load)
while plugins_to_load:
for plugin in plugins_to_load[:]: # copy of list
# LOG.warning("\nDependencies for %s at registration",
# plugin.id)
delay = False
for depend in plugin.depends_on:
if depend not in [p.id for p in plugins_sorted]:
delay = True
break
if delay:
pass # wait till next loop
else:
if plugin not in plugins_sorted:
plugins_sorted.append(plugin)
if plugin in plugins_to_load:
plugins_to_load.remove(plugin)
count += 1
if count > max_count:
print("Cannot resolve the following plugin dependencies:")
for plugin in plugins_to_load:
print(" Plugin '%s' requires: %s" % (
plugin.id, plugin.depends_on))
break
# now load them:
for plugin in plugins_sorted:
# next line shouldn't be necessary, but this gets called a lot
# of times during Travis test; so avoid multiple copies
plugin.data = []
mod = self.load_plugin(plugin)
if hasattr(mod, "load_on_reg"):
# LOG.warning("\nRun %s at registration", plugin.id)
try:
results = mod.load_on_reg(dbstate, uistate, plugin)
except:
import traceback
traceback.print_exc()
print("Plugin '%s' did not run; continuing..." % plugin.name)
continue
try:
iter(results)
plugin.data += results
except:
plugin.data = results
def is_loaded(self, pdata_id):
"""
return True if plugin is already loaded
"""
if pdata_id in self.__loaded_plugins:
return True
return False
def load_plugin(self, pdata):
"""
Load a :class:`.PluginData` object. This means importing the python
module. Plugin directories are added to sys.path, so the files can be found.
"""
if pdata.id in self.__loaded_plugins:
return self.__loaded_plugins[pdata.id]
need_reload = False
filename = pdata.fname
if filename in self.__modules:
#filename is loaded already, a different plugin in this module
_module = self.__modules[filename]
self.__success_list.append((filename, _module, pdata))
self.__loaded_plugins[pdata.id] = _module
self.__mod2text[_module.__name__] += ' - ' + pdata.description
return _module
if filename in self.__attempt_list:
#new load attempt after a fail, a reload needed
need_reload = True
#remove previous fail of the plugins in this file
dellist = []
for index, data in enumerate(self.__failmsg_list):
if data[0] == filename:
dellist.append(index)
dellist.reverse()
for index in dellist:
del self.__failmsg_list[index]
else:
self.__attempt_list.append(filename)
try:
_module = self.import_plugin(pdata)
if need_reload:
# For some strange reason second importing of a failed plugin
# results in success. Then reload reveals the actual error.
# Looks like a bug in Python.
_module = self.reload(_module, pdata)
if _module:
self.__success_list.append((filename, _module, pdata))
self.__modules[filename] = _module
self.__loaded_plugins[pdata.id] = _module
self.__mod2text[_module.__name__] = pdata.description
return _module
except:
import traceback
traceback.print_exc()
self.__failmsg_list.append((filename, sys.exc_info(), pdata))
return None
def import_plugin(self, pdata):
"""
Rather than just __import__(id), this will add the pdata.fpath
to sys.path first (if needed), import, and then reset path.
"""
module = None
if isinstance(pdata, str):
pdata = self.get_plugin(pdata)
if not pdata:
return None
if pdata.fpath not in sys.path:
if pdata.mod_name:
sys.path.insert(0, pdata.fpath)
try:
module = __import__(pdata.mod_name)
except ValueError as err:
# Python3 on Windows works with unicode in sys.path,
# but the entries are mbcs-encoded when checked for validity
if win():
# we don't want to load Gramps core plugin like this
# only 3rd party plugins
if "gramps" in pdata.fpath:
try:
sys.path.insert(0, ".")
oldwd = os.getcwd()
os.chdir(pdata.fpath)
module = __import__(pdata.mod_name)
os.chdir(oldwd)
sys.path.pop(0)
except ValueError as err:
LOG.warning("Plugin error (from '%s'): %s"
% (pdata.mod_name, err))
else:
LOG.warning("Plugin error (from '%s'): %s"
% (pdata.mod_name, err))
except ImportError as err:
LOG.warning("Plugin error (from '%s'): %s"
% (pdata.mod_name, err))
sys.path.pop(0)
else:
print("WARNING: module cannot be loaded")
else:
module = __import__(pdata.mod_name)
return module
def empty_managed_plugins(self):
""" For some plugins, managed Plugin are used. These are only
reobtained from the registry if this method is called
"""
# TODO: do other lists need to be reset here, too?
self.__import_plugins = []
self.__export_plugins = []
self.__docgen_plugins = []
def reload_plugins(self):
""" Reload previously loaded plugins """
pymod = re.compile(r"^(.*)\.py$")
oldfailmsg = self.__failmsg_list[:]
self.__failmsg_list = []
# attempt to reload all plugins that have succeeded in the past
self.empty_managed_plugins()
self.__loaded_plugins = {}
oldmodules = self.__modules
self.__modules = {}
dellist = []
#reload first modules that loaded successfully previously
for (index, plugin) in enumerate(self.__success_list):
filename = plugin[0]
pdata = plugin[2]
filename = filename.replace('pyc','py')
filename = filename.replace('pyo','py')
if filename in self.__modules:
#module already reloaded, a second plugin in same module
continue
try:
self.reload(plugin[1], pdata)
self.__modules[filename] = plugin[1]
self.__loaded_plugins[pdata.id] = plugin[1]
except:
dellist.append(index)
self.__failmsg_list.append((filename, sys.exc_info(), pdata))
dellist.reverse()
for index in dellist:
del self.__success_list[index]
# Remove previously good plugins that are now bad
# from the registered lists
self.__purge_failed()
# attempt to load the plugins that have failed in the past
for (filename, message, pdata) in oldfailmsg:
self.load_plugin(pdata)
def reload(self, module, pdata):
"""
Reloads modules that might not be in the path.
"""
try:
import imp
fp, pathname, description = imp.find_module(pdata.mod_name, [pdata.fpath])
try:
module = imp.load_module(pdata.mod_name, fp, pathname,description)
finally:
if fp:
fp.close()
except:
if pdata.mod_name in sys.modules:
del sys.modules[pdata.mod_name]
module = self.import_plugin(pdata)
return module
def get_fail_list(self):
""" Return the list of failed plugins. """
return self.__failmsg_list
def get_success_list(self):
""" Return the list of succeeded plugins. """
return self.__success_list
def get_plugin(self, id):
"""
Returns a plugin object from :class:`.PluginRegister` by id.
"""
return self.__pgr.get_plugin(id)
def get_reg_reports(self, gui=True):
""" Return list of registered reports
:param gui: bool indicating if GUI reports or CLI reports must be
returned
"""
return self.__pgr.report_plugins(gui)
def get_reg_tools(self, gui=True):
""" Return list of registered tools
:aram gui: bool indicating if GUI reports or CLI reports must be
returned
"""
return self.__pgr.tool_plugins(gui)
def get_reg_quick_reports(self):
""" Return list of registered quick reports
"""
return self.__pgr.quickreport_plugins()
def get_reg_views(self):
""" Return list of registered views
"""
return self.__pgr.view_plugins()
def get_reg_mapservices(self):
""" Return list of registered mapservices
"""
return self.__pgr.mapservice_plugins()
def get_reg_bookitems(self):
""" Return list of reports registered as bookitem
"""
return self.__pgr.bookitem_plugins()
def get_reg_gramplets(self):
""" Return list of non hidden gramplets.
"""
return self.__pgr.gramplet_plugins()
def get_reg_sidebars(self):
""" Return list of registered sidebars.
"""
return self.__pgr.sidebar_plugins()
def get_reg_databases(self):
""" Return list of registered database backends
"""
return self.__pgr.database_plugins()
def get_external_opt_dict(self):
""" Return the dictionary of external options. """
return self.__external_opt_dict
def get_module_description(self, module):
""" Given a module name, return the module description. """
return self.__mod2text.get(module, '')
def get_reg_importers(self):
""" Return list of registered importers
"""
return self.__pgr.import_plugins()
def get_reg_exporters(self):
""" Return list of registered exporters
"""
return self.__pgr.export_plugins()
def get_reg_docgens(self):
""" Return list of registered docgen
"""
return self.__pgr.docgen_plugins()
def get_reg_general(self, category=None):
""" Return list of registered general libs
"""
return self.__pgr.general_plugins(category)
def load_plugin_category(self, category):
"""
Make sure all plugins of a type are loaded.
"""
for plugin in self.__pgr.general_plugins(category):
if not self.is_loaded(plugin):
self.load_plugin(plugin)
def get_plugin_data(self, category):
"""
Gets all of the data from general plugins of type category.
plugin.data may be a single item, an iterable, or a callable.
>>> PLUGMAN.get_plugin_data('CSS')
<a list of raw data items>
"""
retval = []
data = None
for plugin in self.__pgr.general_plugins(category):
data = plugin.data
try:
iter(data)
retval.extend(data)
except:
retval.append(data)
return retval
def process_plugin_data(self, category):
"""
Gathers all of the data from general plugins of type category,
and pass it to a single process function from one of those
plugins.
>>> PLUGMAN.process_plugin_data('CSS')
<a list of processed data items>
"""
retval = []
data = None
process = None
for plugin in self.__pgr.general_plugins(category):
if plugin.process is not None:
mod = self.load_plugin(plugin)
if hasattr(mod, plugin.process):
process = getattr(mod, plugin.process)
data = plugin.data
if data:
try:
iter(data)
retval.extend(data)
except:
retval.append(data)
# LOG.warning("Process plugin data=%s, %s, items=%s",
# process is not None, category, len(retval))
if process:
return process(retval)
return retval
def get_import_plugins(self):
"""
Get the list of import plugins.
:return: :class:`.ImportPlugin` (a list of ImportPlugin instances)
"""
## TODO: would it not be better to remove ImportPlugin and use
## only PluginData, loading from module when importfunction needed?
if self.__import_plugins == []:
#The module still needs to be imported
for pdata in self.get_reg_importers():
if pdata.id in config.get("plugin.hiddenplugins"):
continue
mod = self.load_plugin(pdata)
if mod:
imp = ImportPlugin(name=pdata.name,
description = pdata.description,
import_function = getattr(mod, pdata.import_function),
extension = pdata.extension)
self.__import_plugins.append(imp)
return self.__import_plugins
def get_export_plugins(self):
"""
Get the list of export plugins.
:return: :class:`.ExportPlugin` (a list of ExportPlugin instances)
"""
## TODO: would it not be better to remove ExportPlugin and use
## only PluginData, loading from module when export/options needed?
if self.__export_plugins == []:
#The modules still need to be imported
for pdata in self.get_reg_exporters():
if pdata.id in config.get("plugin.hiddenplugins"):
continue
mod = self.load_plugin(pdata)
if mod:
options = None
if (pdata.export_options and
hasattr(mod, pdata.export_options)):
options = getattr(mod, pdata.export_options)
exp = ExportPlugin(name=pdata.name_accell,
description = pdata.description,
export_function = getattr(mod, pdata.export_function),
extension = pdata.extension,
config = (pdata.export_options_title, options))
self.__export_plugins.append(exp)
return self.__export_plugins
def get_docgen_plugins(self):
"""
Get the list of docgen plugins.
:return: :class:`.DocGenPlugin` (a list of DocGenPlugin instances)
"""
## TODO: would it not be better to return list of plugindata, and only
## import those docgen that will then actuallly be needed?
## So, only do import when docgen.get_basedoc() is requested
if self.__docgen_plugins == []:
#The modules still need to be imported
hiddenplugins = config.get("plugin.hiddenplugins")
for pdata in self.get_reg_docgens():
if pdata.id in hiddenplugins:
continue
mod = self.load_plugin(pdata)
if mod:
oclass = None
if pdata.optionclass:
oclass = getattr(mod, pdata.optionclass)
dgp = DocGenPlugin(name=pdata.name,
description = pdata.description,
basedoc = getattr(mod, pdata.docclass),
paper = pdata.paper,
style = pdata.style,
extension = pdata.extension,
docoptclass = oclass,
basedocname = pdata.docclass )
self.__docgen_plugins.append(dgp)
return self.__docgen_plugins
def get_docgen_names(self):
"""
Get the list of docgen plugin names.
:return: a list of :class:`.DocGenPlugin` names
"""
if self.__docgen_names == []:
hiddenplugins = config.get("plugin.hiddenplugins")
for pdata in self.get_reg_docgens():
if pdata.id not in hiddenplugins:
self.__docgen_names.append(pdata.docclass)
return self.__docgen_names
def register_option(self, option, guioption):
"""
Register an external option.
Register a mapping from option to guioption for an option
that is not native to Gramps but provided by the plugin writer.
This should typically be called during initialisation of a
:class:`.ReportOptions` class.
:param option: the option class
:type option: class that inherits from gen.plug.menu.Option
:param guioption: the gui-option class
:type guioption: class that inherits from Gtk.Widget.
"""
        self.__external_opt_dict[option] = guioption
def __purge_failed(self):
"""
Purge the failed plugins from the corresponding lists.
"""
failed_module_names = [
os.path.splitext(os.path.basename(filename))[0]
for filename, msg, pdata in self.__failmsg_list
]
self.__export_plugins[:] = [ item for item in self.__export_plugins
if item.get_module_name() not in failed_module_names ][:]
self.__import_plugins[:] = [ item for item in self.__import_plugins
if item.get_module_name() not in failed_module_names ][:]
self.__docgen_plugins[:] = [ item for item in self.__docgen_plugins
if item.get_module_name() not in failed_module_names ][:]
| beernarrd/gramps | gramps/gen/plug/_manager.py | Python | gpl-2.0 | 24,793 | [
"Brian"
] | 8da6f46ffa280186edc0440725b602d5e4e8e73399d609a86b80fecb96d0e49a |
''' Tests for netcdf '''
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, dirname
import shutil
import tempfile
import warnings
from io import BytesIO
from glob import glob
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_, assert_allclose, assert_equal
from pytest import raises as assert_raises
from scipy.io.netcdf import netcdf_file, IS_PYPY
from scipy._lib._numpy_compat import suppress_warnings
from scipy._lib._tmpdirs import in_tempdir
TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
N_EG_ELS = 11 # number of elements for example variable
VARTYPE_EG = 'b' # var type for example variable
@contextmanager
def make_simple(*args, **kwargs):
f = netcdf_file(*args, **kwargs)
f.history = 'Created for a test'
f.createDimension('time', N_EG_ELS)
time = f.createVariable('time', VARTYPE_EG, ('time',))
time[:] = np.arange(N_EG_ELS)
time.units = 'days since 2008-01-01'
f.flush()
yield f
f.close()
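# make_simple/check_simple are a matched pair: the first writes a minimal file
# holding a single 'time' dimension and variable, the second asserts that a
# reopened file still contains exactly that content.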
def check_simple(ncfileobj):
'''Example fileobj tests '''
assert_equal(ncfileobj.history, b'Created for a test')
time = ncfileobj.variables['time']
assert_equal(time.units, b'days since 2008-01-01')
assert_equal(time.shape, (N_EG_ELS,))
assert_equal(time[-1], N_EG_ELS-1)
def assert_mask_matches(arr, expected_mask):
'''
Asserts that the mask of arr is effectively the same as expected_mask.
In contrast to numpy.ma.testutils.assert_mask_equal, this function allows
testing the 'mask' of a standard numpy array (the mask in this case is treated
as all False).
Parameters
----------
arr: ndarray or MaskedArray
Array to test.
expected_mask: array_like of booleans
A list giving the expected mask.
'''
mask = np.ma.getmaskarray(arr)
assert_equal(mask, expected_mask)
def test_read_write_files():
# test round trip for example file
cwd = os.getcwd()
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
with make_simple('simple.nc', 'w') as f:
pass
# read the file we just created in 'a' mode
with netcdf_file('simple.nc', 'a') as f:
check_simple(f)
# add something
f._attributes['appendRan'] = 1
# To read the NetCDF file we just created::
with netcdf_file('simple.nc') as f:
# Using mmap is the default (but not on pypy)
assert_equal(f.use_mmap, not IS_PYPY)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Read it in append (and check mmap is off)
with netcdf_file('simple.nc', 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Now without mmap
with netcdf_file('simple.nc', mmap=False) as f:
# Using mmap is the default
assert_(not f.use_mmap)
check_simple(f)
# To read the NetCDF file we just created, as file object, no
# mmap. When n * n_bytes(var_type) is not divisible by 4, this
# raised an error in pupynere 1.0.12 and scipy rev 5893, because
# calculated vsize was rounding up in units of 4 - see
# https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj) as f:
# by default, don't use mmap for file-like
assert_(not f.use_mmap)
check_simple(f)
# Read file from fileobj, with mmap
with suppress_warnings() as sup:
if IS_PYPY:
sup.filter(RuntimeWarning,
"Cannot close a netcdf_file opened with mmap=True.*")
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj, mmap=True) as f:
assert_(f.use_mmap)
check_simple(f)
# Again read it in append mode (adding another att)
with open('simple.nc', 'r+b') as fobj:
with netcdf_file(fobj, 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
f.createDimension('app_dim', 1)
var = f.createVariable('app_var', 'i', ('app_dim',))
var[:] = 42
# And... check that app_var made it in...
with netcdf_file('simple.nc') as f:
check_simple(f)
assert_equal(f.variables['app_var'][:], 42)
except: # noqa: E722
os.chdir(cwd)
shutil.rmtree(tmpdir)
raise
os.chdir(cwd)
shutil.rmtree(tmpdir)
def test_read_write_sio():
eg_sio1 = BytesIO()
with make_simple(eg_sio1, 'w') as f1:
str_val = eg_sio1.getvalue()
eg_sio2 = BytesIO(str_val)
with netcdf_file(eg_sio2) as f2:
check_simple(f2)
# Test that error is raised if attempting mmap for sio
eg_sio3 = BytesIO(str_val)
assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
# Test 64-bit offset write / read
eg_sio_64 = BytesIO()
with make_simple(eg_sio_64, 'w', version=2) as f_64:
str_val = eg_sio_64.getvalue()
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
# also when version 2 explicitly specified
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64, version=2) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
def test_bytes():
raw_file = BytesIO()
f = netcdf_file(raw_file, mode='w')
# Dataset only has a single variable, dimension and attribute to avoid
# any ambiguity related to order.
f.a = 'b'
f.createDimension('dim', 1)
var = f.createVariable('var', np.int16, ('dim',))
var[0] = -9999
var.c = 'd'
f.sync()
actual = raw_file.getvalue()
expected = (b'CDF\x01'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x0a'
b'\x00\x00\x00\x01'
b'\x00\x00\x00\x03'
b'dim\x00'
b'\x00\x00\x00\x01'
b'\x00\x00\x00\x0c'
b'\x00\x00\x00\x01'
b'\x00\x00\x00\x01'
b'a\x00\x00\x00'
b'\x00\x00\x00\x02'
b'\x00\x00\x00\x01'
b'b\x00\x00\x00'
b'\x00\x00\x00\x0b'
b'\x00\x00\x00\x01'
b'\x00\x00\x00\x03'
b'var\x00'
b'\x00\x00\x00\x01'
b'\x00\x00\x00\x00'
b'\x00\x00\x00\x0c'
b'\x00\x00\x00\x01'
b'\x00\x00\x00\x01'
b'c\x00\x00\x00'
b'\x00\x00\x00\x02'
b'\x00\x00\x00\x01'
b'd\x00\x00\x00'
b'\x00\x00\x00\x03'
b'\x00\x00\x00\x04'
b'\x00\x00\x00\x78'
b'\xd8\xf1\x80\x01')
assert_equal(actual, expected)
def test_encoded_fill_value():
with netcdf_file(BytesIO(), mode='w') as f:
f.createDimension('x', 1)
var = f.createVariable('var', 'S1', ('x',))
assert_equal(var._get_encoded_fill_value(), b'\x00')
var._FillValue = b'\x01'
assert_equal(var._get_encoded_fill_value(), b'\x01')
var._FillValue = b'\x00\x00' # invalid, wrong size
assert_equal(var._get_encoded_fill_value(), b'\x00')
def test_read_example_data():
# read any example data files
for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
with netcdf_file(fname, 'r') as f:
pass
with netcdf_file(fname, 'r', mmap=False) as f:
pass
def test_itemset_no_segfault_on_readonly():
# Regression test for ticket #1202.
# Open the test file in read-only mode.
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist")
with netcdf_file(filename, 'r', mmap=True) as f:
time_var = f.variables['time']
# time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
assert_raises(RuntimeError, time_var.assignValue, 42)
def test_appending_issue_gh_8625():
stream = BytesIO()
with make_simple(stream, mode='w') as f:
f.createDimension('x', 2)
f.createVariable('x', float, ('x',))
f.variables['x'][...] = 1
f.flush()
contents = stream.getvalue()
stream = BytesIO(contents)
with netcdf_file(stream, mode='a') as f:
f.variables['x'][...] = 2
def test_write_invalid_dtype():
dtypes = ['int64', 'uint64']
if np.dtype('int').itemsize == 8: # 64-bit machines
dtypes.append('int')
if np.dtype('uint').itemsize == 8: # 64-bit machines
dtypes.append('uint')
with netcdf_file(BytesIO(), 'w') as f:
f.createDimension('time', N_EG_ELS)
for dt in dtypes:
assert_raises(ValueError, f.createVariable, 'time', dt, ('time',))
def test_flush_rewind():
stream = BytesIO()
with make_simple(stream, mode='w') as f:
x = f.createDimension('x',4)
v = f.createVariable('v', 'i2', ['x'])
v[:] = 1
f.flush()
len_single = len(stream.getvalue())
f.flush()
len_double = len(stream.getvalue())
assert_(len_single == len_double)
def test_dtype_specifiers():
# Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.
# Specifying np.int16 or similar only works from the same commit as this
# comment was made.
with make_simple(BytesIO(), mode='w') as f:
f.createDimension('x',4)
f.createVariable('v1', 'i2', ['x'])
f.createVariable('v2', np.int16, ['x'])
f.createVariable('v3', np.dtype(np.int16), ['x'])
def test_ticket_1720():
io = BytesIO()
items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
with netcdf_file(io, 'w') as f:
f.history = 'Created for a test'
f.createDimension('float_var', 10)
float_var = f.createVariable('float_var', 'f', ('float_var',))
float_var[:] = items
float_var.units = 'metres'
f.flush()
contents = io.getvalue()
io = BytesIO(contents)
with netcdf_file(io, 'r') as f:
assert_equal(f.history, b'Created for a test')
float_var = f.variables['float_var']
assert_equal(float_var.units, b'metres')
assert_equal(float_var.shape, (10,))
assert_allclose(float_var[:], items)
def test_mmaps_segfault():
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
if not IS_PYPY:
with warnings.catch_warnings():
warnings.simplefilter("error")
with netcdf_file(filename, mmap=True) as f:
x = f.variables['lat'][:]
# should not raise warnings
del x
def doit():
with netcdf_file(filename, mmap=True) as f:
return f.variables['lat'][:]
# should not crash
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist")
x = doit()
x.sum()
def test_zero_dimensional_var():
io = BytesIO()
with make_simple(io, 'w') as f:
v = f.createVariable('zerodim', 'i2', [])
# This is checking that .isrec returns a boolean - don't simplify it
# to 'assert not ...'
assert v.isrec is False, v.isrec
f.flush()
def test_byte_gatts():
# Check that global "string" atts work like they did before py3k
# unicode and general bytes confusion
with in_tempdir():
filename = 'g_byte_atts.nc'
f = netcdf_file(filename, 'w')
f._attributes['holy'] = b'grail'
f._attributes['witch'] = 'floats'
f.close()
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['holy'], b'grail')
assert_equal(f._attributes['witch'], b'floats')
f.close()
def test_open_append():
# open 'w' put one attr
with in_tempdir():
filename = 'append_dat.nc'
f = netcdf_file(filename, 'w')
f._attributes['Kilroy'] = 'was here'
f.close()
        # open again in 'a', read the att and add a new one
f = netcdf_file(filename, 'a')
assert_equal(f._attributes['Kilroy'], b'was here')
f._attributes['naughty'] = b'Zoot'
f.close()
# open yet again in 'r' and check both atts
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['Kilroy'], b'was here')
assert_equal(f._attributes['naughty'], b'Zoot')
f.close()
def test_append_recordDimension():
dataSize = 100
with in_tempdir():
# Create file with record time dimension
with netcdf_file('withRecordDimension.nc', 'w') as f:
f.createDimension('time', None)
f.createVariable('time', 'd', ('time',))
f.createDimension('x', dataSize)
x = f.createVariable('x', 'd', ('x',))
x[:] = np.array(range(dataSize))
f.createDimension('y', dataSize)
y = f.createVariable('y', 'd', ('y',))
y[:] = np.array(range(dataSize))
f.createVariable('testData', 'i', ('time', 'x', 'y'))
f.flush()
f.close()
for i in range(2):
# Open the file in append mode and add data
with netcdf_file('withRecordDimension.nc', 'a') as f:
f.variables['time'].data = np.append(f.variables["time"].data, i)
f.variables['testData'][i, :, :] = np.full((dataSize, dataSize), i)
f.flush()
# Read the file and check that append worked
with netcdf_file('withRecordDimension.nc') as f:
assert_equal(f.variables['time'][-1], i)
assert_equal(f.variables['testData'][-1, :, :].copy(), np.full((dataSize, dataSize), i))
assert_equal(f.variables['time'].data.shape[0], i+1)
assert_equal(f.variables['testData'].data.shape[0], i+1)
# Read the file and check that 'data' was not saved as user defined
# attribute of testData variable during append operation
with netcdf_file('withRecordDimension.nc') as f:
with assert_raises(KeyError) as ar:
f.variables['testData']._attributes['data']
ex = ar.value
assert_equal(ex.args[0], 'data')
def test_maskandscale():
t = np.linspace(20, 30, 15)
t[3] = 100
tm = np.ma.masked_greater(t, 99)
fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
with netcdf_file(fname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
found = Temp[:].compressed()
del Temp # Remove ref to mmap, so file can be closed.
expected = np.round(tm.compressed(), 2)
assert_allclose(found, expected)
with in_tempdir():
newfname = 'ms.nc'
f = netcdf_file(newfname, 'w', maskandscale=True)
f.createDimension('Temperature', len(tm))
temp = f.createVariable('Temperature', 'i', ('Temperature',))
temp.missing_value = 9999
temp.scale_factor = 0.01
temp.add_offset = 20
temp[:] = tm
f.close()
with netcdf_file(newfname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
expected = np.round(tm.compressed(), 2)
found = Temp[:].compressed()
del Temp
assert_allclose(found, expected)
# ------------------------------------------------------------------------
# Test reading with masked values (_FillValue / missing_value)
# ------------------------------------------------------------------------
def test_read_withValuesNearFillValue():
# Regression test for ticket #5626
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var1_fillval0'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withNoFillValue():
# For a variable with no fill value, reading data with maskandscale=True
# should return unmasked data
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var2_noFillval'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1,2,3])
def test_read_withFillValueAndMissingValue():
# For a variable with both _FillValue and missing_value, the _FillValue
# should be used
IRRELEVANT_VALUE = 9999
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [True, False, False])
assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3])
def test_read_withMissingValue():
# For a variable with missing_value but not _FillValue, the missing_value
# should be used
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var4_missingValue'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withFillValNaN():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var5_fillvalNaN'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_withChar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var6_char'][:]
assert_mask_matches(vardata, [False, True, False])
def test_read_with2dVar():
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
with netcdf_file(fname, maskandscale=True) as f:
vardata = f.variables['var7_2d'][:]
assert_mask_matches(vardata, [[True, False], [False, False], [False, True]])
def test_read_withMaskAndScaleFalse():
# If a variable has a _FillValue (or missing_value) attribute, but is read
# with maskandscale set to False, the result should be unmasked
fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
# Open file with mmap=False to avoid problems with closing a mmap'ed file
# when arrays referring to its data still exist:
with netcdf_file(fname, maskandscale=False, mmap=False) as f:
vardata = f.variables['var3_fillvalAndMissingValue'][:]
assert_mask_matches(vardata, [False, False, False])
assert_equal(vardata, [1, 2, 3])
| lhilt/scipy | scipy/io/tests/test_netcdf.py | Python | bsd-3-clause | 19,261 | [
"NetCDF"
] | 08e3c126ca23ad52a2391b246a18055648bfc4cde00b24b0491f64da17272c44 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 gimoh
#
# This file is part of devops-utils.
#
# devops-utils is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# devops-utils is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with devops-utils. If not, see <http://www.gnu.org/licenses/>.
"""devops-utils - devops-utils image helper tools"""
__author__ = 'gimoh'
__email__ = '[email protected]'
__version__ = '0.1.0'
PROGS = (
'ansible', 'ansible-doc', 'ansible-galaxy', 'ansible-playbook',
'ansible-vault', 'fab',
)
PLUGIN_DIR = '/etc/devops-utils'
| gimoh/devops-utils | devops_utils/__init__.py | Python | gpl-3.0 | 1,021 | [
"Galaxy"
] | 03cc5c7470322f33a0bec2c8e80747a3ab6d5767a5c6b4e2cff9daa008aac7a8 |
#!/usr/bin/python
import unittest
import os
import random
import json
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.surface import Slab, SlabGenerator, generate_all_slabs, \
get_symmetrically_distinct_miller_indices, get_symmetrically_equivalent_miller_indices, \
ReconstructionGenerator, miller_index_from_sites, get_d, get_slab_regions
from pymatgen.symmetry.groups import SpaceGroup
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.structure_matcher import StructureMatcher
def get_path(path_str):
cwd = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(cwd, "..", "..", "..", "test_files", "surface_tests",
path_str)
return path
class SlabTest(PymatgenTest):
def setUp(self):
zno1 = Structure.from_file(get_path("ZnO-wz.cif"), primitive=False)
zno55 = SlabGenerator(zno1, [1, 0, 0], 5, 5, lll_reduce=False,
center_slab=False).get_slab()
Ti = Structure(Lattice.hexagonal(4.6, 2.82), ["Ti", "Ti", "Ti"],
[[0.000000, 0.000000, 0.000000],
[0.333333, 0.666667, 0.500000],
[0.666667, 0.333333, 0.500000]])
Ag_fcc = Structure(Lattice.cubic(4.06), ["Ag", "Ag", "Ag", "Ag"],
[[0.000000, 0.000000, 0.000000],
[0.000000, 0.500000, 0.500000],
[0.500000, 0.000000, 0.500000],
[0.500000, 0.500000, 0.000000]])
self.ti = Ti
self.agfcc = Ag_fcc
self.zno1 = zno1
self.zno55 = zno55
self.h = Structure(Lattice.cubic(3), ["H"],
[[0, 0, 0]])
self.libcc = Structure(Lattice.cubic(3.51004), ["Li", "Li"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
def test_init(self):
zno_slab = Slab(self.zno55.lattice, self.zno55.species,
self.zno55.frac_coords,
self.zno55.miller_index,
self.zno55.oriented_unit_cell,
0, self.zno55.scale_factor)
m = self.zno55.lattice.matrix
area = np.linalg.norm(np.cross(m[0], m[1]))
self.assertAlmostEqual(zno_slab.surface_area, area)
self.assertEqual(zno_slab.lattice.parameters, self.zno55.lattice.parameters)
self.assertEqual(zno_slab.oriented_unit_cell.composition, self.zno1.composition)
self.assertEqual(len(zno_slab), 8)
def test_add_adsorbate_atom(self):
zno_slab = Slab(self.zno55.lattice, self.zno55.species,
self.zno55.frac_coords,
self.zno55.miller_index,
self.zno55.oriented_unit_cell,
0, self.zno55.scale_factor)
zno_slab.add_adsorbate_atom([1], 'H', 1)
self.assertEqual(len(zno_slab), 9)
self.assertEqual(str(zno_slab[8].specie), 'H')
self.assertAlmostEqual(zno_slab.get_distance(1, 8), 1.0)
self.assertTrue(zno_slab[8].c > zno_slab[0].c)
m = self.zno55.lattice.matrix
area = np.linalg.norm(np.cross(m[0], m[1]))
self.assertAlmostEqual(zno_slab.surface_area, area)
self.assertEqual(zno_slab.lattice.parameters, self.zno55.lattice.parameters)
def test_get_sorted_structure(self):
species = [str(site.specie) for site in
self.zno55.get_sorted_structure()]
self.assertEqual(species, ["Zn2+"] * 4 + ["O2-"] * 4)
def test_methods(self):
# Test various structure methods
self.zno55.get_primitive_structure()
def test_as_from_dict(self):
d = self.zno55.as_dict()
obj = Slab.from_dict(d)
self.assertEqual(obj.miller_index, (1, 0, 0))
def test_dipole_and_is_polar(self):
self.assertArrayAlmostEqual(self.zno55.dipole, [0, 0, 0])
self.assertFalse(self.zno55.is_polar())
cscl = self.get_structure("CsCl")
cscl.add_oxidation_state_by_element({"Cs": 1, "Cl": -1})
slab = SlabGenerator(cscl, [1, 0, 0], 5, 5, reorient_lattice=False,
lll_reduce=False, center_slab=False).get_slab()
self.assertArrayAlmostEqual(slab.dipole, [-4.209, 0, 0])
self.assertTrue(slab.is_polar())
def test_surface_sites_and_symmetry(self):
# test if surfaces are equivalent by using
# Laue symmetry and surface site equivalence
for bool in [True, False]:
# We will also set the slab to be centered and
# off centered in order to test the center of mass
slabgen = SlabGenerator(self.agfcc, (3, 1, 0), 10, 10,
center_slab=bool)
slab = slabgen.get_slabs()[0]
surf_sites_dict = slab.get_surface_sites()
self.assertEqual(len(surf_sites_dict["top"]),
len(surf_sites_dict["bottom"]))
total_surf_sites = sum([len(surf_sites_dict[key])
for key in surf_sites_dict.keys()])
self.assertTrue(slab.is_symmetric())
self.assertEqual(total_surf_sites / 2, 4)
self.assertTrue(slab.have_equivalent_surfaces())
# Test if the ratio of surface sites per area is
# constant, ie are the surface energies the same
r1 = total_surf_sites / (2 * slab.surface_area)
slabgen = SlabGenerator(self.agfcc, (3, 1, 0), 10, 10,
primitive=False)
slab = slabgen.get_slabs()[0]
surf_sites_dict = slab.get_surface_sites()
total_surf_sites = sum([len(surf_sites_dict[key])
for key in surf_sites_dict.keys()])
r2 = total_surf_sites / (2 * slab.surface_area)
self.assertArrayEqual(r1, r2)
def test_symmetrization(self):
# Restricted to primitive_elemental materials due to the risk of
# broken stoichiometry. For compound materials, use is_polar()
# Get all slabs for P6/mmm Ti and Fm-3m Ag up to index of 2
all_Ti_slabs = generate_all_slabs(self.ti, 2, 10, 10, bonds=None,
tol=1e-3, max_broken_bonds=0,
lll_reduce=False, center_slab=False,
primitive=True, max_normal_search=2,
symmetrize=True)
all_Ag_fcc_slabs = generate_all_slabs(self.agfcc, 2, 10, 10, bonds=None,
tol=1e-3, max_broken_bonds=0,
lll_reduce=False,
center_slab=False,
primitive=True,
max_normal_search=2,
symmetrize=True)
all_slabs = [all_Ti_slabs, all_Ag_fcc_slabs]
for i, slabs in enumerate(all_slabs):
            asymmetric_count = 0
symmetric_count = 0
for i, slab in enumerate(slabs):
sg = SpacegroupAnalyzer(slab)
# Check if a slab is symmetric
if not sg.is_laue():
                    asymmetric_count += 1
else:
symmetric_count += 1
# Check if slabs are all symmetric
            self.assertEqual(asymmetric_count, 0)
self.assertEqual(symmetric_count, len(slabs))
def test_get_symmetric_sites(self):
# Check to see if we get an equivalent site on one
# surface if we add a new site to the other surface
all_Ti_slabs = generate_all_slabs(self.ti, 2, 10, 10, bonds=None,
tol=1e-3, max_broken_bonds=0,
lll_reduce=False, center_slab=False,
primitive=True, max_normal_search=2,
symmetrize=True)
for slab in all_Ti_slabs:
sorted_sites = sorted(slab, key=lambda site: site.frac_coords[2])
site = sorted_sites[-1]
point = np.array(site.frac_coords)
point[2] = point[2] + 0.1
point2 = slab.get_symmetric_site(point)
slab.append("O", point)
slab.append("O", point2)
# Check if slab is all symmetric
sg = SpacegroupAnalyzer(slab)
self.assertTrue(sg.is_laue())
def test_oriented_unit_cell(self):
# Check to see if we get the fully reduced oriented unit
# cell. This will also ensure that the constrain_latt
# parameter for get_primitive_structure is working properly
def surface_area(s):
m = s.lattice.matrix
return np.linalg.norm(np.cross(m[0], m[1]))
all_slabs = generate_all_slabs(self.agfcc, 2, 10, 10,
max_normal_search=3)
for slab in all_slabs:
ouc = slab.oriented_unit_cell
self.assertAlmostEqual(surface_area(slab), surface_area(ouc))
self.assertGreaterEqual(len(slab), len(ouc))
def test_get_slab_regions(self):
# If a slab layer in the slab cell is not completely inside
# the cell (noncontiguous), check that get_slab_regions will
# be able to identify where the slab layers are located
s = self.get_structure("LiFePO4")
slabgen = SlabGenerator(s, (0, 0, 1), 15, 15)
slab = slabgen.get_slabs()[0]
slab.translate_sites([i for i, site in enumerate(slab)], [0, 0, -0.25])
bottom_c, top_c = [], []
for site in slab:
if site.frac_coords[2] < 0.5:
bottom_c.append(site.frac_coords[2])
else:
top_c.append(site.frac_coords[2])
ranges = get_slab_regions(slab)
self.assertEqual(tuple(ranges[0]), (0, max(bottom_c)))
self.assertEqual(tuple(ranges[1]), (min(top_c), 1))
def test_as_dict(self):
slabs = generate_all_slabs(self.ti, 1, 10, 10, bonds=None,
tol=1e-3, max_broken_bonds=0, lll_reduce=False, center_slab=False,
primitive=True)
slab = slabs[0]
s = json.dumps(slab.as_dict())
d = json.loads(s)
self.assertEqual(slab, Slab.from_dict(d))
class SlabGeneratorTest(PymatgenTest):
def setUp(self):
lattice = Lattice.cubic(3.010)
frac_coords = [[0.00000, 0.00000, 0.00000],
[0.00000, 0.50000, 0.50000],
[0.50000, 0.00000, 0.50000],
[0.50000, 0.50000, 0.00000],
[0.50000, 0.00000, 0.00000],
[0.50000, 0.50000, 0.50000],
[0.00000, 0.00000, 0.50000],
[0.00000, 0.50000, 0.00000]]
species = ['Mg', 'Mg', 'Mg', 'Mg', 'O', 'O', 'O', 'O']
self.MgO = Structure(lattice, species, frac_coords)
self.MgO.add_oxidation_state_by_element({"Mg": 2, "O": -6})
lattice_Dy = Lattice.hexagonal(3.58, 25.61)
frac_coords_Dy = [[0.00000, 0.00000, 0.00000],
[0.66667, 0.33333, 0.11133],
[0.00000, 0.00000, 0.222],
[0.66667, 0.33333, 0.33333],
[0.33333, 0.66666, 0.44467],
[0.66667, 0.33333, 0.55533],
[0.33333, 0.66667, 0.66667],
[0.00000, 0.00000, 0.778],
[0.33333, 0.66667, 0.88867]]
species_Dy = ['Dy', 'Dy', 'Dy', 'Dy', 'Dy', 'Dy', 'Dy', 'Dy', 'Dy']
self.Dy = Structure(lattice_Dy, species_Dy, frac_coords_Dy)
def test_get_slab(self):
s = self.get_structure("LiFePO4")
gen = SlabGenerator(s, [0, 0, 1], 10, 10)
s = gen.get_slab(0.25)
self.assertAlmostEqual(s.lattice.abc[2], 20.820740000000001)
fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"],
[[0, 0, 0]])
gen = SlabGenerator(fcc, [1, 1, 1], 10, 10, max_normal_search=1)
slab = gen.get_slab()
self.assertEqual(len(slab), 6)
gen = SlabGenerator(fcc, [1, 1, 1], 10, 10, primitive=False, max_normal_search=1)
slab_non_prim = gen.get_slab()
self.assertEqual(len(slab_non_prim), len(slab) * 4)
# Some randomized testing of cell vectors
for i in range(1, 231):
i = random.randint(1, 230)
sg = SpaceGroup.from_int_number(i)
if sg.crystal_system == "hexagonal" or (sg.crystal_system == "trigonal" and (sg.symbol.endswith("H") or
sg.int_number in [
143, 144, 145,
147, 149, 150,
151, 152,
153, 154, 156,
157, 158, 159,
162, 163,
164, 165])):
latt = Lattice.hexagonal(5, 10)
else:
# Cubic lattice is compatible with all other space groups.
latt = Lattice.cubic(5)
s = Structure.from_spacegroup(i, latt, ["H"], [[0, 0, 0]])
miller = (0, 0, 0)
while miller == (0, 0, 0):
miller = (random.randint(0, 6), random.randint(0, 6),
random.randint(0, 6))
gen = SlabGenerator(s, miller, 10, 10)
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
def test_normal_search(self):
fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"],
[[0, 0, 0]])
for miller in [(1, 0, 0), (1, 1, 0), (1, 1, 1), (2, 1, 1)]:
gen = SlabGenerator(fcc, miller, 10, 10)
gen_normal = SlabGenerator(fcc, miller, 10, 10,
max_normal_search=max(miller))
slab = gen_normal.get_slab()
self.assertAlmostEqual(slab.lattice.alpha, 90)
self.assertAlmostEqual(slab.lattice.beta, 90)
self.assertGreaterEqual(len(gen_normal.oriented_unit_cell),
len(gen.oriented_unit_cell))
graphite = self.get_structure("Graphite")
for miller in [(1, 0, 0), (1, 1, 0), (0, 0, 1), (2, 1, 1)]:
gen = SlabGenerator(graphite, miller, 10, 10)
gen_normal = SlabGenerator(graphite, miller, 10, 10,
max_normal_search=max(miller))
self.assertGreaterEqual(len(gen_normal.oriented_unit_cell),
len(gen.oriented_unit_cell))
sc = Structure(Lattice.hexagonal(3.32, 5.15), ["Sc", "Sc"],
[[1 / 3, 2 / 3, 0.25], [2 / 3, 1 / 3, 0.75]])
gen = SlabGenerator(sc, (1, 1, 1), 10, 10, max_normal_search=1)
self.assertAlmostEqual(gen.oriented_unit_cell.lattice.angles[1], 90)
def test_get_slabs(self):
gen = SlabGenerator(self.get_structure("CsCl"), [0, 0, 1], 10, 10)
# Test orthogonality of some internal variables.
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
self.assertEqual(len(gen.get_slabs()), 1)
s = self.get_structure("LiFePO4")
gen = SlabGenerator(s, [0, 0, 1], 10, 10)
self.assertEqual(len(gen.get_slabs()), 5)
self.assertEqual(len(gen.get_slabs(bonds={("P", "O"): 3})), 2)
        # There are no slabs in LFP that do not break either P-O or Fe-O
        # bonds for a miller index of [0, 0, 1].
self.assertEqual(len(gen.get_slabs(
bonds={("P", "O"): 3, ("Fe", "O"): 3})), 0)
# If we allow some broken bonds, there are a few slabs.
self.assertEqual(len(gen.get_slabs(
bonds={("P", "O"): 3, ("Fe", "O"): 3},
max_broken_bonds=2)), 2)
# At this threshold, only the origin and center Li results in
        # clustering. All other sites are non-clustered. So the number of
        # slabs is the number of sites in the LiFePO4 unit cell - 2 + 1.
self.assertEqual(len(gen.get_slabs(tol=1e-4, ftol=1e-4)), 15)
LiCoO2 = Structure.from_file(get_path("icsd_LiCoO2.cif"),
primitive=False)
gen = SlabGenerator(LiCoO2, [0, 0, 1], 10, 10)
lco = gen.get_slabs(bonds={("Co", "O"): 3})
self.assertEqual(len(lco), 1)
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
scc = Structure.from_spacegroup("Pm-3m", Lattice.cubic(3), ["Fe"],
[[0, 0, 0]])
gen = SlabGenerator(scc, [0, 0, 1], 10, 10)
slabs = gen.get_slabs()
self.assertEqual(len(slabs), 1)
gen = SlabGenerator(scc, [1, 1, 1], 10, 10, max_normal_search=1)
slabs = gen.get_slabs()
self.assertEqual(len(slabs), 1)
# Test whether using units of hkl planes instead of Angstroms for
# min_slab_size and min_vac_size will give us the same number of atoms
natoms = []
for a in [1, 1.4, 2.5, 3.6]:
s = Structure.from_spacegroup("Im-3m", Lattice.cubic(a), ["Fe"],
[[0, 0, 0]])
slabgen = SlabGenerator(s, (1, 1, 1), 10, 10, in_unit_planes=True,
max_normal_search=2)
natoms.append(len(slabgen.get_slab()))
n = natoms[0]
for i in natoms:
self.assertEqual(n, i)
def test_triclinic_TeI(self):
# Test case for a triclinic structure of TeI. Only these three
# Miller indices are used because it is easier to identify which
# atoms should be in a surface together. The closeness of the sites
# in other Miller indices can cause some ambiguity when choosing a
# higher tolerance.
numb_slabs = {(0, 0, 1): 5, (0, 1, 0): 3, (1, 0, 0): 7}
TeI = Structure.from_file(get_path("icsd_TeI.cif"),
primitive=False)
for k, v in numb_slabs.items():
trclnc_TeI = SlabGenerator(TeI, k, 10, 10)
TeI_slabs = trclnc_TeI.get_slabs()
self.assertEqual(v, len(TeI_slabs))
def test_get_orthogonal_c_slab(self):
TeI = Structure.from_file(get_path("icsd_TeI.cif"),
primitive=False)
trclnc_TeI = SlabGenerator(TeI, (0, 0, 1), 10, 10)
TeI_slabs = trclnc_TeI.get_slabs()
slab = TeI_slabs[0]
norm_slab = slab.get_orthogonal_c_slab()
self.assertAlmostEqual(norm_slab.lattice.angles[0], 90)
self.assertAlmostEqual(norm_slab.lattice.angles[1], 90)
def test_get_orthogonal_c_slab_site_props(self):
TeI = Structure.from_file(get_path("icsd_TeI.cif"),
primitive=False)
trclnc_TeI = SlabGenerator(TeI, (0, 0, 1), 10, 10)
TeI_slabs = trclnc_TeI.get_slabs()
slab = TeI_slabs[0]
# Add site property to slab
sd_list = [[True, True, True] for site in slab.sites]
new_sp = slab.site_properties
new_sp['selective_dynamics'] = sd_list
slab_with_site_props = slab.copy(site_properties=new_sp)
# Get orthogonal slab
norm_slab = slab_with_site_props.get_orthogonal_c_slab()
# Check if site properties is consistent (or kept)
self.assertEqual(slab_with_site_props.site_properties, norm_slab.site_properties)
def test_get_tasker2_slabs(self):
# The uneven distribution of ions on the (111) facets of Halite
# type slabs are typical examples of Tasker 3 structures. We
# will test this algo to generate a Tasker 2 structure instead
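        # (For context: in Tasker's classification, a type 3 surface stacks
        # alternately charged layers and carries a net dipole; moving half of
        # the outermost ions to the opposite face gives a dipole-free type 2
        # termination, which is what get_tasker2_slabs() constructs.)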
slabgen = SlabGenerator(self.MgO, (1, 1, 1), 10, 10,
max_normal_search=1)
# We generate the Tasker 3 structure first
slab = slabgen.get_slabs()[0]
self.assertFalse(slab.is_symmetric())
self.assertTrue(slab.is_polar())
# Now to generate the Tasker 2 structure, we must
# ensure there are enough ions on top to move around
slab.make_supercell([2, 1, 1])
slabs = slab.get_tasker2_slabs()
# Check if our Tasker 2 slab is nonpolar and symmetric
for slab in slabs:
self.assertTrue(slab.is_symmetric())
self.assertFalse(slab.is_polar())
def test_nonstoichiometric_symmetrized_slab(self):
# For the (111) halite slab, sometimes a nonstoichiometric
# system is preferred over the stoichiometric Tasker 2.
slabgen = SlabGenerator(self.MgO, (1, 1, 1), 10, 10,
max_normal_search=1)
slabs = slabgen.get_slabs(symmetrize=True)
# We should end up with two terminations, one with
# an Mg rich surface and another O rich surface
self.assertEqual(len(slabs), 2)
for slab in slabs:
self.assertTrue(slab.is_symmetric())
# For a low symmetry primitive_elemental system such as
# R-3m, there should be some nonsymmetric slabs
# without using nonstoichiometric_symmetrized_slab
slabs = generate_all_slabs(self.Dy, 1, 30, 30,
center_slab=True, symmetrize=True)
for s in slabs:
self.assertTrue(s.is_symmetric())
self.assertGreater(len(s), len(self.Dy))
def test_move_to_other_side(self):
# Tests to see if sites are added to opposite side
s = self.get_structure("LiFePO4")
slabgen = SlabGenerator(s, (0, 0, 1), 10, 10, center_slab=True)
slab = slabgen.get_slab()
surface_sites = slab.get_surface_sites()
# check if top sites are moved to the bottom
top_index = [ss[1] for ss in surface_sites["top"]]
slab = slabgen.move_to_other_side(slab, top_index)
all_bottom = [slab[i].frac_coords[2] < slab.center_of_mass[2]
for i in top_index]
self.assertTrue(all(all_bottom))
# check if bottom sites are moved to the top
bottom_index = [ss[1] for ss in surface_sites["bottom"]]
slab = slabgen.move_to_other_side(slab, bottom_index)
all_top = [slab[i].frac_coords[2] > slab.center_of_mass[2]
for i in bottom_index]
self.assertTrue(all(all_top))
class ReconstructionGeneratorTests(PymatgenTest):
def setUp(self):
l = Lattice.cubic(3.51)
species = ["Ni"]
coords = [[0, 0, 0]]
self.Ni = Structure.from_spacegroup("Fm-3m", l, species, coords)
l = Lattice.cubic(2.819000)
species = ["Fe"]
coords = [[0, 0, 0]]
self.Fe = Structure.from_spacegroup("Im-3m", l, species, coords)
self.Si = Structure.from_spacegroup("Fd-3m", Lattice.cubic(5.430500),
["Si"], [(0, 0, 0.5)])
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..",
"reconstructions_archive.json")) as data_file:
self.rec_archive = json.load(data_file)
def test_build_slab(self):
# First lets test a reconstruction where we only remove atoms
recon = ReconstructionGenerator(self.Ni, 10, 10,
"fcc_110_missing_row_1x2")
slab = recon.get_unreconstructed_slabs()[0]
recon_slab = recon.build_slabs()[0]
self.assertTrue(recon_slab.reconstruction)
self.assertEqual(len(slab), len(recon_slab) + 2)
self.assertTrue(recon_slab.is_symmetric())
# Test if the ouc corresponds to the reconstructed slab
recon_ouc = recon_slab.oriented_unit_cell
ouc = slab.oriented_unit_cell
self.assertEqual(ouc.lattice.b * 2, recon_ouc.lattice.b)
self.assertEqual(len(ouc) * 2, len(recon_ouc))
# Test a reconstruction where we simply add atoms
recon = ReconstructionGenerator(self.Ni, 10, 10,
"fcc_111_adatom_t_1x1")
slab = recon.get_unreconstructed_slabs()[0]
recon_slab = recon.build_slabs()[0]
self.assertEqual(len(slab), len(recon_slab) - 2)
self.assertTrue(recon_slab.is_symmetric())
# If a slab references another slab,
# make sure it is properly generated
recon = ReconstructionGenerator(self.Ni, 10, 10,
"fcc_111_adatom_ft_1x1")
slab = recon.build_slabs()[0]
        self.assertTrue(slab.is_symmetric())
# Test a reconstruction where it works on a specific
# termination (Fd-3m (111))
recon = ReconstructionGenerator(self.Si, 10, 10,
"diamond_111_1x2")
slab = recon.get_unreconstructed_slabs()[0]
recon_slab = recon.build_slabs()[0]
self.assertEqual(len(slab), len(recon_slab) - 8)
self.assertTrue(recon_slab.is_symmetric())
# Test a reconstruction where terminations give
# different reconstructions with a non-primitive_elemental system
def test_get_d(self):
        # Ensure that regardless of the size of the vacuum or slab
# layer, the spacing between atomic layers should be the same
recon = ReconstructionGenerator(self.Si, 10, 10,
"diamond_100_2x1")
recon2 = ReconstructionGenerator(self.Si, 20, 10,
"diamond_100_2x1")
s1 = recon.get_unreconstructed_slabs()[0]
s2 = recon2.get_unreconstructed_slabs()[0]
self.assertAlmostEqual(get_d(s1), get_d(s2))
@unittest.skip("This test relies on neighbor orders and is hard coded. Disable temporarily")
def test_previous_reconstructions(self):
# Test to see if we generated all reconstruction
# types correctly and nothing changes
m = StructureMatcher()
for n in self.rec_archive.keys():
if "base_reconstruction" in self.rec_archive[n].keys():
arch = self.rec_archive[
self.rec_archive[n]["base_reconstruction"]]
sg = arch["spacegroup"]["symbol"]
else:
sg = self.rec_archive[n]["spacegroup"]["symbol"]
if sg == "Fm-3m":
rec = ReconstructionGenerator(self.Ni, 20, 20, n)
el = self.Ni[0].species_string
elif sg == "Im-3m":
rec = ReconstructionGenerator(self.Fe, 20, 20, n)
el = self.Fe[0].species_string
elif sg == "Fd-3m":
rec = ReconstructionGenerator(self.Si, 20, 20, n)
el = self.Si[0].species_string
slabs = rec.build_slabs()
s = Structure.from_file(get_path(os.path.join("reconstructions",
el + "_" + n + ".cif")))
self.assertTrue(any(
[len(m.group_structures([s, slab])) == 1 for slab in slabs]))
class MillerIndexFinderTests(PymatgenTest):
def setUp(self):
self.cscl = Structure.from_spacegroup(
"Pm-3m", Lattice.cubic(4.2), ["Cs", "Cl"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
self.Fe = Structure.from_spacegroup(
"Im-3m", Lattice.cubic(2.82), ["Fe"],
[[0, 0, 0]])
mglatt = Lattice.from_parameters(3.2, 3.2, 5.13, 90, 90, 120)
self.Mg = Structure(mglatt, ["Mg", "Mg"],
[[1 / 3, 2 / 3, 1 / 4],
[2 / 3, 1 / 3, 3 / 4]])
self.lifepo4 = self.get_structure("LiFePO4")
self.tei = Structure.from_file(get_path("icsd_TeI.cif"),
primitive=False)
self.LiCoO2 = Structure.from_file(get_path("icsd_LiCoO2.cif"),
primitive=False)
self.p1 = Structure(Lattice.from_parameters(3, 4, 5, 31, 43, 50),
["H", "He"], [[0, 0, 0], [0.1, 0.2, 0.3]])
self.graphite = self.get_structure("Graphite")
self.trigBi = Structure(Lattice.from_parameters(3, 3, 10, 90, 90, 120),
["Bi", "Bi", "Bi", "Bi", "Bi", "Bi"],
[[0.3333, 0.6666, 0.39945113],
[0.0000, 0.0000, 0.26721554],
[0.0000, 0.0000, 0.73278446],
[0.6666, 0.3333, 0.60054887],
[0.6666, 0.3333, 0.06611779],
[0.3333, 0.6666, 0.93388221]])
def test_get_symmetrically_distinct_miller_indices(self):
# Tests to see if the function obtains the known number of unique slabs
indices = get_symmetrically_distinct_miller_indices(self.cscl, 1)
self.assertEqual(len(indices), 3)
indices = get_symmetrically_distinct_miller_indices(self.cscl, 2)
self.assertEqual(len(indices), 6)
self.assertEqual(
len(get_symmetrically_distinct_miller_indices(self.lifepo4, 1)), 7)
# The TeI P-1 structure should have 13 unique millers (only inversion
# symmetry eliminates pairs)
indices = get_symmetrically_distinct_miller_indices(self.tei, 1)
self.assertEqual(len(indices), 13)
# P1 and P-1 should have the same # of miller indices since surfaces
# always have inversion symmetry.
indices = get_symmetrically_distinct_miller_indices(self.p1, 1)
self.assertEqual(len(indices), 13)
indices = get_symmetrically_distinct_miller_indices(self.graphite, 2)
self.assertEqual(len(indices), 12)
# Now try a trigonal system.
indices = get_symmetrically_distinct_miller_indices(self.trigBi, 2, return_hkil=True)
self.assertEqual(len(indices), 17)
self.assertTrue(all([len(hkl) == 4 for hkl in indices]))
def test_get_symmetrically_equivalent_miller_indices(self):
# Tests to see if the function obtains all equivalent hkl for cubic (100)
indices001 = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, -1), (0, -1, 0), (-1, 0, 0)]
indices = get_symmetrically_equivalent_miller_indices(self.cscl, (1, 0, 0))
self.assertTrue(all([hkl in indices for hkl in indices001]))
# Tests to see if it captures expanded Miller indices in the family e.g. (001) == (002)
hcp_indices_100 = get_symmetrically_equivalent_miller_indices(self.Mg, (1, 0, 0))
hcp_indices_200 = get_symmetrically_equivalent_miller_indices(self.Mg, (2, 0, 0))
self.assertEqual(len(hcp_indices_100) * 2, len(hcp_indices_200))
self.assertEqual(len(hcp_indices_100), 6)
self.assertTrue(all([len(hkl) == 4 for hkl in hcp_indices_100]))
def test_generate_all_slabs(self):
slabs = generate_all_slabs(self.cscl, 1, 10, 10)
# Only three possible slabs, one each in (100), (110) and (111).
self.assertEqual(len(slabs), 3)
# make sure it generates reconstructions
slabs = generate_all_slabs(self.Fe, 1, 10, 10,
include_reconstructions=True)
# Four possible slabs, (100), (110), (111) and the zigzag (100).
self.assertEqual(len(slabs), 4)
slabs = generate_all_slabs(self.cscl, 1, 10, 10,
bonds={("Cs", "Cl"): 4})
# No slabs if we don't allow broken Cs-Cl
self.assertEqual(len(slabs), 0)
slabs = generate_all_slabs(self.cscl, 1, 10, 10,
bonds={("Cs", "Cl"): 4},
max_broken_bonds=100)
self.assertEqual(len(slabs), 3)
slabs2 = generate_all_slabs(self.lifepo4, 1, 10, 10,
bonds={("P", "O"): 3, ("Fe", "O"): 3})
self.assertEqual(len(slabs2), 0)
        # There should be only one possible stable surface, and it lies
        # in the (001) oriented unit cell
slabs3 = generate_all_slabs(self.LiCoO2, 1, 10, 10,
bonds={("Co", "O"): 3})
self.assertEqual(len(slabs3), 1)
mill = (0, 0, 1)
for s in slabs3:
self.assertEqual(s.miller_index, mill)
slabs1 = generate_all_slabs(self.lifepo4, 1, 10, 10, tol=0.1,
bonds={("P", "O"): 3})
self.assertEqual(len(slabs1), 4)
# Now we test this out for repair_broken_bonds()
slabs1_repair = generate_all_slabs(self.lifepo4, 1, 10, 10, tol=0.1,
bonds={("P", "O"): 3}, repair=True)
self.assertGreater(len(slabs1_repair), len(slabs1))
# Lets see if there are no broken PO4 polyhedrons
miller_list = get_symmetrically_distinct_miller_indices(self.lifepo4, 1)
all_miller_list = []
for slab in slabs1_repair:
hkl = tuple(slab.miller_index)
if hkl not in all_miller_list:
all_miller_list.append(hkl)
broken = []
for site in slab:
if site.species_string == "P":
neighbors = slab.get_neighbors(site, 3)
cn = 0
for nn in neighbors:
cn += 1 if nn[0].species_string == "O" else 0
broken.append(cn != 4)
self.assertFalse(any(broken))
# check if we were able to produce at least one
        # termination for each distinct Miller index
self.assertEqual(len(miller_list), len(all_miller_list))
def test_miller_index_from_sites(self):
"""Test surface miller index convenience function"""
# test on a cubic system
m = Lattice.cubic(1)
s1 = np.array([0.5, -1.5, 3])
s2 = np.array([0.5, 3., -1.5])
s3 = np.array([2.5, 1.5, -4.])
self.assertEqual(miller_index_from_sites(m, [s1, s2, s3]),
(2, 1, 1))
# test casting from matrix to Lattice
m = [[2.319, -4.01662582, 0.], [2.319, 4.01662582, 0.], [0., 0., 7.252]]
s1 = np.array([2.319, 1.33887527, 6.3455])
s2 = np.array([1.1595, 0.66943764, 4.5325])
s3 = np.array([1.1595, 0.66943764, 0.9065])
hkl = miller_index_from_sites(m, [s1, s2, s3])
self.assertEqual(hkl, (2, -1, 0))
if __name__ == "__main__":
unittest.main()
| mbkumar/pymatgen | pymatgen/core/tests/test_surface.py | Python | mit | 35,374 | [
"pymatgen"
] | ea48e5be1834160cd9e1bfe10281013827c96568af5e8b928ad1bba2033b079b |
# -*- coding: utf-8 -*-
"""
Convert the simuPOP documentation to Sphinx
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007-2008 by Georg Brandl.
:license: BSD.
"""
import sys
import os, re
import glob
import shutil
import codecs
from os import path
from converter.tokenizer import Tokenizer
from converter.latexparser import DocParser
from converter.restwriter import RestWriter
from converter.filenamemap import (fn_mapping, copyfiles_mapping, newfiles_mapping,
rename_mapping, dirs_to_make, toctree_mapping,
amendments_mapping)
from converter.console import red, green
from converter.docnodes import CommentNode, RootNode, NodeList, ParaSepNode, \
TextNode, EmptyNode, NbspNode, SimpleCmdNode, BreakNode, CommandNode, \
DescLineCommandNode, InlineNode, IndexNode, SectioningNode, \
EnvironmentNode, DescEnvironmentNode, TableNode, VerbatimNode, \
ListNode, ItemizeNode, EnumerateNode, DescriptionNode, \
DefinitionsNode, ProductionListNode
from converter.util import umlaut, empty, text
from converter.latexparser import ParserError
class MyDocParser(DocParser):
def __init__(self, *args, **kwargs):
DocParser.__init__(self, *args, **kwargs)
def mk_metadata_handler(self, name, mdname=None, arg='M'):
if mdname is None:
mdname = name
def handler(self):
data = self.parse_args('\\'+name, arg)
self.rootnode.params[mdname] = data[0]
return EmptyNode()
return handler
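    # Note: mk_metadata_handler is invoked at class-definition time (self=None);
    # the returned closure is stored as a method and later runs with the bound
    # parser as self. The handle_* assignments below consume the arguments of
    # LaTeX commands that have no direct reST equivalent (colours, fonts,
    # spacing, bibliography hooks, ...), stash them in rootnode.params and
    # return an EmptyNode, so the commands disappear from the output.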
handle_color = mk_metadata_handler(None, 'color', None, 'M')
handle_lstset = mk_metadata_handler(None, 'lstset')
handle_setcounter = mk_metadata_handler(None, 'setcounter', None, 'MM')
handle_hypersetup = mk_metadata_handler(None, 'hypersetup')
handle_definecolor = mk_metadata_handler(None, 'definecolor', None, 'MMM')
handle_sectionfont = mk_metadata_handler(None, 'sectionfont', None, 'O')
handle_subsectionfont = mk_metadata_handler(None, 'subsectionfont', None, 'O')
handle_subsubsectionfont = mk_metadata_handler(None, 'subsubsectionfont', None, 'O')
handle_makeatother = mk_metadata_handler(None, 'makeatother', None, 'O')
handle_totalheight = mk_metadata_handler(None, 'totalheight', None, 'O')
handle_columnwidth = mk_metadata_handler(None, 'columnwidth', None, 'O')
handle_vspace = mk_metadata_handler(None, 'vspace', None, 'M')
handle_hspace = mk_metadata_handler(None, 'hspace', None, 'M')
handle_hrule = mk_metadata_handler(None, 'hrule', None, 'O')
handle_lstlistoflistings = mk_metadata_handler(None, 'lstlistoflistings', None, 'O')
handle_centering = mk_metadata_handler(None, 'centering', None, 'M')
handle_textwidth = mk_metadata_handler(None, 'textwidth', None, 'O')
handle_end = mk_metadata_handler(None, 'end', None, 'O')
handle_textendash = mk_metadata_handler(None, 'textendash', None, 'O')
#handle_item = mk_metadata_handler(None, 'item', None, 'O')
handle_textmd = mk_metadata_handler(None, 'textmd', None, 'O')
handle_normalsize = mk_metadata_handler(None, 'normalsize', None, 'O')
handle_textcompwordmark = mk_metadata_handler(None, 'textcompwordmark', None, 'O')
handle_citep = mk_metadata_handler(None, 'citep', None, 'O')
handle_citet = mk_metadata_handler(None, 'citet', None, 'O')
handle_citeyearpar = mk_metadata_handler(None, 'citeyearpar', None, 'O')
handle_bibliographystyle = mk_metadata_handler(None, 'bibliographystyle', None, 'O')
handle_bibliography = mk_metadata_handler(None, 'bibliography', None, 'O')
handle_printindex = mk_metadata_handler(None, 'printindex', None, 'O')
def handle_minipage_env(self):
# Ignore the minipage part.
txt = ''
while not txt.endswith('end{minipage}'):
nextl, nextt, nextv, nextr = self.tokens.pop()
txt += nextv
return EmptyNode()
def handle_include(self):
data = self.parse_args('\\include', 'M')[0].text
return EmptyNode()
def handle_newenvironment(self, numOpt=3):
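        # Skip a \newenvironment (or, via handle_newcommand, a \newcommand)
        # definition entirely by consuming numOpt balanced {...} groups,
        # tracking [...] only for nesting depth.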
txt = ''
opt = 0
depth = 0
while True:
nextl, nextt, nextv, nextr = self.tokens.pop()
if nextr == '{' or nextr == '[':
depth += 1
elif nextr == '}' or nextr == ']':
depth -= 1
if nextr == '}' and depth == 0:
opt += 1
if opt == numOpt:
break
return EmptyNode()
def handle_newcommand(self):
return self.handle_newenvironment(2)
def handle_unrecognized(self, name, line):
def handler():
#self.unrecognized.add(name)
return InlineNode('include', name)
return handler
handle_small = mk_metadata_handler(None, '\\small', None, 'O')
handle_ttfamily = mk_metadata_handler(None, '\\small', None, 'O')
handle_textsf = mk_metadata_handler(None, '\\textsf', None, 'O')
handle_slshape = mk_metadata_handler(None, '\\small', None, 'O')
handle_bf = mk_metadata_handler(None, '\\small', None, 'O')
handle_makeatletter = mk_metadata_handler(None, 'makeatletter', None, 'O')
handle_lyxline = mk_metadata_handler(None, 'lyxline', None, 'O')
handle_par = mk_metadata_handler(None, 'par', None, 'O')
handle_rule = mk_metadata_handler(None, 'rule', None, 'O')
handle_hfill = mk_metadata_handler(None, 'hfill', None, 'O')
handle_sloppy = mk_metadata_handler(None, 'sloppy', None, 'O')
handle_lstlistingname = mk_metadata_handler(None, 'lstlistingname', None, 'O')
handle_lstlistlistingname = mk_metadata_handler(None, 'lstlistlistingname', None, 'O')
class MyRestWriter(RestWriter):
def __init__(self, dir = '.', auto_keywords = {}, *args, **kwargs):
RestWriter.__init__(self, *args, **kwargs)
self.dirname = dir
if self.dirname == '':
self.dirname = '.'
self.auto_keywords = auto_keywords
def visit_InlineNode(self, node):
cmdname = node.cmdname
if not node.args:
self.curpar.append(self.simplecmd_mapping[cmdname])
return
content = node.args[0]
if cmdname == 'include':
file = node.args
for dir in ['.', self.dirname, 'build']:
for suffix in ['', '.ref', '.rst', '.txt']:
filename = os.path.join(dir, file + suffix)
if os.path.isfile(filename):
txt = open(filename).read()
self.write(txt)
return
print 'Warning: Failed to find included file for filename "%s".' % file
return
sym = ''
if cmdname in ('code', 'bfcode', 'samp', 'texttt', 'regexp'):
sym = '``'
elif cmdname in ('strong', 'textbf'):
sym = '**'
if sym != '':
cnt = self.get_textonly_node(content, 'code', warn=1)
done = False
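            # Try to turn literal markup into a Sphinx cross-reference: if the
            # text (up to any '(') matches a known keyword, or is the suffix of
            # a dotted full name, emit :role:`name` instead of ``name``.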
if isinstance(cnt, TextNode):
for keyword in self.auto_keywords.keys():
txt = text(cnt).split('(')[0]
leftover = text(cnt)[len(txt):]
match = False
if txt in self.auto_keywords[keyword]:
self.curpar.append(':%s:`%s`' % (keyword, txt))
match = True
elif '.' in txt:
ends = [x.endswith(txt) for x in self.auto_keywords[keyword]]
if True in ends:
fullword = self.auto_keywords[keyword][ends.index(True)]
self.curpar.append(':%s:`~%s`' % (keyword, fullword))
match = True
if match:
if leftover != '':
if leftover.startswith('()'):
# sphinx ignores them!!
self.curpar.append("()")
if len(leftover) > 2:
self.curpar.append("%s%s%s" % (sym, leftover[2:], sym))
elif leftover.startswith('(') and leftover.endswith(')'):
self.curpar.append("(%s%s%s)" % (sym, leftover[1:-1], sym))
else:
self.curpar.append("%s%s%s" % (sym, leftover, sym))
return
# the regular emph/strong case and other stuff
RestWriter.visit_InlineNode(self, node)
def visit_CommentNode(self, node):
# no inline comments -> they are all output at the start of a new paragraph
pass #self.comments.append(node.comment.strip())
def visit_CommandNode(self, node):
cmdname = node.cmdname
if cmdname == 'example_url':
file = text(node.args[0])
file = file.replace('.log', '.py')
# get the file
txt = open(file).read()
outfilename = os.path.split(file)[-1]
with open(os.path.join('build', outfilename), 'w') as outfile:
print >> outfile, '''#!/usr/bin/env python
#
# $File: %s $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
''' % outfilename
print >> outfile, txt
# insert a URL
self.write('`Download %s <%s>`_\n' % (outfilename, outfilename))
else:
RestWriter.visit_CommandNode(self, node)
def convert_file(infile, outfile, doraise=True, splitchap=True,
toctree=None, deflang=None, labelprefix=''):
inf = codecs.open(infile, 'r', 'latin1')
p = MyDocParser(Tokenizer(inf.read()).tokenize(), infile)
if not splitchap:
outf = codecs.open(outfile, 'w', 'utf-8')
else:
outf = None
refFile = os.path.join(os.path.dirname(infile), 'reflist.py')
if os.path.isfile(refFile):
execfile(refFile, globals(), globals())
r = MyRestWriter(os.path.dirname(infile), auto_keywords, outf, splitchap, toctree, deflang, labelprefix)
try:
r.write_document(p.parse())
if splitchap:
outf = codecs.open(outfile, 'w', 'utf-8')
outf.write('.. toctree::\n \n') # :numbered:\n \n')
for ch,chapter in enumerate(r.chapters[1:]):
dir = path.dirname(outfile)
if dir == '':
dir = '.'
chtoc = '%s/%s' % (dir, path.basename(outfile))
chtoc = chtoc.replace('.rst', '_ch%d.rst' % (ch+1))
outf.write(' %s\n' % chtoc[len('%s/' % dir):])
choutf = codecs.open(chtoc, 'w', 'utf-8')
choutf.write(chapter[0].getvalue())
if len(chapter) > 1:
choutf.write('\n.. toctree::\n\n')
for sec,section in enumerate(chapter[1:]):
filename = '%s/%s' % (dir, path.basename(outfile))
filename = filename.replace('.rst', '_ch%d_sec%d.rst' % (ch + 1, sec + 1))
sec_outf = codecs.open(filename, 'w', 'utf-8')
sec_outf.write(section.getvalue())
sec_outf.close()
choutf.write(' %s\n' % filename[len('%s/' % dir):])
choutf.close()
outf.close()
else:
outf.close()
p.finish()
return 1, r.warnings
except Exception, err:
if doraise:
raise
return 0, str(err)
if __name__ == '__main__':
convert_file(*sys.argv[1:])
| BoPeng/simuPOP | doc/tools/convert.py | Python | gpl-2.0 | 12,679 | [
"VisIt"
] | 814ccfc9923431302f3cdeacc17b481cacaa490598f1ef57d01c8cbb0173af5a |
import logging
import numpy as np
import scipy.ndimage as ndim
from skimage.morphology import binary_dilation
from scipy.spatial.distance import dice
import nibabel as nib
from dipy.io.utils import (create_nifti_header, get_reference_info)
from dipy.tracking.streamline import select_random_set_of_streamlines
import dipy.tracking.utils as dtu
logger = logging.getLogger('AFQ.utils.volume')
def transform_inverse_roi(roi, mapping, bundle_name="ROI"):
"""
    Transform an ROI from template space into the subject's space using the
    inverse of the given mapping, then patch up the holes that the non-linear
    transform tends to introduce.
Parameters
----------
roi : Nifti1Image, str, ndarray
The ROI to transform. Can be a path or image, which will be
converted to an ndarray.
mapping : DiffeomorphicMap object
A mapping between DWI space and a template.
bundle_name : str, optional
Name of bundle, which may be useful for error messages.
        Default: "ROI"
    Returns
    -------
    ROI transformed into subject space, after dilation (if needed) and
    hole-filling
"""
if isinstance(roi, str):
roi = nib.load(roi)
if isinstance(roi, nib.Nifti1Image):
roi = roi.get_fdata()
_roi = mapping.transform_inverse(roi, interpolation='linear')
if np.sum(_roi) == 0:
logger.warning(
f'Lost ROI {bundle_name}, performing automatic binary dilation')
_roi = binary_dilation(roi)
_roi = mapping.transform_inverse(_roi, interpolation='linear')
_roi = patch_up_roi(_roi > 0, bundle_name=bundle_name).astype(int)
return _roi
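# Illustrative usage sketch (not part of the original module). The ROI file
# name and bundle name are hypothetical, and `mapping` is assumed to be a
# DiffeomorphicMap produced by an earlier registration step:
#
#   roi_in_subject = transform_inverse_roi("template_CST_roi.nii.gz", mapping,
#                                          bundle_name="CST_L")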
def patch_up_roi(roi, bundle_name="ROI", make_convex=True):
"""
After being non-linearly transformed, ROIs tend to have holes in them.
We perform a couple of computational geometry operations on the ROI to
fix that up.
Parameters
----------
roi : 3D binary array
The ROI after it has been transformed.
    bundle_name : str, optional
        Name of bundle, which may be useful for error messages.
        Default: "ROI"
    make_convex : bool, optional
        Present in the signature but not used by the hole-filling
        implementation below.
    Returns
    -------
    ROI after hole-filling
"""
hole_filled = ndim.binary_fill_holes(roi > 0)
if not np.any(hole_filled):
raise ValueError((
f"{bundle_name} found to be empty after "
"applying the mapping."))
return hole_filled
def density_map(tractogram, n_sls=None, to_vox=False, normalize=False):
"""
Create a streamline density map.
based on:
https://dipy.org/documentation/1.1.1./examples_built/streamline_formats/
Parameters
----------
tractogram : StatefulTractogram
Stateful tractogram whose streamlines are used to make
the density map.
n_sls : int or None, optional
n_sls to randomly select to make the density map.
If None, all streamlines are used.
Default: None
to_vox : bool, optional
Whether to put the stateful tractogram in VOX space before making
the density map.
Default: False
normalize : bool, optional
Whether to normalize maximum values to 1.
Default: False
Returns
-------
Nifti1Image containing the density map.
"""
if to_vox:
tractogram.to_vox()
sls = tractogram.streamlines
if n_sls is not None:
sls = select_random_set_of_streamlines(sls, n_sls)
affine, vol_dims, voxel_sizes, voxel_order = get_reference_info(tractogram)
tractogram_density = dtu.density_map(sls, np.eye(4), vol_dims)
if normalize:
tractogram_density = tractogram_density / tractogram_density.max()
nifti_header = create_nifti_header(affine, vol_dims, voxel_sizes)
density_map_img = nib.Nifti1Image(tractogram_density, affine, nifti_header)
return density_map_img
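# Minimal usage sketch (assumptions: a .trk bundle and its reference anatomy
# exist on disk; the file names are hypothetical):
#
#   from dipy.io.streamline import load_trk
#   sft = load_trk("bundle.trk", "anat.nii.gz")
#   dm_img = density_map(sft, n_sls=1000, to_vox=True, normalize=True)
#   dm_img.to_filename("bundle_density.nii.gz")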
def dice_coeff(arr1, arr2, weighted=True):
"""
Compute Dice's coefficient between two images.
Parameters
----------
arr1 : Nifti1Image, str, ndarray
One ndarray to compare. Can be a path or image, which will be
converted to an ndarray.
arr2 : Nifti1Image, str, ndarray
The other ndarray to compare. Can be a path or image, which will be
converted to an ndarray.
weighted : bool, optional
Whether or not to weight the DICE coefficient as in [Cousineau2017]_.
The weighted Dice coefficient is calculated by adding the sum of all
values in arr1 where arr2 is nonzero to the sum of all values in arr2
where arr1 is nonzero, then dividing that by the sum of all values in
arr1 and arr2.
Default: True
Returns
-------
The dice similarity between the images.
Notes
-----
    .. [Cousineau2017] Cousineau M, Jodoin PM, Morency FC, et al. A test-retest study on
Parkinson's PPMI dataset yields statistically significant white
matter fascicles. Neuroimage Clin. 2017;16:222-233. Published 2017
Jul 25. doi:10.1016/j.nicl.2017.07.020
"""
if isinstance(arr1, str):
arr1 = nib.load(arr1)
if isinstance(arr2, str):
arr2 = nib.load(arr2)
if isinstance(arr1, nib.Nifti1Image):
arr1 = arr1.get_fdata()
if isinstance(arr2, nib.Nifti1Image):
arr2 = arr2.get_fdata()
arr1 = arr1.flatten()
arr2 = arr2.flatten()
if weighted:
return (
np.sum(arr1 * arr2.astype(bool))
+ np.sum(arr2 * arr1.astype(bool)))\
/ (np.sum(arr1) + np.sum(arr2))
else:
# scipy's dice function returns the dice *dissimilarity*
return 1 - dice(
arr1.astype(bool),
arr2.astype(bool))
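# Worked example of the weighted coefficient above (values chosen only for
# illustration):
#
#   arr1 = np.array([0.5, 1.0, 0.0])
#   arr2 = np.array([0.0, 2.0, 3.0])
#   # sum(arr1 where arr2 != 0) = 1.0; sum(arr2 where arr1 != 0) = 2.0
#   # denominator = sum(arr1) + sum(arr2) = 1.5 + 5.0 = 6.5
#   # dice_coeff(arr1, arr2) = (1.0 + 2.0) / 6.5 ≈ 0.46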
| arokem/pyAFQ | AFQ/utils/volume.py | Python | bsd-2-clause | 5,768 | [
"Gaussian"
] | 088416cf8d00e6ee2f0d08336d2601cf4191006030ee13d02c269428bb3215b7 |
"""Tests for wien2k interface."""
import os
import numpy as np
from phonopy.interface.phonopy_yaml import read_cell_yaml
from phonopy.interface.wien2k import parse_wien2k_struct
data_dir = os.path.dirname(os.path.abspath(__file__))
def test_parse_wien2k_struct():
"""Test structure parsing."""
filename_BaGa2 = os.path.join(data_dir, "BaGa2.struct")
cell, _, _, _ = parse_wien2k_struct(filename_BaGa2)
filename = os.path.join(data_dir, "BaGa2-wien2k.yaml")
cell_ref = read_cell_yaml(filename)
assert (np.abs(cell.cell - cell_ref.cell) < 1e-5).all()
diff_pos = cell.scaled_positions - cell_ref.scaled_positions
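    # Subtracting np.rint(diff_pos) on the next line removes integer lattice
    # translations, so fractional coordinates are compared modulo periodic
    # boundary conditions.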
diff_pos -= np.rint(diff_pos)
assert (np.abs(diff_pos) < 1e-5).all()
for s, s_r in zip(cell.symbols, cell_ref.symbols):
assert s == s_r
| atztogo/phonopy | test/interface/test_wien2k.py | Python | bsd-3-clause | 799 | [
"WIEN2k",
"phonopy"
] | c7d7b84bc76c4bfe65f44c47ac92b5535b6b47312260b7bd57bb68ec46db39cd |
import logging
from post_request_task.task import task
from mkt.feed.models import FeedApp, FeedCollection
log = logging.getLogger('z.feed')
@task
def _migrate_collection_colors(ids, model):
"""Migrate deprecated background color (hex) to color (name)."""
cls = FeedApp
if model == 'collection':
cls = FeedCollection
for obj in cls.objects.filter(id__in=ids):
if obj.background_color and not obj.color:
try:
color = {
'#CE001C': 'ruby',
'#F78813': 'amber',
'#00953F': 'emerald',
'#0099D0': 'aquamarine',
'#1E1E9C': 'sapphire',
'#5A197E': 'amethyst',
'#A20D55': 'garnet'
}.get(obj.background_color, 'aquamarine')
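                    # dict.get() already falls back to 'aquamarine' for unknown
                    # hex values, so the KeyError handler below is purely
                    # defensive.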
except KeyError:
continue
obj.update(color=color)
log.info('Migrated %s:%s from %s to %s' %
(model, unicode(obj.id), obj.background_color, color))
| jasonthomas/zamboni | mkt/feed/tasks.py | Python | bsd-3-clause | 1,054 | [
"Amber"
] | 2e22a33207d693aac69598dec17993702fd9facbbb973d787c9aae66329bdab6 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Interfaces to assorted Freesurfer utility programs.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
import os
import re
from nipype.utils.filemanip import fname_presuffix, split_filename
from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec
from nipype.interfaces.base import TraitedSpec, File, traits, OutputMultiPath, isdefined
filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc',
afni='brik', brik='brik', bshort='bshort',
spm='img', analyze='img', analyze4d='img',
bfloat='bfloat', nifti1='img', nii='nii',
niigz='nii.gz')
filetypes = ['cor', 'mgh', 'mgz', 'minc', 'analyze',
'analyze4d', 'spm', 'afni', 'brik', 'bshort',
'bfloat', 'sdt', 'outline', 'otl', 'gdf',
'nifti1', 'nii', 'niigz']
class SampleToSurfaceInputSpec(FSTraitedSpec):
source_file = File(exists=True, mandatory=True, argstr="--mov %s",
desc="volume to sample values from")
reference_file = File(exists=True, argstr="--ref %s",
desc="reference volume (default is orig.mgz)")
hemi = traits.Enum("lh", "rh", mandatory=True, argstr="--hemi %s",
desc="target hemisphere")
surface = traits.String(argstr="--surf", desc="target surface (default is white)")
reg_xors = ["reg_file", "reg_header", "mni152reg"]
reg_file = File(exists=True, argstr="--reg %s", required=True, xor=reg_xors,
desc="source-to-reference registration file")
reg_header = traits.Bool(argstr="--regheader %s", requires=["subject_id"],
required=True, xor=reg_xors,
desc="register based on header geometry")
mni152reg = traits.Bool(argstr="--mni152reg",
required=True, xor=reg_xors,
desc="source volume is in MNI152 space")
apply_rot = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr="--rot %.3f %.3f %.3f",
desc="rotation angles (in degrees) to apply to reg matrix")
apply_trans = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr="--trans %.3f %.3f %.3f",
desc="translation (in mm) to apply to reg matrix")
override_reg_subj = traits.Bool(argstr="--srcsubject %s", requires=["subject_id"],
desc="override the subject in the reg file header")
sampling_method = traits.Enum("point", "max", "average",
mandatory=True, argstr="%s", xor=["projection_stem"],
requires=["sampling_range", "sampling_units"],
desc="how to sample -- at a point or at the max or average over a range")
sampling_range = traits.Either(traits.Float,
traits.Tuple(traits.Float, traits.Float, traits.Float),
desc="sampling range - a point or a tuple of (min, max, step)")
sampling_units = traits.Enum("mm", "frac", desc="sampling range type -- either 'mm' or 'frac'")
projection_stem = traits.String(mandatory=True, xor=["sampling_method"],
desc="stem for precomputed linear estimates and volume fractions")
smooth_vol = traits.Float(argstr="--fwhm %.3f", desc="smooth input volume (mm fwhm)")
smooth_surf = traits.Float(argstr="--surf-fwhm %.3f", desc="smooth output surface (mm fwhm)")
interp_method = traits.Enum("nearest", "trilinear", argstr="--interp %s",
desc="interpolation method")
cortex_mask = traits.Bool(argstr="--cortex", xor=["mask_label"],
desc="mask the target surface with hemi.cortex.label")
mask_label = File(exists=True, argstr="--mask %s", xor=["cortex_mask"],
desc="label file to mask output with")
float2int_method = traits.Enum("round", "tkregister", argstr="--float2int %s",
desc="method to convert reg matrix values (default is round)")
fix_tk_reg = traits.Bool(argstr="--fixtkreg", desc="make reg matrix round-compatible")
subject_id = traits.String(desc="subject id")
target_subject = traits.String(argstr="--trgsubject %s",
desc="sample to surface of different subject than source")
surf_reg = traits.Bool(argstr="--surfreg", requires=["target_subject"],
desc="use surface registration to target subject")
ico_order = traits.Int(argstr="--icoorder %d", requires=["target_subject"],
desc="icosahedron order when target_subject is 'ico'")
reshape = traits.Bool(argstr="--reshape", xor=["no_reshape"],
desc="reshape surface vector to fit in non-mgh format")
no_reshape = traits.Bool(argstr="--noreshape", xor=["reshape"],
desc="do not reshape surface vector (default)")
reshape_slices = traits.Int(argstr="--rf %d", desc="number of 'slices' for reshaping")
    scale_input = traits.Float(argstr="--scale %.3f",
                               desc="multiply all intensities by scale factor")
frame = traits.Int(argstr="--frame %d", desc="save only one frame (0-based)")
out_file = File(argstr="--o %s", genfile=True, desc="surface file to write")
out_type = traits.Enum(filetypes, argstr="--out_type %s", desc="output file type")
hits_file = traits.Either(traits.Bool, File(exists=True), argstr="--srchit %s",
desc="save image with number of hits at each voxel")
hits_type = traits.Enum(filetypes, argstr="--srchit_type", desc="hits file type")
vox_file = traits.Either(traits.Bool, File, argstr="--nvox %s",
desc="text file with the number of voxels intersecting the surface")
class SampleToSurfaceOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="surface file")
hits_file = File(exists=True, desc="image with number of hits at each voxel")
vox_file = File(exists=True,
desc="text file with the number of voxels intersecting the surface")
class SampleToSurface(FSCommand):
"""Sample a volume to the cortical surface using Freesurfer's mri_vol2surf.
You must supply a sampling method, range, and units. You can project
either a given distance (in mm) or a given fraction of the cortical
thickness at that vertex along the surface normal from the target surface,
and then set the value of that vertex to be either the value at that point
or the average or maximum value found along the projection vector.
By default, the surface will be saved as a vector with a length equal to the
number of vertices on the target surface. This is not a problem for Freesurfer
programs, but if you intend to use the file with interfaces to another package,
you must set the ``reshape`` input to True, which will factor the surface vector
into a matrix with dimensions compatible with proper Nifti files.
Examples
--------
>>> import nipype.interfaces.freesurfer as fs
>>> sampler = fs.SampleToSurface(hemi="lh")
>>> sampler.inputs.source_file = "cope1.nii.gz"
>>> sampler.inputs.reg_file = "register.dat"
>>> sampler.inputs.sampling_method = "average"
>>> sampler.inputs.sampling_range = 1
>>> sampler.inputs.sampling_units = "frac"
>>> res = sampler.run() # doctest: +SKIP
"""
_cmd = "mri_vol2surf"
input_spec = SampleToSurfaceInputSpec
output_spec = SampleToSurfaceOutputSpec
filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc',
afni='brik', brik='brik', bshort='bshort',
spm='img', analyze='img', analyze4d='img',
bfloat='bfloat', nifti1='img', nii='nii',
niigz='nii.gz')
def _format_arg(self, name, spec, value):
if name == "sampling_method":
range = self.inputs.sampling_range
units = self.inputs.sampling_units
if units == "mm":
units = "dist"
if isinstance(range, tuple):
range = "%.3f %.3f %.3f" % range
else:
range = "%.3f" % range
method = dict(point="", max="-max", average="-avg")[value]
return "--proj%s%s %s" % (units, method, range)
if name == "reg_header":
return spec.argstr % self.inputs.subject_id
if name == "override_reg_subj":
return spec.argstr % self.inputs.subject_id
if name in ["hits_file", "vox_file"]:
return spec.argstr % self._get_outfilename(name)
return super(SampleToSurface, self)._format_arg(name, spec, value)
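    # Illustrative note (not from the original source): with the inputs used in
    # the class docstring above -- sampling_method="average", sampling_range=1,
    # sampling_units="frac" -- the branch above renders the command-line flag
    #   --projfrac-avg 1.000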
def _get_outfilename(self, opt="out_file"):
outfile = getattr(self.inputs, opt)
if not isdefined(outfile) or isinstance(outfile, bool):
if isdefined(self.inputs.out_type):
if opt == "hits_file":
suffix = '_hits.' + self.filemap[self.inputs.out_type]
else:
suffix = '.' + self.filemap[self.inputs.out_type]
elif opt == "hits_file":
suffix = "_hits.mgz"
else:
suffix = '.mgz'
outfile = fname_presuffix(self.inputs.source_file,
newpath=os.getcwd(),
prefix=self.inputs.hemi + ".",
suffix=suffix,
use_ext=False)
return outfile
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = self._get_outfilename()
hitsfile = self.inputs.hits_file
if isdefined(hitsfile):
outputs["hits_file"] = hitsfile
if isinstance(hitsfile, bool):
hitsfile = self._get_outfilename("hits_file")
voxfile = self.inputs.vox_file
if isdefined(voxfile):
if isinstance(voxfile, bool):
voxfile = fname_presuffix(self.inputs.source_file,
newpath=os.getcwd(),
prefix=self.inputs.hemi + ".",
suffix="_vox.txt",
use_ext=False)
outputs["vox_file"] = voxfile
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class SurfaceSmoothInputSpec(FSTraitedSpec):
in_file = File(mandatory=True, argstr="--sval %s", desc="source surface file")
subject_id = traits.String(mandatory=True, argstr="--s %s", desc="subject id of surface file")
hemi = traits.Enum("lh", "rh", argstr="--hemi %s", mandatory=True, desc="hemisphere to operate on")
fwhm = traits.Float(argstr="--fwhm %.4f", xor=["smooth_iters"],
desc="effective FWHM of the smoothing process")
smooth_iters = traits.Int(argstr="--smooth %d", xor=["fwhm"],
desc="iterations of the smoothing process")
cortex = traits.Bool(True, argstr="--cortex", usedefault=True, desc="only smooth within $hemi.cortex.label")
reshape = traits.Bool(argstr="--reshape",
desc="reshape surface vector to fit in non-mgh format")
out_file = File(argstr="--tval %s", genfile=True, desc="surface file to write")
class SurfaceSmoothOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="smoothed surface file")
class SurfaceSmooth(FSCommand):
"""Smooth a surface image with mri_surf2surf.
    The surface is smoothed by an iterative process of averaging the
value at each vertex with those of its adjacent neighbors. You may supply
either the number of iterations to run or a desired effective FWHM of the
smoothing process. If the latter, the underlying program will calculate
the correct number of iterations internally.
.. seealso::
SmoothTessellation() Interface
For smoothing a tessellated surface (e.g. in gifti or .stl)
Examples
--------
>>> import nipype.interfaces.freesurfer as fs
>>> smoother = fs.SurfaceSmooth()
>>> smoother.inputs.in_file = "lh.cope1.mgz"
>>> smoother.inputs.subject_id = "subj_1"
>>> smoother.inputs.hemi = "lh"
>>> smoother.inputs.fwhm = 5
>>> smoother.run() # doctest: +SKIP
"""
_cmd = "mri_surf2surf"
input_spec = SurfaceSmoothInputSpec
output_spec = SurfaceSmoothOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = self.inputs.out_file
if not isdefined(outputs["out_file"]):
in_file = self.inputs.in_file
if isdefined(self.inputs.fwhm):
kernel = self.inputs.fwhm
else:
kernel = self.inputs.smooth_iters
outputs["out_file"] = fname_presuffix(in_file,
suffix="_smooth%d" % kernel,
newpath=os.getcwd())
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class SurfaceTransformInputSpec(FSTraitedSpec):
source_file = File(exists=True, mandatory=True, argstr="--sval %s",
xor=['source_annot_file'],
help="surface file with source values")
source_annot_file = File(exists=True, mandatory=True, argstr="--sval-annot %s",
xor=['source_file'],
help="surface annotation file")
source_subject = traits.String(mandatory=True, argstr="--srcsubject %s",
help="subject id for source surface")
hemi = traits.Enum("lh", "rh", argstr="--hemi %s", mandatory=True,
desc="hemisphere to transform")
target_subject = traits.String(mandatory=True, argstr="--trgsubject %s",
help="subject id of target surface")
target_ico_order = traits.Enum(1, 2, 3, 4, 5, 6, 7, argstr="--trgicoorder %d",
help="order of the icosahedron if target_subject is 'ico'")
source_type = traits.Enum(filetypes, argstr='--sfmt %s', requires=['source_file'],
help="source file format")
target_type = traits.Enum(filetypes, argstr='--tfmt %s', help="output format")
reshape = traits.Bool(argstr="--reshape", help="reshape output surface to conform with Nifti")
reshape_factor = traits.Int(argstr="--reshape-factor", help="number of slices in reshaped image")
out_file = File(argstr="--tval %s", genfile=True, desc="surface file to write")
class SurfaceTransformOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="transformed surface file")
class SurfaceTransform(FSCommand):
"""Transform a surface file from one subject to another via a spherical registration.
Both the source and target subject must reside in your Subjects Directory,
and they must have been processed with recon-all, unless you are transforming
to one of the icosahedron meshes.
Examples
--------
>>> from nipype.interfaces.freesurfer import SurfaceTransform
>>> sxfm = SurfaceTransform()
>>> sxfm.inputs.source_file = "lh.cope1.nii.gz"
>>> sxfm.inputs.source_subject = "my_subject"
>>> sxfm.inputs.target_subject = "fsaverage"
>>> sxfm.inputs.hemi = "lh"
>>> sxfm.run() # doctest: +SKIP
"""
_cmd = "mri_surf2surf"
input_spec = SurfaceTransformInputSpec
output_spec = SurfaceTransformOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = self.inputs.out_file
if not isdefined(outputs["out_file"]):
source = self.inputs.source_file
# Some recon-all files don't have a proper extension (e.g. "lh.thickness")
# so we have to account for that here
bad_extensions = [".%s" % e for e in ["area", "mid", "pial", "avg_curv", "curv", "inflated",
"jacobian_white", "orig", "nofix", "smoothwm", "crv",
"sphere", "sulc", "thickness", "volume", "white"]]
use_ext = True
if split_filename(source)[2] in bad_extensions:
source = source + ".stripme"
use_ext = False
ext = ""
if isdefined(self.inputs.target_type):
ext = "." + filemap[self.inputs.target_type]
use_ext = False
outputs["out_file"] = fname_presuffix(source,
suffix=".%s%s" % (self.inputs.target_subject, ext),
newpath=os.getcwd(),
use_ext=use_ext)
else:
outputs["out_file"] = os.path.abspath(self.inputs.out_file)
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class ApplyMaskInputSpec(FSTraitedSpec):
in_file = File(exists=True, mandatory=True, position=-3, argstr="%s",
desc="input image (will be masked)")
mask_file = File(exists=True, mandatory=True, position=-2, argstr="%s",
desc="image defining mask space")
out_file = File(genfile=True, position=-1, argstr="%s",
desc="final image to write")
xfm_file = File(exists=True, argstr="-xform %s",
desc="LTA-format transformation matrix to align mask with input")
invert_xfm = traits.Bool(argstr="-invert", desc="invert transformation")
xfm_source = File(exists=True, argstr="-lta_src %s", desc="image defining transform source space")
xfm_target = File(exists=True, argstr="-lta_dst %s", desc="image defining transform target space")
use_abs = traits.Bool(argstr="-abs", desc="take absolute value of mask before applying")
mask_thresh = traits.Float(argstr="-T %.4f", desc="threshold mask before applying")
class ApplyMaskOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="masked image")
class ApplyMask(FSCommand):
"""Use Freesurfer's mri_mask to apply a mask to an image.
The mask file need not be binarized; it can be thresholded above a given
value before application. It can also optionally be transformed into input
space with an LTA matrix.
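    Example
    -------
    A minimal sketch; the file names below are placeholders rather than files
    shipped with the nipype test data:
    >>> import nipype.interfaces.freesurfer as fs
    >>> masker = fs.ApplyMask()
    >>> masker.inputs.in_file = "T1.mgz" # doctest: +SKIP
    >>> masker.inputs.mask_file = "brainmask.mgz" # doctest: +SKIP
    >>> masker.inputs.mask_thresh = 0.5
    >>> res = masker.run() # doctest: +SKIP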
"""
_cmd = "mri_mask"
input_spec = ApplyMaskInputSpec
output_spec = ApplyMaskOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = self.inputs.out_file
if not isdefined(outputs["out_file"]):
outputs["out_file"] = fname_presuffix(self.inputs.in_file,
suffix="_masked",
newpath=os.getcwd(),
use_ext=True)
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()[name]
return None
class SurfaceSnapshotsInputSpec(FSTraitedSpec):
subject_id = traits.String(position=1, argstr="%s", mandatory=True,
desc="subject to visualize")
hemi = traits.Enum("lh", "rh", position=2, argstr="%s", mandatory=True,
desc="hemisphere to visualize")
surface = traits.String(position=3, argstr="%s", mandatory=True,
desc="surface to visualize")
show_curv = traits.Bool(argstr="-curv", desc="show curvature", xor=["show_gray_curv"])
show_gray_curv = traits.Bool(argstr="-gray", desc="show curvature in gray", xor=["show_curv"])
overlay = File(exists=True, argstr="-overlay %s", desc="load an overlay volume/surface",
requires=["overlay_range"])
reg_xors = ["overlay_reg", "identity_reg", "mni152_reg"]
overlay_reg = traits.File(exists=True, argstr="-overlay-reg %s", xor=reg_xors,
desc="registration matrix file to register overlay to surface")
identity_reg = traits.Bool(argstr="-overlay-reg-identity", xor=reg_xors,
desc="use the identity matrix to register the overlay to the surface")
mni152_reg = traits.Bool(argstr="-mni152reg", xor=reg_xors,
desc="use to display a volume in MNI152 space on the average subject")
overlay_range = traits.Either(traits.Float,
traits.Tuple(traits.Float, traits.Float),
traits.Tuple(traits.Float, traits.Float, traits.Float),
desc="overlay range--either min, (min, max) or (min, mid, max)",
argstr="%s")
    overlay_range_offset = traits.Float(argstr="-foffset %.3f",
                                        desc="overlay range will be symmetric around offset value")
truncate_overlay = traits.Bool(argstr="-truncphaseflag 1",
desc="truncate the overlay display")
reverse_overlay = traits.Bool(argstr="-revphaseflag 1",
desc="reverse the overlay display")
invert_overlay = traits.Bool(argstr="-invphaseflag 1",
desc="invert the overlay display")
demean_overlay = traits.Bool(argstr="-zm", desc="remove mean from overlay")
annot_file = File(exists=True, argstr="-annotation %s", xor=["annot_name"],
desc="path to annotation file to display")
    annot_name = traits.String(argstr="-annotation %s", xor=["annot_file"],
                               desc="name of annotation to display (must be in $subject/label directory)")
label_file = File(exists=True, argstr="-label %s", xor=["label_name"],
desc="path to label file to display")
    label_name = traits.String(argstr="-label %s", xor=["label_file"],
                               desc="name of label to display (must be in $subject/label directory)")
colortable = File(exists=True, argstr="-colortable %s", desc="load colortable file")
label_under = traits.Bool(argstr="-labels-under", desc="draw label/annotation under overlay")
label_outline = traits.Bool(argstr="-label-outline", desc="draw label/annotation as outline")
patch_file = File(exists=True, argstr="-patch %s", desc="load a patch")
orig_suffix = traits.String(argstr="-orig %s", desc="set the orig surface suffix string")
sphere_suffix = traits.String(argstr="-sphere %s", desc="set the sphere.reg suffix string")
show_color_scale = traits.Bool(argstr="-colscalebarflag 1",
desc="display the color scale bar")
show_color_text = traits.Bool(argstr="-colscaletext 1",
desc="display text in the color scale bar")
six_images = traits.Bool(desc="also take anterior and posterior snapshots")
screenshot_stem = traits.String(desc="stem to use for screenshot file names")
    stem_template_args = traits.List(traits.String, requires=["screenshot_stem"],
                                     desc="input names to use as arguments for a string-formatted stem template")
tcl_script = File(exists=True, argstr="%s", genfile=True,
desc="override default screenshot script")
class SurfaceSnapshotsOutputSpec(TraitedSpec):
snapshots = OutputMultiPath(File(exists=True),
desc="tiff images of the surface from different perspectives")
class SurfaceSnapshots(FSCommand):
"""Use Tksurfer to save pictures of the cortical surface.
By default, this takes snapshots of the lateral, medial, ventral,
and dorsal surfaces. See the ``six_images`` option to add the
anterior and posterior surfaces.
You may also supply your own tcl script (see the Freesurfer wiki for
information on scripting tksurfer). The screenshot stem is set as the
environment variable "_SNAPSHOT_STEM", which you can use in your
own scripts.
    Note that this interface will not run if you do not have graphics
enabled on your system.
Examples
--------
>>> import nipype.interfaces.freesurfer as fs
>>> shots = fs.SurfaceSnapshots(subject_id="fsaverage", hemi="lh", surface="pial")
>>> shots.inputs.overlay = "zstat1.nii.gz"
>>> shots.inputs.overlay_range = (2.3, 6)
>>> shots.inputs.overlay_reg = "register.dat"
>>> res = shots.run() # doctest: +SKIP
"""
_cmd = "tksurfer"
input_spec = SurfaceSnapshotsInputSpec
output_spec = SurfaceSnapshotsOutputSpec
def _format_arg(self, name, spec, value):
if name == "tcl_script":
if not isdefined(value):
return "-tcl snapshots.tcl"
else:
return "-tcl %s" % value
elif name == "overlay_range":
if isinstance(value, float):
return "-fthresh %.3f" % value
else:
if len(value) == 2:
return "-fminmax %.3f %.3f" % value
else:
return "-fminmax %.3f %.3f -fmid %.3f" % (value[0], value[2], value[1])
elif name == "annot_name" and isdefined(value):
# Matching annot by name needs to strip the leading hemi and trailing
# extension strings
if value.endswith(".annot"):
value = value[:-6]
if re.match("%s[\.\-_]" % self.inputs.hemi, value[:3]):
value = value[3:]
return "-annotation %s" % value
return super(SurfaceSnapshots, self)._format_arg(name, spec, value)
def _run_interface(self, runtime):
if not isdefined(self.inputs.screenshot_stem):
stem = "%s_%s_%s" % (
self.inputs.subject_id, self.inputs.hemi, self.inputs.surface)
else:
stem = self.inputs.screenshot_stem
stem_args = self.inputs.stem_template_args
if isdefined(stem_args):
args = tuple([getattr(self.inputs, arg) for arg in stem_args])
stem = stem % args
# Check if the DISPLAY variable is set -- should avoid crashes (might not?)
if not "DISPLAY" in os.environ:
raise RuntimeError("Graphics are not enabled -- cannot run tksurfer")
runtime.environ["_SNAPSHOT_STEM"] = stem
self._write_tcl_script()
runtime = super(SurfaceSnapshots, self)._run_interface(runtime)
# If a display window can't be opened, this will crash on
# aggregate_outputs. Let's try to parse stderr and raise a
# better exception here if that happened.
errors = ["surfer: failed, no suitable display found",
"Fatal Error in tksurfer.bin: could not open display"]
for err in errors:
if err in runtime.stderr:
self.raise_exception(runtime)
# Tksurfer always (or at least always when you run a tcl script)
# exits with a nonzero returncode. We have to force it to 0 here.
runtime.returncode = 0
return runtime
def _write_tcl_script(self):
fid = open("snapshots.tcl", "w")
script = ["save_tiff $env(_SNAPSHOT_STEM)-lat.tif",
"make_lateral_view",
"rotate_brain_y 180",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-med.tif",
"make_lateral_view",
"rotate_brain_x 90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-ven.tif",
"make_lateral_view",
"rotate_brain_x -90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-dor.tif"]
if isdefined(self.inputs.six_images) and self.inputs.six_images:
script.extend(["make_lateral_view",
"rotate_brain_y 90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-pos.tif",
"make_lateral_view",
"rotate_brain_y -90",
"redraw",
"save_tiff $env(_SNAPSHOT_STEM)-ant.tif"])
script.append("exit")
fid.write("\n".join(script))
fid.close()
def _list_outputs(self):
outputs = self._outputs().get()
if not isdefined(self.inputs.screenshot_stem):
stem = "%s_%s_%s" % (self.inputs.subject_id, self.inputs.hemi, self.inputs.surface)
else:
stem = self.inputs.screenshot_stem
stem_args = self.inputs.stem_template_args
if isdefined(stem_args):
args = tuple([getattr(self.inputs, arg) for arg in stem_args])
stem = stem % args
snapshots = ["%s-lat.tif", "%s-med.tif", "%s-dor.tif", "%s-ven.tif"]
if self.inputs.six_images:
snapshots.extend(["%s-pos.tif", "%s-ant.tif"])
snapshots = [self._gen_fname(f % stem, suffix="") for f in snapshots]
outputs["snapshots"] = snapshots
return outputs
def _gen_filename(self, name):
if name == "tcl_script":
return "snapshots.tcl"
return None
class ImageInfoInputSpec(FSTraitedSpec):
in_file = File(exists=True, position=1, argstr="%s", desc="image to query")
class ImageInfoOutputSpec(TraitedSpec):
info = traits.Any(desc="output of mri_info")
out_file = File(exists=True, desc="text file with image information")
data_type = traits.String(desc="image data type")
file_format = traits.String(desc="file format")
TE = traits.String(desc="echo time (msec)")
    TR = traits.String(desc="repetition time (msec)")
TI = traits.String(desc="inversion time (msec)")
dimensions = traits.Tuple(desc="image dimensions (voxels)")
vox_sizes = traits.Tuple(desc="voxel sizes (mm)")
orientation = traits.String(desc="image orientation")
ph_enc_dir = traits.String(desc="phase encode direction")
class ImageInfo(FSCommand):
_cmd = "mri_info"
input_spec = ImageInfoInputSpec
output_spec = ImageInfoOutputSpec
def info_regexp(self, info, field, delim="\n"):
m = re.search("%s\s*:\s+(.+?)%s" % (field, delim), info)
if m:
return m.group(1)
else:
return None
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
info = runtime.stdout
outputs.info = info
# Pulse sequence parameters
for field in ["TE", "TR", "TI"]:
fieldval = self.info_regexp(info, field, ", ")
if fieldval.endswith(" msec"):
fieldval = fieldval[:-5]
setattr(outputs, field, fieldval)
# Voxel info
vox = self.info_regexp(info, "voxel sizes")
vox = tuple(vox.split(", "))
outputs.vox_sizes = vox
dim = self.info_regexp(info, "dimensions")
dim = tuple([int(d) for d in dim.split(" x ")])
outputs.dimensions = dim
outputs.orientation = self.info_regexp(info, "Orientation")
outputs.ph_enc_dir = self.info_regexp(info, "PhEncDir")
# File format and datatype are both keyed by "type"
ftype, dtype = re.findall("%s\s*:\s+(.+?)\n" % "type", info)
outputs.file_format = ftype
outputs.data_type = dtype
return outputs
class MRIsConvertInputSpec(FSTraitedSpec):
"""
Uses Freesurfer's mris_convert to convert surface files to various formats
"""
annot_file = File(exists=True, argstr="--annot %s",
desc="input is annotation or gifti label data")
parcstats_file = File(exists=True, argstr="--parcstats %s",
desc="infile is name of text file containing label/val pairs")
label_file = File(exists=True, argstr="--label %s",
desc="infile is .label file, label is name of this label")
scalarcurv_file = File(exists=True, argstr="-c %s",
desc="input is scalar curv overlay file (must still specify surface)")
functional_file = File(exists=True, argstr="-f %s",
desc="input is functional time-series or other multi-frame data (must specify surface)")
labelstats_outfile = File(exists=False, argstr="--labelstats %s",
desc="outfile is name of gifti file to which label stats will be written")
patch = traits.Bool(argstr="-p", desc="input is a patch, not a full surface")
rescale = traits.Bool(argstr="-r", desc="rescale vertex xyz so total area is same as group average")
normal = traits.Bool(argstr="-n", desc="output is an ascii file where vertex data")
xyz_ascii = traits.Bool(argstr="-a", desc="Print only surface xyz to ascii file")
vertex = traits.Bool(argstr="-v", desc="Writes out neighbors of a vertex in each row")
scale = traits.Float(argstr="-s %.3f", desc="scale vertex xyz by scale")
dataarray_num = traits.Int(argstr="--da_num %d", desc="if input is gifti, 'num' specifies which data array to use")
talairachxfm_subjid = traits.String(argstr="-t %s", desc="apply talairach xfm of subject to vertex xyz")
origname = traits.String(argstr="-o %s", desc="read orig positions")
in_file = File(exists=True, mandatory=True, position=-2, argstr='%s', desc='File to read/convert')
out_file = File(argstr='./%s', position=-1, genfile=True, desc='output filename or True to generate one')
#Not really sure why the ./ is necessary but the module fails without it
out_datatype = traits.Enum("ico", "tri", "stl", "vtk", "gii", "mgh", "mgz", mandatory=True,
desc="These file formats are supported: ASCII: .asc" \
"ICO: .ico, .tri GEO: .geo STL: .stl VTK: .vtk GIFTI: .gii MGH surface-encoded 'volume': .mgh, .mgz")
class MRIsConvertOutputSpec(TraitedSpec):
"""
Uses Freesurfer's mris_convert to convert surface files to various formats
"""
converted = File(exists=True, desc='converted output surface')
class MRIsConvert(FSCommand):
"""
Uses Freesurfer's mris_convert to convert surface files to various formats
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> mris = fs.MRIsConvert()
>>> mris.inputs.in_file = 'lh.pial'
>>> mris.inputs.out_datatype = 'gii'
>>> mris.run() # doctest: +SKIP
"""
_cmd = 'mris_convert'
input_spec = MRIsConvertInputSpec
output_spec = MRIsConvertOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["converted"] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.annot_file):
_, name, ext = split_filename(self.inputs.annot_file)
elif isdefined(self.inputs.parcstats_file):
_, name, ext = split_filename(self.inputs.parcstats_file)
elif isdefined(self.inputs.label_file):
_, name, ext = split_filename(self.inputs.label_file)
elif isdefined(self.inputs.scalarcurv_file):
_, name, ext = split_filename(self.inputs.scalarcurv_file)
elif isdefined(self.inputs.functional_file):
_, name, ext = split_filename(self.inputs.functional_file)
elif isdefined(self.inputs.in_file):
_, name, ext = split_filename(self.inputs.in_file)
return name + ext + "_converted." + self.inputs.out_datatype
class MRITessellateInputSpec(FSTraitedSpec):
"""
Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
"""
    in_file = File(exists=True, mandatory=True, position=-3, argstr='%s', desc='Input volume to tessellate voxels from.')
    label_value = traits.Int(position=-2, argstr='%d', mandatory=True,
                             desc='Label value to tessellate from the input volume. (integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)')
out_file = File(argstr='./%s', position=-1, genfile=True, desc='output filename or True to generate one')
tesselate_all_voxels = traits.Bool(argstr='-a', desc='Tessellate the surface of all voxels with different labels')
use_real_RAS_coordinates = traits.Bool(argstr='-n', desc='Saves surface with real RAS coordinates where c_(r,a,s) != 0')
class MRITessellateOutputSpec(TraitedSpec):
"""
Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
"""
surface = File(exists=True, desc='binary surface of the tessellation ')
class MRITessellate(FSCommand):
"""
Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> tess = fs.MRITessellate()
>>> tess.inputs.in_file = 'aseg.mgz'
>>> tess.inputs.label_value = 17
>>> tess.inputs.out_file = 'lh.hippocampus'
>>> tess.run() # doctest: +SKIP
"""
_cmd = 'mri_tessellate'
input_spec = MRITessellateInputSpec
output_spec = MRITessellateOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['surface'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
return self.inputs.out_file
else:
_, name, ext = split_filename(self.inputs.in_file)
return name + ext + '_' + str(self.inputs.label_value)
class MRIMarchingCubesInputSpec(FSTraitedSpec):
"""
Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
"""
    in_file = File(exists=True, mandatory=True, position=1, argstr='%s', desc='Input volume to tessellate voxels from.')
    label_value = traits.Int(position=2, argstr='%d', mandatory=True,
                             desc='Label value to tessellate from the input volume. (integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)')
connectivity_value = traits.Int(1, position=-1, argstr='%d', usedefault=True,
desc='Alter the marching cubes connectivity: 1=6+,2=18,3=6,4=26 (default=1)')
out_file = File(argstr='./%s', position=-2, genfile=True, desc='output filename or True to generate one')
class MRIMarchingCubesOutputSpec(TraitedSpec):
"""
Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
"""
surface = File(exists=True, desc='binary surface of the tessellation ')
class MRIMarchingCubes(FSCommand):
"""
Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> mc = fs.MRIMarchingCubes()
>>> mc.inputs.in_file = 'aseg.mgz'
>>> mc.inputs.label_value = 17
>>> mc.inputs.out_file = 'lh.hippocampus'
>>> mc.run() # doctest: +SKIP
"""
_cmd = 'mri_mc'
input_spec = MRIMarchingCubesInputSpec
output_spec = MRIMarchingCubesOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['surface'] = self._gen_outfilename()
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
return os.path.abspath(self.inputs.out_file)
else:
_, name, ext = split_filename(self.inputs.in_file)
return os.path.abspath(name + ext + '_' + str(self.inputs.label_value))
class SmoothTessellationInputSpec(FSTraitedSpec):
"""
This program smooths the tessellation of a surface using 'mris_smooth'
"""
    in_file = File(exists=True, mandatory=True, argstr='%s', position=1, desc='Input surface whose tessellation will be smoothed.')
curvature_averaging_iterations = traits.Int(10, usedefault=True, argstr='-a %d', position=-1, desc='Number of curvature averaging iterations (default=10)')
smoothing_iterations = traits.Int(10, usedefault=True, argstr='-n %d', position=-2, desc='Number of smoothing iterations (default=10)')
snapshot_writing_iterations = traits.Int(argstr='-w %d', desc='Write snapshot every "n" iterations')
use_gaussian_curvature_smoothing = traits.Bool(argstr='-g', position=3, desc='Use Gaussian curvature smoothing')
    gaussian_curvature_norm_steps = traits.Int(argstr='%d ', position=4, desc='Number of Gaussian curvature normalization steps')
    gaussian_curvature_smoothing_steps = traits.Int(argstr='%d', position=5, desc='Number of Gaussian curvature smoothing steps')
disable_estimates = traits.Bool(argstr='-nw', desc='Disables the writing of curvature and area estimates')
normalize_area = traits.Bool(argstr='-area', desc='Normalizes the area after smoothing')
use_momentum = traits.Bool(argstr='-m', desc='Uses momentum')
out_file = File(argstr='./%s', position=2, genfile=True, desc='output filename or True to generate one')
out_curvature_file = File(argstr='-c ./%s', desc='Write curvature to ?h.curvname (default "curv")')
out_area_file = File(argstr='-b ./%s', desc='Write area to ?h.areaname (default "area")')
class SmoothTessellationOutputSpec(TraitedSpec):
"""
This program smooths the tessellation of a surface using 'mris_smooth'
"""
surface = File(exists=True, desc='Smoothed surface file ')
class SmoothTessellation(FSCommand):
"""
This program smooths the tessellation of a surface using 'mris_smooth'
.. seealso::
SurfaceSmooth() Interface
For smoothing a scalar field along a surface manifold
Example
-------
>>> import nipype.interfaces.freesurfer as fs
>>> smooth = fs.SmoothTessellation()
>>> smooth.inputs.in_file = 'lh.hippocampus.stl'
>>> smooth.run() # doctest: +SKIP
"""
_cmd = 'mris_smooth'
input_spec = SmoothTessellationInputSpec
output_spec = SmoothTessellationOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['surface'] = self._gen_outfilename()
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
return os.path.abspath(self.inputs.out_file)
else:
_, name, ext = split_filename(self.inputs.in_file)
return os.path.abspath(name + '_smoothed' + ext)
class MakeAverageSubjectInputSpec(FSTraitedSpec):
subjects_ids = traits.List(traits.Str(), argstr='--subjects %s',
desc='freesurfer subjects ids to average',
mandatory=True, sep=' ')
out_name = File('average', argstr='--out %s',
desc='name for the average subject', usedefault=True)
class MakeAverageSubjectOutputSpec(TraitedSpec):
average_subject_name = traits.Str(desc='Output registration file')
class MakeAverageSubject(FSCommand):
"""Make an average freesurfer subject
Examples
--------
>>> from nipype.interfaces.freesurfer import MakeAverageSubject
>>> avg = MakeAverageSubject(subjects_ids=['s1', 's2'])
>>> avg.cmdline
'make_average_subject --out average --subjects s1 s2'
"""
_cmd = 'make_average_subject'
input_spec = MakeAverageSubjectInputSpec
output_spec = MakeAverageSubjectOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['average_subject_name'] = self.inputs.out_name
return outputs
| christianbrodbeck/nipype | nipype/interfaces/freesurfer/utils.py | Python | bsd-3-clause | 44,609 | [
"Gaussian",
"VTK"
] | 49f9b27397b7a10b051d405ea4d38d50a9a157275bbac4089c60cfd4c755602b |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unsmuggle_url,
)
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
class SenateISVPIE(InfoExtractor):
_COMM_MAP = [
["ag", "76440", "http://ag-f.akamaihd.net"],
["aging", "76442", "http://aging-f.akamaihd.net"],
["approps", "76441", "http://approps-f.akamaihd.net"],
["armed", "76445", "http://armed-f.akamaihd.net"],
["banking", "76446", "http://banking-f.akamaihd.net"],
["budget", "76447", "http://budget-f.akamaihd.net"],
["cecc", "76486", "http://srs-f.akamaihd.net"],
["commerce", "80177", "http://commerce1-f.akamaihd.net"],
["csce", "75229", "http://srs-f.akamaihd.net"],
["dpc", "76590", "http://dpc-f.akamaihd.net"],
["energy", "76448", "http://energy-f.akamaihd.net"],
["epw", "76478", "http://epw-f.akamaihd.net"],
["ethics", "76449", "http://ethics-f.akamaihd.net"],
["finance", "76450", "http://finance-f.akamaihd.net"],
["foreign", "76451", "http://foreign-f.akamaihd.net"],
["govtaff", "76453", "http://govtaff-f.akamaihd.net"],
["help", "76452", "http://help-f.akamaihd.net"],
["indian", "76455", "http://indian-f.akamaihd.net"],
["intel", "76456", "http://intel-f.akamaihd.net"],
["intlnarc", "76457", "http://intlnarc-f.akamaihd.net"],
["jccic", "85180", "http://jccic-f.akamaihd.net"],
["jec", "76458", "http://jec-f.akamaihd.net"],
["judiciary", "76459", "http://judiciary-f.akamaihd.net"],
["rpc", "76591", "http://rpc-f.akamaihd.net"],
["rules", "76460", "http://rules-f.akamaihd.net"],
["saa", "76489", "http://srs-f.akamaihd.net"],
["smbiz", "76461", "http://smbiz-f.akamaihd.net"],
["srs", "75229", "http://srs-f.akamaihd.net"],
["uscc", "76487", "http://srs-f.akamaihd.net"],
["vetaff", "76462", "http://vetaff-f.akamaihd.net"],
["arch", "", "http://ussenate-f.akamaihd.net/"]
]
_IE_NAME = 'senate.gov'
_VALID_URL = r'http://www\.senate\.gov/isvp/?\?(?P<qs>.+)'
_TESTS = [{
'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',
'info_dict': {
'id': 'judiciary031715',
'ext': 'mp4',
'title': 'Integrated Senate Video Player',
'thumbnail': 're:^https?://.*\.(?:jpg|png)$',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.senate.gov/isvp/?type=live&comm=commerce&filename=commerce011514.mp4&auto_play=false',
'info_dict': {
'id': 'commerce011514',
'ext': 'mp4',
'title': 'Integrated Senate Video Player'
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.senate.gov/isvp/?type=arch&comm=intel&filename=intel090613&hc_location=ufi',
# checksum differs each time
'info_dict': {
'id': 'intel090613',
'ext': 'mp4',
'title': 'Integrated Senate Video Player'
}
}, {
# From http://www.c-span.org/video/?96791-1
'url': 'http://www.senate.gov/isvp?type=live&comm=banking&filename=banking012715',
'only_matching': True,
}]
@staticmethod
def _search_iframe_url(webpage):
mobj = re.search(
r"<iframe[^>]+src=['\"](?P<url>http://www\.senate\.gov/isvp/?\?[^'\"]+)['\"]",
webpage)
if mobj:
return mobj.group('url')
def _get_info_for_comm(self, committee):
for entry in self._COMM_MAP:
if entry[0] == committee:
return entry[1:]
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
qs = compat_parse_qs(re.match(self._VALID_URL, url).group('qs'))
if not qs.get('filename') or not qs.get('type') or not qs.get('comm'):
raise ExtractorError('Invalid URL', expected=True)
video_id = re.sub(r'.mp4$', '', qs['filename'][0])
webpage = self._download_webpage(url, video_id)
if smuggled_data.get('force_title'):
title = smuggled_data['force_title']
else:
title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, video_id)
poster = qs.get('poster')
thumbnail = poster[0] if poster else None
video_type = qs['type'][0]
committee = video_type if video_type == 'arch' else qs['comm'][0]
stream_num, domain = self._get_info_for_comm(committee)
formats = []
if video_type == 'arch':
filename = video_id if '.' in video_id else video_id + '.mp4'
formats = [{
# All parameters in the query string are necessary to prevent a 403 error
'url': compat_urlparse.urljoin(domain, filename) + '?v=3.1.0&fp=&r=&g=',
}]
else:
hdcore_sign = 'hdcore=3.1.0'
url_params = (domain, video_id, stream_num)
f4m_url = '%s/z/%s_1@%s/manifest.f4m?' % url_params + hdcore_sign
m3u8_url = '%s/i/%s_1@%s/master.m3u8' % url_params
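            # For example, with the 'judiciary031715' test case above these
            # templates expand to (derived from _COMM_MAP, shown for clarity):
            #   http://judiciary-f.akamaihd.net/z/judiciary031715_1@76459/manifest.f4m?hdcore=3.1.0
            #   http://judiciary-f.akamaihd.net/i/judiciary031715_1@76459/master.m3u8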
for entry in self._extract_f4m_formats(f4m_url, video_id, f4m_id='f4m'):
            # URLs without the extra param induce a 404 error
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.append(entry)
for entry in self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', m3u8_id='m3u8'):
mobj = re.search(r'(?P<tag>(?:-p|-b)).m3u8', entry['url'])
if mobj:
entry['format_id'] += mobj.group('tag')
formats.append(entry)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
}
| lzambella/Qyoutube-dl | youtube_dl/extractor/senateisvp.py | Python | gpl-3.0 | 6,265 | [
"EPW"
] | e3793a0fba7b8b31b08e1f9fca0dcdb3a82bcdfcfeec998e0c5483f5292abc23 |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
# Nelson Liu <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numbers
import warnings
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_decrease,
min_impurity_split,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, which
            # indexing with [:, np.newaxis] would not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
if not 1 <= self.min_samples_leaf:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = self.min_samples_leaf
else: # float
if not 0. < self.min_samples_leaf <= 0.5:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
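            # e.g. a float min_samples_leaf of 0.05 with n_samples=1000 yields
            # ceil(0.05 * 1000) = 50 samples required at each leaf.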
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
if not 2 <= self.min_samples_split:
raise ValueError("min_samples_split must be an integer "
"greater than 1 or a float in (0.0, 1.0]; "
"got the integer %s"
% self.min_samples_split)
min_samples_split = self.min_samples_split
else: # float
if not 0. < self.min_samples_split <= 1.:
raise ValueError("min_samples_split must be an integer "
"greater than 1 or a float in (0.0, 1.0]; "
"got the float %s"
% self.min_samples_split)
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either None "
"or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if sample_weight is None:
min_weight_leaf = (self.min_weight_fraction_leaf *
n_samples)
else:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
if self.min_impurity_split is not None:
warnings.warn("The min_impurity_split parameter is deprecated and"
" will be removed in version 0.21. "
"Use the min_impurity_decrease parameter instead.",
DeprecationWarning)
min_impurity_split = self.min_impurity_split
else:
min_impurity_split = 1e-7
if min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than "
"or equal to 0")
if self.min_impurity_decrease < 0.:
raise ValueError("min_impurity_decrease must be greater than "
"or equal to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
self.min_impurity_decrease,
min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes,
self.min_impurity_decrease,
min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking steps.
Don't use this parameter unless you know what you're doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
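Examples
--------
A minimal usage sketch (illustrative only; the two-sample toy data below
is made up and not part of the original docstring):
>>> from sklearn.tree import DecisionTreeClassifier  # doctest: +SKIP
>>> clf = DecisionTreeClassifier(random_state=0).fit([[0], [1]], [0, 1])  # doctest: +SKIP
>>> clf.predict([[0.2], [0.9]])  # doctest: +SKIP
array([0, 1])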
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking steps.
Don't use this parameter unless you know what you're doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
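Examples
--------
Illustrative sketch (toy data made up; leaf ids depend on the fitted
tree structure):
>>> from sklearn.tree import DecisionTreeClassifier  # doctest: +SKIP
>>> clf = DecisionTreeClassifier(random_state=0).fit([[0], [1]], [0, 1])  # doctest: +SKIP
>>> clf.apply([[0.2], [0.9]])  # doctest: +SKIP
array([1, 2])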
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
.. versionadded:: 0.18
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking steps.
Don't use this parameter unless you know what you're doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non-zero elements
indicate that the sample goes through the corresponding nodes.
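Examples
--------
Illustrative sketch (toy data made up; node numbering follows the
fitted tree, root first):
>>> from sklearn.tree import DecisionTreeClassifier  # doctest: +SKIP
>>> clf = DecisionTreeClassifier(random_state=0).fit([[0], [1]], [0, 1])  # doctest: +SKIP
>>> clf.decision_path([[0.2]]).toarray()  # doctest: +SKIP
array([[1, 1, 0]])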
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
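Examples
--------
Illustrative sketch (toy data made up; importances of a fitted tree
sum to 1):
>>> from sklearn.tree import DecisionTreeClassifier  # doctest: +SKIP
>>> clf = DecisionTreeClassifier(random_state=0).fit([[0, 1], [1, 0]], [0, 1])  # doctest: +SKIP
>>> clf.feature_importances_.sum()  # doctest: +SKIP
1.0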
"""
check_is_fitted(self, 'tree_')
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
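For instance (illustrative numbers only): a node holding all
``N = N_t = 100`` samples with impurity 0.5, split into children of 60
samples (impurity 0.3) and 40 samples (impurity 0.2), yields a weighted
decrease of ``1.0 * (0.5 - 0.6 * 0.3 - 0.4 * 0.2) = 0.24``.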
.. versionadded:: 0.19
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
class_weight : dict, list of dicts, "balanced" or None, default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
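As an illustration (made-up labels): for ``y = [0, 0, 0, 1]`` the
"balanced" mode assigns class 0 a weight of ``4 / (2 * 3)`` (about 0.67)
and class 1 a weight of ``4 / (2 * 1) = 2.0``.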
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data and
``max_features=n_features``, if the improvement of the criterion is
identical for several splits enumerated during the search of the best
split. To obtain a deterministic behaviour during fitting,
``random_state`` has to be fixed.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
presort=presort)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels) as integers or strings.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking steps.
Don't use this parameter unless you know what you're doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : object
Returns self.
"""
super(DecisionTreeClassifier, self).fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Run ``check_array`` on X. Allow to bypass several input checking
steps; don't use this parameter unless you know what you're doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
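Examples
--------
Illustrative sketch (toy data made up; each returned row sums to 1):
>>> from sklearn.tree import DecisionTreeClassifier  # doctest: +SKIP
>>> clf = DecisionTreeClassifier(random_state=0).fit([[0], [1]], [0, 1])  # doctest: +SKIP
>>> clf.predict_proba([[0.2]])  # doctest: +SKIP
array([[ 1.,  0.]])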
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion and minimizes the L2 loss
using the mean of each terminal node, "friedman_mse", which uses mean
squared error with Friedman's improvement score for potential splits,
and "mae" for the mean absolute error, which minimizes the L1 loss
using the median of each terminal node.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data and
``max_features=n_features``, if the improvement of the criterion is
identical for several splits enumerated during the search of the best
split. To obtain a deterministic behaviour during fitting,
``random_state`` has to be fixed.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
presort=presort)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : boolean, (default=True)
Allow to bypass several input checking steps.
Don't use this parameter unless you know what you're doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : object
Returns self.
"""
super(DecisionTreeRegressor, self).fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
class_weight : dict, list of dicts, "balanced" or None, default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_decrease=0.,
min_impurity_split=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
random_state=random_state)
| wazeerzulfikar/scikit-learn | sklearn/tree/tree.py | Python | bsd-3-clause | 60,513 | [
"Brian"
] | a7dc3b1a4d263c0be1643dd6f18ca291a2ea365152b33259899f7ffebe8e283d |
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfbase/cidfonts.py
#$Header $
__version__='3.3.0'
__doc__="""CID (Asian multi-byte) font support.
This defines classes to represent CID fonts. They know how to calculate
their own width and how to write themselves into PDF files."""
import os
import marshal
import time
try:
from hashlib import md5
except ImportError:
from md5 import md5
import reportlab
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase._cidfontdata import allowedTypeFaces, allowedEncodings, CIDFontInfo, \
defaultUnicodeEncodings, widthsByUnichar
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfdoc
from reportlab.lib.rl_accel import escapePDF
from reportlab.rl_config import CMapSearchPath
from reportlab.lib.utils import isSeq, isBytes
#quick hackery for 2.0 release. Now we always do unicode, and have built in
#the CMAP data, so any code to load CMap files is not needed.
DISABLE_CMAP = True
def findCMapFile(name):
"Returns full filename, or raises error"
for dirname in CMapSearchPath:
cmapfile = dirname + os.sep + name
if os.path.isfile(cmapfile):
#print "found", cmapfile
return cmapfile
raise IOError('CMAP file for encoding "%s" not found!' % name)
def structToPDF(structure):
"Converts deeply nested structure to PDFdoc dictionary/array objects"
if isinstance(structure,dict):
newDict = {}
for k, v in structure.items():
newDict[k] = structToPDF(v)
return pdfdoc.PDFDictionary(newDict)
elif isSeq(structure):
newList = []
for elem in structure:
newList.append(structToPDF(elem))
return pdfdoc.PDFArray(newList)
else:
return structure
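# Illustrative sketch (hypothetical input, not part of the original module):
# structToPDF({'Name': '/F1', 'W': [1, [100, 200]]}) returns a
# pdfdoc.PDFDictionary whose 'W' entry becomes a pdfdoc.PDFArray wrapping
# [1, PDFArray([100, 200])], while scalar leaves such as '/F1' pass
# through unchanged.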
class CIDEncoding(pdfmetrics.Encoding):
"""Multi-byte encoding. These are loaded from CMAP files.
A CMAP file is like a mini-codec. It defines the correspondence
between code points in the (multi-byte) input data and Character
IDs. """
# aims to do similar things to Brian Hooper's CMap class,
# but I could not get it working and had to rewrite.
# also, we should really rearrange our current encoding
# into a SingleByteEncoding since many of its methods
# should not apply here.
def __init__(self, name, useCache=1):
self.name = name
self._mapFileHash = None
self._codeSpaceRanges = []
self._notDefRanges = []
self._cmap = {}
self.source = None
if not DISABLE_CMAP:
if useCache:
from reportlab.lib.utils import get_rl_tempdir
fontmapdir = get_rl_tempdir('FastCMAPS')
if os.path.isfile(fontmapdir + os.sep + name + '.fastmap'):
self.fastLoad(fontmapdir)
self.source = fontmapdir + os.sep + name + '.fastmap'
else:
self.parseCMAPFile(name)
self.source = 'CMAP: ' + name
self.fastSave(fontmapdir)
else:
self.parseCMAPFile(name)
def _hash(self, text):
hasher = md5()
if not isinstance(text, bytes):
# hashlib requires bytes under Python 3; CMAP files are read as text.
text = text.encode('utf8')
hasher.update(text)
return hasher.digest()
def parseCMAPFile(self, name):
"""This is a tricky one as CMAP files are Postscript
ones. Some refer to others with a 'usecmap'
command"""
#started = time.clock()
cmapfile = findCMapFile(name)
# this will CRAWL with the unicode encodings...
rawdata = open(cmapfile, 'r').read()
self._mapFileHash = self._hash(rawdata)
#if it contains the token 'usecmap', parse the other
#cmap file first....
usecmap_pos = rawdata.find('usecmap')
if usecmap_pos > -1:
#they tell us to look in another file
#for the code space ranges. The one
# to use will be the previous word.
chunk = rawdata[0:usecmap_pos]
words = chunk.split()
otherCMAPName = words[-1]
#print 'referred to another CMAP %s' % otherCMAPName
self.parseCMAPFile(otherCMAPName)
# now continue parsing this, as it may
# override some settings
words = rawdata.split()
while words != []:
if words[0] == 'begincodespacerange':
words = words[1:]
while words[0] != 'endcodespacerange':
strStart, strEnd, words = words[0], words[1], words[2:]
start = int(strStart[1:-1], 16)
end = int(strEnd[1:-1], 16)
self._codeSpaceRanges.append((start, end),)
elif words[0] == 'beginnotdefrange':
words = words[1:]
while words[0] != 'endnotdefrange':
strStart, strEnd, strValue = words[0:3]
start = int(strStart[1:-1], 16)
end = int(strEnd[1:-1], 16)
value = int(strValue)
self._notDefRanges.append((start, end, value),)
words = words[3:]
elif words[0] == 'begincidrange':
words = words[1:]
while words[0] != 'endcidrange':
strStart, strEnd, strValue = words[0:3]
start = int(strStart[1:-1], 16)
end = int(strEnd[1:-1], 16)
value = int(strValue)
# this means that 'start' corresponds to 'value',
# start+1 corresponds to value+1 and so on up
# to end
offset = 0
while start + offset <= end:
self._cmap[start + offset] = value + offset
offset = offset + 1
words = words[3:]
else:
words = words[1:]
#finished = time.clock()
#print 'parsed CMAP %s in %0.4f seconds' % (self.name, finished - started)
def translate(self, text):
"Convert a string into a list of CIDs"
output = []
cmap = self._cmap
lastChar = ''
for char in text:
if lastChar != '':
#print 'convert character pair "%s"' % (lastChar + char)
num = ord(lastChar) * 256 + ord(char)
else:
#print 'convert character "%s"' % char
num = ord(char)
lastChar = char
found = 0
for low, high in self._codeSpaceRanges:
if low < num < high:
try:
cid = cmap[num]
#print '%d -> %d' % (num, cid)
except KeyError:
#not defined. Try to find the appropriate
# notdef character, or failing that return
# zero
cid = 0
for low2, high2, notdef in self._notDefRanges:
if low2 < num < high2:
cid = notdef
break
output.append(cid)
found = 1
break
if found:
lastChar = ''
else:
lastChar = char
return output
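# Illustrative arithmetic (assumed code space range and mapping): if a
# two-byte range covers 0x8140-0x9FFC, the byte pair '\x82\xa0' yields
# num = 0x82 * 256 + 0xa0 = 0x82a0, which is looked up in self._cmap and,
# failing that, in the notdef ranges (defaulting to CID 0).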
def fastSave(self, directory):
f = open(os.path.join(directory, self.name + '.fastmap'), 'wb')
marshal.dump(self._mapFileHash, f)
marshal.dump(self._codeSpaceRanges, f)
marshal.dump(self._notDefRanges, f)
marshal.dump(self._cmap, f)
f.close()
def fastLoad(self, directory):
started = time.time()  # time.clock() was removed in Python 3.8
f = open(os.path.join(directory, self.name + '.fastmap'), 'rb')
self._mapFileHash = marshal.load(f)
self._codeSpaceRanges = marshal.load(f)
self._notDefRanges = marshal.load(f)
self._cmap = marshal.load(f)
f.close()
finished = time.time()
#print 'loaded %s in %0.4f seconds' % (self.name, finished - started)
def getData(self):
"""Simple persistence helper. Return a dict with all that matters."""
return {
'mapFileHash': self._mapFileHash,
'codeSpaceRanges': self._codeSpaceRanges,
'notDefRanges': self._notDefRanges,
'cmap': self._cmap,
}
class CIDTypeFace(pdfmetrics.TypeFace):
"""Multi-byte type face.
Conceptually similar to a single byte typeface,
but the glyphs are identified by a numeric Character
ID (CID) and not a glyph name. """
def __init__(self, name):
"""Initialised from one of the canned dictionaries in allowedEncodings
Or rather, it will be shortly..."""
pdfmetrics.TypeFace.__init__(self, name)
self._extractDictInfo(name)
def _extractDictInfo(self, name):
try:
fontDict = CIDFontInfo[name]
except KeyError:
raise KeyError("Unable to find information on CID typeface '%s'" % name +
"Only the following font names work:" + repr(allowedTypeFaces))
descFont = fontDict['DescendantFonts'][0]
self.ascent = descFont['FontDescriptor']['Ascent']
self.descent = descFont['FontDescriptor']['Descent']
self._defaultWidth = descFont['DW']
self._explicitWidths = self._expandWidths(descFont['W'])
# should really support self.glyphWidths, self.glyphNames
# but not done yet.
def _expandWidths(self, compactWidthArray):
"""Expands Adobe nested list structure to get a dictionary of widths.
Here is an example of such a structure.::
(
# starting at character ID 1, next n characters have the widths given.
1, (277,305,500,668,668,906,727,305,445,445,508,668,305,379,305,539),
# all Characters from ID 17 to 26 are 668 em units wide
17, 26, 668,
27, (305, 305, 668, 668, 668, 566, 871, 727, 637, 652, 699, 574, 555,
676, 687, 242, 492, 664, 582, 789, 707, 734, 582, 734, 605, 605,
641, 668, 727, 945, 609, 609, 574, 445, 668, 445, 668, 668, 590,
555, 609, 547, 602, 574, 391, 609, 582, 234, 277, 539, 234, 895,
582, 605, 602, 602, 387, 508, 441, 582, 562, 781, 531, 570, 555,
449, 246, 449, 668),
# these must be half width katakana and the like.
231, 632, 500
)
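A tiny illustrative expansion (hypothetical widths, not real font
data)::
(1, (100, 200), 5, 7, 300) -> {1: 100, 2: 200, 5: 300, 6: 300, 7: 300}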
"""
data = compactWidthArray[:]
widths = {}
while data:
start, data = data[0], data[1:]
if isSeq(data[0]):
items, data = data[0], data[1:]
for offset in range(len(items)):
widths[start + offset] = items[offset]
else:
end, width, data = data[0], data[1], data[2:]
for idx in range(start, end+1):
widths[idx] = width
return widths
def getCharWidth(self, characterId):
return self._explicitWidths.get(characterId, self._defaultWidth)
class CIDFont(pdfmetrics.Font):
"Represents a built-in multi-byte font"
_multiByte = 1
def __init__(self, face, encoding):
assert face in allowedTypeFaces, "TypeFace '%s' not supported! Use any of these instead: %s" % (face, allowedTypeFaces)
self.faceName = face
#should cache in registry...
self.face = CIDTypeFace(face)
assert encoding in allowedEncodings, "Encoding '%s' not supported! Use any of these instead: %s" % (encoding, allowedEncodings)
self.encodingName = encoding
self.encoding = CIDEncoding(encoding)
#legacy hack doing quick cut and paste.
self.fontName = self.faceName + '-' + self.encodingName
self.name = self.fontName
# need to know if it is vertical or horizontal
self.isVertical = (self.encodingName[-1] == 'V')
#no substitutes initially
self.substitutionFonts = []
def formatForPdf(self, text):
encoded = escapePDF(text)
#print 'encoded CIDFont:', encoded
return encoded
def stringWidth(self, text, size, encoding=None):
"""This presumes non-Unicode input. UnicodeCIDFont wraps it for that context"""
cidlist = self.encoding.translate(text)
if self.isVertical:
#this part is "not checked!" but seems to work.
#assume each is 1000 ems high
return len(cidlist) * size
else:
w = 0
for cid in cidlist:
w = w + self.face.getCharWidth(cid)
return 0.001 * w * size
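# Illustrative arithmetic (made-up widths): two horizontal CIDs of width
# 500 and 1000 em-units at size 10 give 0.001 * (500 + 1000) * 10 = 15.0
# points, while the vertical branch above would simply return 2 * 10 = 20.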
def addObjects(self, doc):
"""The explicit code in addMinchoObjects and addGothicObjects
will be replaced by something that pulls the data from
_cidfontdata.py in the next few days."""
internalName = 'F' + repr(len(doc.fontMapping)+1)
bigDict = CIDFontInfo[self.face.name]
bigDict['Name'] = '/' + internalName
bigDict['Encoding'] = '/' + self.encodingName
#convert to PDF dictionary/array objects
cidObj = structToPDF(bigDict)
# link into document, and add to font map
r = doc.Reference(cidObj, internalName)
fontDict = doc.idToObject['BasicFonts'].dict
fontDict[internalName] = r
doc.fontMapping[self.name] = '/' + internalName
class UnicodeCIDFont(CIDFont):
"""Wraps up CIDFont to hide explicit encoding choice;
encodes text for output as UTF16.
lang should be one of 'jpn', 'chs', 'cht', 'kor' for now.
if vertical is set, it will select a different widths array
and possibly glyphs for some punctuation marks.
halfWidth is only for Japanese.
>>> dodgy = UnicodeCIDFont('nonexistent')
Traceback (most recent call last):
...
KeyError: "don't know anything about CID font nonexistent"
>>> heisei = UnicodeCIDFont('HeiseiMin-W3')
>>> heisei.name
'HeiseiMin-W3'
>>> heisei.language
'jpn'
>>> heisei.encoding.name
'UniJIS-UCS2-H'
>>> #This is how PDF data gets encoded.
>>> print(heisei.formatForPdf('hello'))
\\000h\\000e\\000l\\000l\\000o
>>> tokyo = u'\u6771\u4AEC'
>>> print(heisei.formatForPdf(tokyo))
gqJ\\354
>>> print(heisei.stringWidth(tokyo,10))
20.0
>>> print(heisei.stringWidth('hello world',10))
45.83
"""
def __init__(self, face, isVertical=False, isHalfWidth=False):
#pass
try:
lang, defaultEncoding = defaultUnicodeEncodings[face]
except KeyError:
raise KeyError("don't know anything about CID font %s" % face)
#we know the languages now.
self.language = lang
#rebuilt encoding string. They follow rules which work
#for the 7 fonts provided.
enc = defaultEncoding[:-1]
if isHalfWidth:
enc = enc + 'HW-'
if isVertical:
enc = enc + 'V'
else:
enc = enc + 'H'
#now we can do the more general case
CIDFont.__init__(self, face, enc)
#self.encName = 'utf_16_le'
#it's simpler for unicode, just use the face name
self.name = self.fontName = face
self.vertical = isVertical
self.isHalfWidth = isHalfWidth
self.unicodeWidths = widthsByUnichar[self.name]
def formatForPdf(self, text):
#these ones should be encoded asUTF16 minus the BOM
from codecs import utf_16_be_encode
#print 'formatting %s: %s' % (type(text), repr(text))
if isBytes(text):
text = text.decode('utf8')
utfText = utf_16_be_encode(text)[0]
encoded = escapePDF(utfText)
#print ' encoded:',encoded
return encoded
#
#result = escapePDF(encoded)
#print ' -> %s' % repr(result)
#return result
def stringWidth(self, text, size, encoding=None):
"Just ensure we do width test on characters, not bytes..."
if isBytes(text):
text = text.decode('utf8')
widths = self.unicodeWidths
return size * 0.001 * sum([widths.get(uch, 1000) for uch in text])
#return CIDFont.stringWidth(self, text, size, encoding)
def precalculate(cmapdir):
# crunches through all, making 'fastmap' files
import os
files = os.listdir(cmapdir)
for file in files:
if os.path.isfile(cmapdir + os.sep + file + '.fastmap'):
continue
try:
enc = CIDEncoding(file)
except:
print('cannot parse %s, skipping' % file)  # 'enc' is unbound if the constructor raised
continue
enc.fastSave(cmapdir)
print('saved %s.fastmap' % file)
def test():
# only works if you have correct encodings on your box!
c = Canvas('test_japanese.pdf')
c.setFont('Helvetica', 30)
c.drawString(100,700, 'Japanese Font Support')
pdfmetrics.registerFont(CIDFont('HeiseiMin-W3','90ms-RKSJ-H'))
pdfmetrics.registerFont(CIDFont('HeiseiKakuGo-W5','90ms-RKSJ-H'))
# the two typefaces
c.setFont('HeiseiMin-W3-90ms-RKSJ-H', 16)
# this says "This is HeiseiMincho" in shift-JIS. Not all our readers
# have a Japanese PC, so I escaped it. On a Japanese-capable
# system, print the string to see Kanji
message1 = '\202\261\202\352\202\315\225\275\220\254\226\276\222\251\202\305\202\267\201B'
c.drawString(100, 675, message1)
c.save()
print('saved test_japanese.pdf')
## print 'CMAP_DIR = ', CMAP_DIR
## tf1 = CIDTypeFace('HeiseiMin-W3')
## print 'ascent = ',tf1.ascent
## print 'descent = ',tf1.descent
## for cid in [1,2,3,4,5,18,19,28,231,1742]:
## print 'width of cid %d = %d' % (cid, tf1.getCharWidth(cid))
encName = '90ms-RKSJ-H'
enc = CIDEncoding(encName)
print(message1, '->', enc.translate(message1))
f = CIDFont('HeiseiMin-W3','90ms-RKSJ-H')
print('width = %0.2f' % f.stringWidth(message1, 10))
#testing all encodings
## import time
## started = time.time()
## import glob
## for encName in _cidfontdata.allowedEncodings:
## #encName = '90ms-RKSJ-H'
## enc = CIDEncoding(encName)
## print 'encoding %s:' % encName
## print ' codeSpaceRanges = %s' % enc._codeSpaceRanges
## print ' notDefRanges = %s' % enc._notDefRanges
## print ' mapping size = %d' % len(enc._cmap)
## finished = time.time()
## print 'constructed all encodings in %0.2f seconds' % (finished - started)
if __name__=='__main__':
import doctest
from reportlab.pdfbase import cidfonts
doctest.testmod(cidfonts)
#test()
| sandeepkoduri/GAE-html-to-pdf | libs/reportlab/pdfbase/cidfonts.py | Python | mit | 18,851 | [
"Brian"
] | 34a923080ab34a3582bde863f47cda128a3d3fda480f2a4325d64001756a794b |
import re
import ast
import sys
try:
from distutils.util import get_platform
is_windows = get_platform().startswith("win")
except ImportError:
# Don't break install if distutils is incompatible in some way
# probably overly defensive.
is_windows = False
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
'six',
'webob',
'psutil',
'pyyaml',
]
if sys.version_info[0] == 2:
requirements.append('PasteScript')
requirements.append('paste')
test_requirements = [
# TODO: put package test requirements here
]
_version_re = re.compile(r'__version__\s+=\s+(.*)')
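# pull __version__ out of pulsar/__init__.py with a regex so the package
# itself never has to be imported at install time; ast.literal_eval turns
# the matched source literal into a plain string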
with open('pulsar/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
if is_windows:
scripts = ["scripts/pulsar.bat"]
else:
scripts = ["scripts/pulsar"]
setup(
name='pulsar-app',
version=version,
description='Distributed job execution application built for Galaxy (http://galaxyproject.org/).',
long_description=readme + '\n\n' + history,
author='Galaxy Project',
author_email='[email protected]',
url='https://github.com/galaxyproject/pulsar',
packages=[
'pulsar',
'pulsar.cache',
'pulsar.client',
'pulsar.client.test',
'pulsar.client.staging',
'pulsar.client.transport',
'pulsar.managers',
'pulsar.managers.base',
'pulsar.managers.staging',
'pulsar.managers.util',
'pulsar.managers.util.cli',
'pulsar.managers.util.cli.job',
'pulsar.managers.util.cli.shell',
'pulsar.managers.util.condor',
'pulsar.managers.util.drmaa',
'pulsar.managers.util.job_script',
'pulsar.mesos',
'pulsar.messaging',
'pulsar.scripts',
'pulsar.tools',
'pulsar.web',
'galaxy',
'galaxy.jobs',
'galaxy.jobs.metrics',
'galaxy.jobs.metrics.collectl',
'galaxy.jobs.metrics.instrumenters',
'galaxy.objectstore',
'galaxy.tools',
'galaxy.tools.linters',
'galaxy.tools.deps',
'galaxy.tools.deps.resolvers',
'galaxy.util',
],
entry_points='''
[console_scripts]
pulsar-main=pulsar.main:main
pulsar-check=pulsar.client.test.check:main
pulsar-config=pulsar.scripts.config:main
pulsar-drmaa-launch=pulsar.scripts.drmaa_launch:main
pulsar-drmaa-kill=pulsar.scripts.drmaa_kill:main
pulsar-chown-working-directory=pulsar.scripts.chown_working_directory:main
pulsar-submit=pulsar.scripts.submit:main
pulsar-run=pulsar.scripts.run:main
''',
scripts=scripts,
package_data={'pulsar': [
'managers/util/job_script/DEFAULT_JOB_FILE_TEMPLATE.sh',
'managers/util/job_script/CLUSTER_SLOTS_STATEMENT.sh',
]},
package_dir={'pulsar': 'pulsar',
'galaxy': 'galaxy'},
include_package_data=True,
install_requires=requirements,
license="Apache License 2.0",
zip_safe=False,
keywords='pulsar',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
test_suite='test',
tests_require=test_requirements
)
| ssorgatem/pulsar | setup.py | Python | apache-2.0 | 3,786 | [
"Galaxy"
] | 9eceb8a6cb38ed09c35aef9367684404bf3d2dfc6dd2325e9fbfb600692c2f58 |
import os
from django.test import TransactionTestCase
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from rest_framework.exceptions import ValidationError as DRF_ValidationError
from hs_core.testing import MockIRODSTestCaseMixin
from hs_core import hydroshare
from hs_core.models import Coverage, ResourceFile
from hs_core.views.utils import remove_folder, move_or_rename_file_or_folder
from hs_app_netCDF.models import OriginalCoverage, Variable
from hs_file_types.models import NetCDFLogicalFile, NetCDFFileMetaData
from hs_file_types.models.base import METADATA_FILE_ENDSWITH, RESMAP_FILE_ENDSWITH
from .utils import assert_netcdf_file_type_metadata, CompositeResourceTestMixin, \
get_path_with_no_file_extension
class NetCDFFileTypeTest(MockIRODSTestCaseMixin, TransactionTestCase,
CompositeResourceTestMixin):
def setUp(self):
super(NetCDFFileTypeTest, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.user = hydroshare.create_account(
'[email protected]',
username='user1',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[self.group]
)
self.res_title = "Testing NetCDF File Type"
self.netcdf_file_name = 'netcdf_valid.nc'
self.netcdf_file = 'hs_file_types/tests/{}'.format(self.netcdf_file_name)
self.netcdf_invalid_file_name = 'netcdf_invalid.nc'
self.netcdf_invalid_file = 'hs_file_types/tests/{}'.format(self.netcdf_invalid_file_name)
def test_create_aggregation_from_nc_file_1(self):
# here we are using a valid nc file for setting it
# to NetCDF file type which includes metadata extraction
# the nc file in this case is at the root of the folder hierarchy
self.create_composite_resource(self.netcdf_file)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# check that there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
base_file_name, _ = os.path.splitext(res_file.file_name)
expected_res_file_folder_path = res_file.file_folder
# set the nc file to NetCDF file type
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test extracted metadata
assert_netcdf_file_type_metadata(self, self.res_title,
aggr_folder=expected_res_file_folder_path)
# test file level keywords
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(len(logical_file.metadata.keywords), 1)
self.assertEqual(logical_file.metadata.keywords[0], 'Snow water equivalent')
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_from_nc_file_2(self):
# here we are using a valid nc file for setting it
# to NetCDF file type which includes metadata extraction
# the nc file in this case is not at the root of the folder hierarchy but in a folder
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# check that there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# set the nc file to NetCDF file type
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test extracted metadata
assert_netcdf_file_type_metadata(self, self.res_title, aggr_folder=new_folder)
# test file level keywords
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(len(logical_file.metadata.keywords), 1)
self.assertEqual(logical_file.metadata.keywords[0], 'Snow water equivalent')
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_from_nc_file_3(self):
# here we are using a valid nc file for setting it
# to NetCDF file type which includes metadata extraction
# the nc file in this case is not at the root of the folder hierarchy but in a folder. The
# same folder contains another file that's not going to be part of the aggregation
# location of the nc file before aggregation is created: /my_folder/netcdf_valid.nc
# location of another file before aggregation is created: /my_folder/netcdf_invalid.nc
# location of nc file after aggregation is created:
# /my_folder/netcdf_valid.nc
# location of another file after aggregation is created: /my_folder/netcdf_invalid.nc
self.create_composite_resource()
new_folder = 'my_folder'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# check that there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# add another file to the same folder
self.add_file_to_resource(file_to_add=self.netcdf_invalid_file, upload_folder=new_folder)
self.assertEqual(self.composite_resource.files.all().count(), 2)
# set the nc file to NetCDF file type
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(self.composite_resource.files.all().count(), 3)
# test logical file/aggregation
self.assertEqual(len(list(self.composite_resource.logical_files)), 1)
logical_file = list(self.composite_resource.logical_files)[0]
self.assertEqual(logical_file.files.count(), 2)
base_nc_file_name, _ = os.path.splitext(self.netcdf_file_name)
expected_file_folder = new_folder
for res_file in logical_file.files.all():
self.assertEqual(res_file.file_folder, expected_file_folder)
self.assertTrue(isinstance(logical_file, NetCDFLogicalFile))
self.assertTrue(logical_file.metadata, NetCDFLogicalFile)
# test the location of the file that's not part of the netcdf aggregation
other_res_file = None
for res_file in self.composite_resource.files.all():
if not res_file.has_logical_file:
other_res_file = res_file
break
self.assertEqual(other_res_file.file_folder, new_folder)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_from_nc_file_4(self):
# here we are using a valid nc file for setting it
# to NetCDF file type which includes metadata extraction
# the nc file in this case is not at the root of the folder hierarchy but in a folder. The
# same folder contains another folder
# location of nc file before aggregation is created: /my_folder/netcdf_valid.nc
# location of another folder before aggregation is created: /my_folder/another_folder
# location of nc file after aggregation is created:
# /my_folder/netcdf_valid.nc
self.create_composite_resource()
new_folder = 'my_folder'
ResourceFile.create_folder(self.composite_resource, new_folder)
another_folder = '{}/another_folder'.format(new_folder)
ResourceFile.create_folder(self.composite_resource, another_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# check that there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# set the nc file to NetCDF file type
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(self.composite_resource.files.all().count(), 2)
# test logical file/aggregation
self.assertEqual(len(list(self.composite_resource.logical_files)), 1)
logical_file = list(self.composite_resource.logical_files)[0]
self.assertEqual(logical_file.files.count(), 2)
base_nc_file_name, _ = os.path.splitext(self.netcdf_file_name)
expected_file_folder = new_folder
for res_file in logical_file.files.all():
self.assertEqual(res_file.file_folder, expected_file_folder)
self.assertTrue(isinstance(logical_file, NetCDFLogicalFile))
self.assertTrue(logical_file.metadata, NetCDFLogicalFile)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_for_netcdf_resource_title(self):
# here we are using a valid nc file for setting it
# to NetCDF file type which includes metadata extraction
# and testing that the resource title gets set with the
# extracted metadata if the original title is 'untitled resource'
self.res_title = 'untitled resource'
self.create_composite_resource(self.netcdf_file)
self.assertEqual(self.composite_resource.metadata.title.value, self.res_title)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# check that there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# set the nc file to NetCDF file type
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test resource title was updated with the extracted netcdf data
res_title = "Snow water equivalent estimation at TWDEF site from Oct 2009 to June 2010"
self.assertEqual(self.composite_resource.metadata.title.value, res_title)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_from_invalid_nc_file_1(self):
# here we are using an invalid netcdf file for setting it
# to netCDF file type which should fail
self.create_composite_resource(self.netcdf_invalid_file)
self._test_invalid_file()
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_from_invalid_nc_file_2(self):
# here we are using a valid nc file for setting it
# to NetCDF file type when it has already been set to this file type - should fail
self.create_composite_resource(self.netcdf_file)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# set nc file to aggregation
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertEqual(self.composite_resource.files.all().count(), 2)
# check that the nc resource file is associated with a logical file
res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.nc')[0]
self.assertEqual(res_file.has_logical_file, True)
self.assertEqual(res_file.logical_file_type_name, "NetCDFLogicalFile")
# trying to set this nc file again to netcdf file type should raise
# ValidationError
with self.assertRaises(ValidationError):
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_metadata_CRUD(self):
# here we are using a valid nc file for creating a NetCDF file type (aggregation)
# then testing with metadata CRUD actions for the aggregation
self.create_composite_resource()
new_folder = 'nc_folder'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
# make the netcdf file part of the NetCDFLogicalFile
res_file = self.composite_resource.files.first()
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
netcdf_logical_file = NetCDFLogicalFile.create(self.composite_resource)
netcdf_logical_file.save()
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
netcdf_logical_file.add_resource_file(res_file)
res_file = self.composite_resource.files.first()
self.assertEqual(res_file.logical_file_type_name, 'NetCDFLogicalFile')
self.assertEqual(netcdf_logical_file.files.count(), 1)
# create keywords - note it is possible to have duplicate keywords
# appropriate view functions need to disallow duplicate keywords
keywords = ['key-1', 'key-1', 'key-2']
netcdf_logical_file.metadata.keywords = keywords
netcdf_logical_file.metadata.save()
self.assertEqual(len(keywords), len(netcdf_logical_file.metadata.keywords))
for keyword in keywords:
self.assertIn(keyword, netcdf_logical_file.metadata.keywords)
# create OriginalCoverage element
self.assertEqual(netcdf_logical_file.metadata.original_coverage, None)
coverage_data = {'northlimit': 121.345, 'southlimit': 42.678, 'eastlimit': 123.789,
'westlimit': 40.789, 'units': 'meters'}
netcdf_logical_file.metadata.create_element('OriginalCoverage', value=coverage_data)
self.assertNotEqual(netcdf_logical_file.metadata.original_coverage, None)
self.assertEqual(float(netcdf_logical_file.metadata.original_coverage.value['northlimit']),
121.345)
# test updating OriginalCoverage element
orig_coverage = netcdf_logical_file.metadata.original_coverage
coverage_data = {'northlimit': 111.333, 'southlimit': 42.678, 'eastlimit': 123.789,
'westlimit': 40.789, 'units': 'meters'}
netcdf_logical_file.metadata.update_element('OriginalCoverage', orig_coverage.id,
value=coverage_data)
self.assertEqual(float(netcdf_logical_file.metadata.original_coverage.value['northlimit']),
111.333)
# trying to create a 2nd OriginalCoverage element should raise exception
with self.assertRaises(Exception):
netcdf_logical_file.metadata.create_element('OriginalCoverage', value=coverage_data)
# trying to update bounding box values with non-numeric values
# (e.g., 'northlimit' key with a non-numeric value) should raise exception
coverage_data = {'northlimit': '121.345a', 'southlimit': 42.678, 'eastlimit': 123.789,
'westlimit': 40.789, 'units': 'meters'}
with self.assertRaises(ValidationError):
netcdf_logical_file.metadata.update_element('OriginalCoverage', orig_coverage.id,
value=coverage_data)
# test creating spatial coverage
# there should not be any spatial coverage for the netcdf file type
self.assertEqual(netcdf_logical_file.metadata.spatial_coverage, None)
coverage_data = {'projection': 'WGS 84 EPSG:4326', 'northlimit': 41.87,
'southlimit': 41.863,
'eastlimit': -111.505,
'westlimit': -111.511, 'units': 'meters'}
# create spatial coverage
netcdf_logical_file.metadata.create_element('Coverage', type="box", value=coverage_data)
spatial_coverage = netcdf_logical_file.metadata.spatial_coverage
self.assertEqual(float(spatial_coverage.value['northlimit']), 41.87)
# test updating spatial coverage
coverage_data = {'projection': 'WGS 84 EPSG:4326', 'northlimit': 41.87706,
'southlimit': 41.863,
'eastlimit': -111.505,
'westlimit': -111.511, 'units': 'meters'}
netcdf_logical_file.metadata.update_element('Coverage', element_id=spatial_coverage.id,
type="box", value=coverage_data)
spatial_coverage = netcdf_logical_file.metadata.spatial_coverage
self.assertEqual(float(spatial_coverage.value['northlimit']), 41.87706)
# create Variable element
self.assertEqual(netcdf_logical_file.metadata.variables.count(), 0)
variable_data = {'name': 'variable_name', 'type': 'Int', 'unit': 'deg F',
'shape': 'variable_shape'}
netcdf_logical_file.metadata.create_element('Variable', **variable_data)
self.assertEqual(netcdf_logical_file.metadata.variables.count(), 1)
self.assertEqual(netcdf_logical_file.metadata.variables.first().name, 'variable_name')
# test that multiple Variable elements can be created
variable_data = {'name': 'variable_name_2', 'type': 'Int', 'unit': 'deg F',
'shape': 'variable_shape_2'}
netcdf_logical_file.metadata.create_element('Variable', **variable_data)
self.assertEqual(netcdf_logical_file.metadata.variables.count(), 2)
# test update Variable element
variable = netcdf_logical_file.metadata.variables.first()
variable_data = {'name': 'variable_name_updated', 'type': 'Int', 'unit': 'deg F',
'shape': 'variable_shape'}
netcdf_logical_file.metadata.update_element('Variable', variable.id, **variable_data)
variable = netcdf_logical_file.metadata.variables.get(id=variable.id)
self.assertEqual(variable.name, 'variable_name_updated')
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_metadata_on_logical_file_delete(self):
# test that when the NetCDFLogicalFile instance is deleted
# all metadata associated with it also get deleted
self.create_composite_resource(self.netcdf_file)
res_file = self.composite_resource.files.first()
# extract metadata from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test that we have one logical file of type NetCDFLogicalFile as a result
# of metadata extraction
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
# test that we have the metadata elements
# there should be 4 Coverage objects - 2 at the resource level and
# the other 2 at the file type level
self.assertEqual(Coverage.objects.count(), 4)
self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
self.assertEqual(logical_file.metadata.coverages.all().count(), 2)
self.assertEqual(OriginalCoverage.objects.count(), 1)
self.assertNotEqual(logical_file.metadata.originalCoverage, None)
self.assertEqual(Variable.objects.count(), 5)
self.assertEqual(logical_file.metadata.variables.all().count(), 5)
# delete the logical file
logical_file.logical_delete(self.user)
# test that we have no logical file of type NetCDFLogicalFile
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
# test that all metadata deleted - there should be 2 resource level coverages
self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
self.assertEqual(Coverage.objects.count(), 2)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(Variable.objects.count(), 0)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_remove_aggregation(self):
# test that when an instance of NetCDFLogicalFile type (aggregation) is deleted
# all resource files associated with that aggregation are not deleted but the associated
# metadata is deleted
self.create_composite_resource(self.netcdf_file)
nc_res_file = self.composite_resource.files.first()
base_file_name, _ = os.path.splitext(nc_res_file.file_name)
expected_folder_name = nc_res_file.file_folder
# set the nc file to NetCDFLogicalFile aggregation
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
# test that we have one logical file (aggregation) of type NetCDFLogicalFile
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
logical_file = NetCDFLogicalFile.objects.first()
self.assertEqual(logical_file.files.all().count(), 2)
self.assertEqual(self.composite_resource.files.all().count(), 2)
self.assertEqual(set(self.composite_resource.files.all()),
set(logical_file.files.all()))
# delete the aggregation (logical file) object using the remove_aggregation function
# this should also delete the system generated txt file that was created when the
# netcdf logical file was created
logical_file.remove_aggregation()
# test there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# test there is no NetCDFFileMetaData object
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
# check the files associated with the aggregation not deleted
self.assertEqual(self.composite_resource.files.all().count(), 1)
# check the file folder is not deleted
nc_file = self.composite_resource.files.first()
self.assertTrue(nc_file.file_name.endswith('.nc'))
self.assertEqual(nc_file.file_folder, expected_folder_name)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_metadata_on_resource_delete(self):
# test that when the composite resource is deleted
# all metadata associated with NetCDFLogicalFile Type is deleted
self.create_composite_resource(self.netcdf_file)
res_file = self.composite_resource.files.first()
# extract metadata from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test that we have one logical file of type NetCDFLogicalFile as a result
# of metadata extraction
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
# test that we have the metadata elements
# there should be 4 Coverage objects - 2 at the resource level and
# the other 2 at the file type level
self.assertEqual(Coverage.objects.count(), 4)
self.assertEqual(OriginalCoverage.objects.count(), 1)
self.assertEqual(Variable.objects.count(), 5)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
# delete resource
hydroshare.delete_resource(self.composite_resource.short_id)
# test that we have no logical file of type NetCDFLogicalFile
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
# test that all metadata deleted
self.assertEqual(Coverage.objects.count(), 0)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(Variable.objects.count(), 0)
def test_aggregation_metadata_on_file_delete(self):
# test that when any resource file that is part of a NetCDFLogicalFile is deleted
# all metadata associated with NetCDFLogicalFile is deleted
# test for both .nc and .txt delete
# test with deleting of 'nc' file
self._test_file_metadata_on_file_delete(ext='.nc')
# test with deleting of 'txt' file
self._test_file_metadata_on_file_delete(ext='.txt')
def test_aggregation_folder_delete(self):
# when a file is set to NetCDFLogicalFile type
# the system automatically creates a folder using the name of the file
# that was used to set the file type
# Here we need to test that when that folder gets deleted, all files
# in that folder get deleted, the logicalfile object gets deleted and
# the associated metadata objects get deleted
self.create_composite_resource()
new_folder = 'nc_folder'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
base_file_name, _ = os.path.splitext(nc_res_file.file_name)
expected_folder_name = nc_res_file.file_folder
# extract metadata from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
# test that we have one logical file of type NetCDFLogicalFile as a result
# of metadata extraction
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
# should have one NetCDFFileMetadata object
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
# there should be 2 content files
self.assertEqual(self.composite_resource.files.count(), 2)
# test that there are metadata associated with the logical file
self.assertEqual(Coverage.objects.count(), 4)
self.assertEqual(OriginalCoverage.objects.count(), 1)
self.assertEqual(Variable.objects.count(), 5)
# delete the folder for the logical file
folder_path = "data/contents/{}".format(expected_folder_name)
remove_folder(self.user, self.composite_resource.short_id, folder_path)
# there should no content files
self.assertEqual(self.composite_resource.files.count(), 0)
# there should not be any netCDF logical file or metadata file
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
# test that all metadata associated with the logical file got deleted - there should still be
# 2 resource level coverages
self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
self.assertEqual(Coverage.objects.count(), 2)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(Variable.objects.count(), 0)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_file_rename(self):
# test that a file can't be renamed for any resource file
# that's part of the NetCDF logical file
self.create_composite_resource()
self.add_file_to_resource(file_to_add=self.netcdf_file)
res_file = self.composite_resource.files.first()
expected_folder_path = res_file.file_folder
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test renaming of files that are associated with aggregation raises exception
self.assertEqual(self.composite_resource.files.count(), 2)
for res_file in self.composite_resource.files.all():
base_file_name, ext = os.path.splitext(res_file.file_name)
self.assertEqual(res_file.file_folder, expected_folder_path)
if expected_folder_path is not None:
src_path = 'data/contents/{0}/{1}'.format(expected_folder_path, res_file.file_name)
else:
src_path = 'data/contents/{}'.format(res_file.file_name)
new_file_name = 'some_netcdf.{}'.format(ext)
self.assertNotEqual(res_file.file_name, new_file_name)
if expected_folder_path is not None:
tgt_path = 'data/contents/{}/{}'.format(expected_folder_path, new_file_name)
else:
tgt_path = 'data/contents/{}'.format(new_file_name)
with self.assertRaises(DRF_ValidationError):
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_file_move(self):
# test any resource file that's part of the NetCDF logical file can't be moved
self.create_composite_resource()
self.add_file_to_resource(file_to_add=self.netcdf_file)
nc_res_file = self.composite_resource.files.first()
# create the aggregation using the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
# test moving of files that are associated with the NetCDF aggregation - which should raise exception
self.assertEqual(self.composite_resource.files.count(), 2)
res_file = self.composite_resource.files.first()
expected_folder_path = nc_res_file.file_folder
self.assertEqual(res_file.file_folder, expected_folder_path)
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
# moving any of the resource files to this new folder should raise exception
tgt_path = 'data/contents/{}'.format(new_folder)
for res_file in self.composite_resource.files.all():
with self.assertRaises(DRF_ValidationError):
src_path = os.path.join('data', 'contents', res_file.short_path)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_folder_rename(self):
# test changes to aggregation name, aggregation metadata xml file path, and aggregation
# resource map xml file path on folder (that contains netcdf aggregation) name change
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
expected_folder_path = nc_res_file.file_folder
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
self.assertEqual(self.composite_resource.files.count(), 2)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, expected_folder_path)
# test aggregation name
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(logical_file.aggregation_name, nc_res_file.short_path)
# test aggregation xml file paths
nc_file_path = get_path_with_no_file_extension(nc_res_file.short_path)
expected_meta_file_path = '{0}{1}'.format(nc_file_path, METADATA_FILE_ENDSWITH)
self.assertEqual(logical_file.metadata_short_file_path, expected_meta_file_path)
expected_map_file_path = '{0}{1}'.format(nc_file_path, RESMAP_FILE_ENDSWITH)
self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
# test renaming folder
src_path = 'data/contents/{}'.format(expected_folder_path)
tgt_path = 'data/contents/{}_1'.format(expected_folder_path)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, '{}_1'.format(expected_folder_path))
# test aggregation name update
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
nc_res_file.refresh_from_db()
self.assertEqual(logical_file.aggregation_name, nc_res_file.short_path)
# test aggregation xml file paths
nc_file_path = get_path_with_no_file_extension(nc_res_file.short_path)
expected_meta_file_path = '{0}{1}'.format(nc_file_path, METADATA_FILE_ENDSWITH)
self.assertEqual(logical_file.metadata_short_file_path, expected_meta_file_path)
expected_map_file_path = '{0}{1}'.format(nc_file_path, RESMAP_FILE_ENDSWITH)
self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_parent_folder_rename(self):
# test changes to aggregation name, aggregation metadata xml file path, and aggregation
# resource map xml file path on aggregation folder parent folder name change
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
aggregation_folder_name = new_folder
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
# after aggregation creation there should be 2 files (the nc file and the system generated txt file)
self.assertEqual(self.composite_resource.files.count(), 2)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, aggregation_folder_name)
# test aggregation name
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(logical_file.aggregation_name, nc_res_file.short_path)
# test aggregation xml file paths
nc_file_path = get_path_with_no_file_extension(nc_res_file.short_path)
expected_meta_file_path = '{0}{1}'.format(nc_file_path, METADATA_FILE_ENDSWITH)
self.assertEqual(logical_file.metadata_short_file_path, expected_meta_file_path)
expected_map_file_path = '{0}{1}'.format(nc_file_path, RESMAP_FILE_ENDSWITH)
self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
# create a folder to be the parent folder of the aggregation folder
parent_folder = 'parent_folder'
ResourceFile.create_folder(self.composite_resource, parent_folder)
# move the aggregation folder to the parent folder
src_path = 'data/contents/{}'.format(aggregation_folder_name)
tgt_path = 'data/contents/{0}/{1}'.format(parent_folder, aggregation_folder_name)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
file_folder = '{}/{}'.format(parent_folder, aggregation_folder_name)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, file_folder)
# renaming parent folder
parent_folder_rename = 'parent_folder_1'
src_path = 'data/contents/{}'.format(parent_folder)
tgt_path = 'data/contents/{}'.format(parent_folder_rename)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
file_folder = '{}/{}'.format(parent_folder_rename, aggregation_folder_name)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, file_folder)
# test aggregation name after folder rename
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
nc_res_file.refresh_from_db()
self.assertEqual(logical_file.aggregation_name, nc_res_file.short_path)
# test aggregation xml file paths after folder rename
nc_file_path = get_path_with_no_file_extension(nc_res_file.short_path)
expected_meta_file_path = '{0}{1}'.format(nc_file_path, METADATA_FILE_ENDSWITH)
self.assertEqual(logical_file.metadata_short_file_path, expected_meta_file_path)
expected_map_file_path = '{0}{1}'.format(nc_file_path, RESMAP_FILE_ENDSWITH)
self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_folder_move_1(self):
# test changes to aggregation name, aggregation metadata xml file path, and aggregation
# resource map xml file path on moving a folder that contains netcdf aggregation
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
aggregation_folder_name = nc_res_file.file_folder
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
self.assertEqual(self.composite_resource.files.count(), 2)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, aggregation_folder_name)
# create a folder to move the aggregation folder there
parent_folder = 'parent_folder'
ResourceFile.create_folder(self.composite_resource, parent_folder)
# move the aggregation folder to the parent folder
src_path = 'data/contents/{}'.format(aggregation_folder_name)
tgt_path = 'data/contents/{0}/{1}'.format(parent_folder, aggregation_folder_name)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
file_folder = '{0}/{1}'.format(parent_folder, aggregation_folder_name)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, file_folder)
# test aggregation name update
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
nc_res_file.refresh_from_db()
self.assertEqual(logical_file.aggregation_name, nc_res_file.short_path)
# test aggregation xml file paths
nc_file_path = get_path_with_no_file_extension(nc_res_file.short_path)
expected_meta_file_path = '{0}{1}'.format(nc_file_path, METADATA_FILE_ENDSWITH)
self.assertEqual(logical_file.metadata_short_file_path, expected_meta_file_path)
expected_map_file_path = '{0}{1}'.format(nc_file_path, RESMAP_FILE_ENDSWITH)
self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_folder_move_2(self):
# test a folder can be moved into a folder that contains a netcdf aggregation
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
aggregation_folder_name = new_folder
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
# create a folder to move into the aggregation folder
folder_to_move = 'folder_to_move'
ResourceFile.create_folder(self.composite_resource, folder_to_move)
# move the folder_to_move into the aggregation folder
src_path = 'data/contents/{}'.format(folder_to_move)
tgt_path = 'data/contents/{}'.format(aggregation_folder_name)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_folder_sub_folder_creation(self):
# test a folder can be created inside a folder that contains a netcdf aggregation
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
self.assertEqual(nc_res_file.file_folder, new_folder)
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
res_file = self.composite_resource.files.first()
self.assertNotEqual(res_file.file_folder, None)
# create a folder inside the aggregation folder
new_folder = '{}/sub_folder'.format(res_file.file_folder)
ResourceFile.create_folder(self.composite_resource, new_folder)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_file_move_to_aggregation_folder_allowed(self):
# test a file can be moved into a folder that contains a netcdf aggregation
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
self.assertEqual(nc_res_file.file_folder, new_folder)
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
res_file = self.composite_resource.files.first()
self.assertNotEqual(res_file.file_folder, '')
# add a file to the resource which will try to move into the aggregation folder
res_file_to_move = self.add_file_to_resource(file_to_add=self.netcdf_invalid_file)
src_path = os.path.join('data', 'contents', res_file_to_move.short_path)
tgt_path = 'data/contents/{}'.format(res_file.file_folder)
# move file to aggregation folder
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_upload_file_to_aggregation_folder_allowed(self):
# test a file can be uploaded into a folder that contains a netcdf aggregation
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
# add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
res_file = self.composite_resource.files.first()
self.assertEqual(res_file.file_folder, new_folder)
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
res_file = self.composite_resource.files.first()
self.assertNotEqual(res_file.file_folder, '')
# add a file to the resource at the aggregation folder
self.add_file_to_resource(file_to_add=self.netcdf_invalid_file,
upload_folder=res_file.file_folder)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def _test_invalid_file(self):
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# trying to set this invalid nc file to NetCDF file type should raise
# ValidationError
with self.assertRaises(ValidationError):
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# test that the invalid file did not get deleted
self.assertEqual(self.composite_resource.files.all().count(), 1)
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
def _test_file_metadata_on_file_delete(self, ext):
self.create_composite_resource(self.netcdf_file)
res_file = self.composite_resource.files.first()
# extract metadata from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test that we have one logical file of type NetCDFLogicalFile
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
# there should be 2 coverage elements - one spatial and the other one temporal
self.assertEqual(logical_file.metadata.coverages.all().count(), 2)
self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
self.assertNotEqual(logical_file.metadata.temporal_coverage, None)
# there should be one original coverage
self.assertNotEqual(logical_file.metadata.originalCoverage, None)
# testing extended metadata element: variables
self.assertEqual(logical_file.metadata.variables.all().count(), 5)
# there should be 4 coverage objects - 2 at the resource level
# and the other 2 at the file type level
self.assertEqual(Coverage.objects.count(), 4)
self.assertEqual(OriginalCoverage.objects.count(), 1)
self.assertEqual(Variable.objects.count(), 5)
# delete content file specified by extension (ext parameter)
res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, ext)[0]
hydroshare.delete_resource_file(self.composite_resource.short_id,
res_file.id,
self.user)
# test that we don't have logical file of type NetCDFLogicalFile Type
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
# test that all metadata deleted - there should still be 2 resource level coverages
self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
self.assertEqual(Coverage.objects.count(), 2)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(Variable.objects.count(), 0)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_main_file(self):
self.create_composite_resource(self.netcdf_file)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
self.assertEqual(res_file.has_logical_file, False)
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(1, NetCDFLogicalFile.objects.count())
self.assertEqual(".nc", NetCDFLogicalFile.objects.first().get_main_file_type())
self.assertEqual(self.netcdf_file_name,
NetCDFLogicalFile.objects.first().get_main_file.file_name)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
| hydroshare/hydroshare | hs_file_types/tests/test_netcdf_metadata.py | Python | bsd-3-clause | 51,039 | [
"NetCDF"
] | cb71d5aaef65a79d7171d11149e4776b63a141d8709c578c44a8d378ce9ffe4e |
import numpy as np
from scipy import ndimage
from dipy.align import floating
from dipy.align.metrics import SSDMetric, CCMetric, EMMetric
from numpy.testing import (assert_array_equal,
assert_array_almost_equal,
assert_raises)
def test_exceptions():
for invalid_dim in [-1, 0, 1, 4, 5]:
assert_raises(ValueError, CCMetric, invalid_dim)
assert_raises(ValueError, EMMetric, invalid_dim)
assert_raises(ValueError, SSDMetric, invalid_dim)
assert_raises(ValueError, SSDMetric, 3, step_type='unknown_metric_name')
assert_raises(ValueError, EMMetric, 3, step_type='unknown_metric_name')
def test_EMMetric_image_dynamics():
np.random.seed(7181309)
metric = EMMetric(2)
target_shape = (10, 10)
# create a random image
image = np.ndarray(target_shape, dtype=floating)
image[...] = np.random.randint(
0, 10, np.size(image)).reshape(tuple(target_shape))
# compute the expected binary mask
expected = (image > 0).astype(np.int32)
metric.use_static_image_dynamics(image, None)
assert_array_equal(expected, metric.static_image_mask)
metric.use_moving_image_dynamics(image, None)
assert_array_equal(expected, metric.moving_image_mask)
def test_em_demons_step_2d():
r"""
Compares the output of the demons step in 2d against an analytical
step. The fixed image is given by $F(x) = \frac{1}{2}||x - c_f||^2$, the
moving image is given by $G(x) = \frac{1}{2}||x - c_g||^2$,
$x, c_f, c_g \in R^{2}$
References
----------
[Vercauteren09] Vercauteren, T., Pennec, X., Perchant, A., & Ayache, N.
(2009). Diffeomorphic demons: efficient non-parametric
image registration. NeuroImage, 45(1 Suppl), S61-72.
doi:10.1016/j.neuroimage.2008.10.040
"""
# Select arbitrary images' shape (same shape for both images)
sh = (20, 10)
# Select arbitrary centers
c_f = np.asarray(sh) / 2
c_g = c_f + 0.5
# Compute the identity vector field I(x) = x in R^2
x_0 = np.asarray(range(sh[0]))
x_1 = np.asarray(range(sh[1]))
X = np.ndarray(sh + (2,), dtype=np.float64)
O = np.ones(sh)
X[..., 0] = x_0[:, None] * O
X[..., 1] = x_1[None, :] * O
# Compute the gradient fields of F and G
grad_F = X - c_f
grad_G = X - c_g
# The squared norm of grad_G to be used later
sq_norm_grad_F = np.sum(grad_F**2, -1)
sq_norm_grad_G = np.sum(grad_G**2, -1)
# Compute F and G
F = 0.5 * sq_norm_grad_F
G = 0.5 * sq_norm_grad_G
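# (note: since grad F = x - c_f, sq_norm_grad_F equals 2*F, and likewise
# sq_norm_grad_G equals 2*G)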
# Create an instance of EMMetric
metric = EMMetric(2)
metric.static_spacing = np.array([1.2, 1.2])
# The $\sigma_x$ (eq. 4 in [Vercauteren09]) parameter is computed in ANTS
# based on the image's spacing
sigma_x_sq = np.sum(metric.static_spacing**2) / metric.dim
# Set arbitrary values for $\sigma_i$ (eq. 4 in [Vercauteren09])
# The original Demons algorithm used simply |F(x) - G(x)| as an
# estimator, so let's use it as well
sigma_i_sq = (F - G)**2
# Set the properties relevant to the demons methods
metric.smooth = 3.0
metric.gradient_static = np.array(grad_F, dtype=floating)
metric.gradient_moving = np.array(grad_G, dtype=floating)
metric.static_image = np.array(F, dtype=floating)
metric.moving_image = np.array(G, dtype=floating)
metric.staticq_means_field = np.array(F, dtype=floating)
metric.staticq_sigma_sq_field = np.array(sigma_i_sq, dtype=floating)
metric.movingq_means_field = np.array(G, dtype=floating)
metric.movingq_sigma_sq_field = np.array(sigma_i_sq, dtype=floating)
# compute the step using the implementation under test
actual_forward = metric.compute_demons_step(True)
actual_backward = metric.compute_demons_step(False)
# Now directly compute the demons steps according to eq 4 in
# [Vercauteren09]
num_fwd = sigma_x_sq * (G - F)
den_fwd = sigma_x_sq * sq_norm_grad_F + sigma_i_sq
# This is $J^{P}$ in eq. 4 [Vercauteren09]
expected_fwd = -1 * np.array(grad_F)
expected_fwd[..., 0] *= num_fwd / den_fwd
expected_fwd[..., 1] *= num_fwd / den_fwd
# apply Gaussian smoothing
expected_fwd[..., 0] = ndimage.filters.gaussian_filter(
expected_fwd[..., 0], 3.0)
expected_fwd[..., 1] = ndimage.filters.gaussian_filter(
expected_fwd[..., 1], 3.0)
num_bwd = sigma_x_sq * (F - G)
den_bwd = sigma_x_sq * sq_norm_grad_G + sigma_i_sq
# This is $J^{P}$ in eq. 4 [Vercauteren09]
expected_bwd = -1 * np.array(grad_G)
expected_bwd[..., 0] *= num_bwd / den_bwd
expected_bwd[..., 1] *= num_bwd / den_bwd
# apply Gaussian smoothing
expected_bwd[..., 0] = ndimage.filters.gaussian_filter(
expected_bwd[..., 0], 3.0)
expected_bwd[..., 1] = ndimage.filters.gaussian_filter(
expected_bwd[..., 1], 3.0)
assert_array_almost_equal(actual_forward, expected_fwd)
assert_array_almost_equal(actual_backward, expected_bwd)
def test_em_demons_step_3d():
r"""
Compares the output of the demons step in 3d against an analytical
step. The fixed image is given by $F(x) = \frac{1}{2}||x - c_f||^2$, the
moving image is given by $G(x) = \frac{1}{2}||x - c_g||^2$,
$x, c_f, c_g \in R^{3}$
References
----------
[Vercauteren09] Vercauteren, T., Pennec, X., Perchant, A., & Ayache, N.
(2009). Diffeomorphic demons: efficient non-parametric
image registration. NeuroImage, 45(1 Suppl), S61-72.
doi:10.1016/j.neuroimage.2008.10.040
"""
# Select arbitrary images' shape (same shape for both images)
sh = (20, 15, 10)
# Select arbitrary centers
c_f = np.asarray(sh) / 2
c_g = c_f + 0.5
# Compute the identity vector field I(x) = x in R^3
x_0 = np.asarray(range(sh[0]))
x_1 = np.asarray(range(sh[1]))
x_2 = np.asarray(range(sh[2]))
X = np.ndarray(sh + (3,), dtype=np.float64)
O = np.ones(sh)
X[..., 0] = x_0[:, None, None] * O
X[..., 1] = x_1[None, :, None] * O
X[..., 2] = x_2[None, None, :] * O
# Compute the gradient fields of F and G
grad_F = X - c_f
grad_G = X - c_g
# The squared norm of grad_G to be used later
sq_norm_grad_F = np.sum(grad_F**2, -1)
sq_norm_grad_G = np.sum(grad_G**2, -1)
# Compute F and G
F = 0.5 * sq_norm_grad_F
G = 0.5 * sq_norm_grad_G
# Create an instance of EMMetric
metric = EMMetric(3)
metric.static_spacing = np.array([1.2, 1.2, 1.2])
# The $\sigma_x$ (eq. 4 in [Vercauteren09]) parameter is computed in ANTS
# based on the image's spacing
sigma_x_sq = np.sum(metric.static_spacing**2) / metric.dim
# Set arbitrary values for $\sigma_i$ (eq. 4 in [Vercauteren09])
# The original Demons algorithm used simply |F(x) - G(x)| as an
# estimator, so let's use it as well
sigma_i_sq = (F - G)**2
# Set the properties relevant to the demons methods
metric.smooth = 3.0
metric.gradient_static = np.array(grad_F, dtype=floating)
metric.gradient_moving = np.array(grad_G, dtype=floating)
metric.static_image = np.array(F, dtype=floating)
metric.moving_image = np.array(G, dtype=floating)
metric.staticq_means_field = np.array(F, dtype=floating)
metric.staticq_sigma_sq_field = np.array(sigma_i_sq, dtype=floating)
metric.movingq_means_field = np.array(G, dtype=floating)
metric.movingq_sigma_sq_field = np.array(sigma_i_sq, dtype=floating)
# compute the step using the implementation under test
actual_forward = metric.compute_demons_step(True)
actual_backward = metric.compute_demons_step(False)
# Now directly compute the demons steps according to eq 4 in
# [Vercauteren09]
num_fwd = sigma_x_sq * (G - F)
den_fwd = sigma_x_sq * sq_norm_grad_F + sigma_i_sq
expected_fwd = -1 * np.array(grad_F)
expected_fwd[..., 0] *= num_fwd / den_fwd
expected_fwd[..., 1] *= num_fwd / den_fwd
expected_fwd[..., 2] *= num_fwd / den_fwd
# apply Gaussian smoothing
expected_fwd[..., 0] = ndimage.filters.gaussian_filter(
expected_fwd[..., 0], 3.0)
expected_fwd[..., 1] = ndimage.filters.gaussian_filter(
expected_fwd[..., 1], 3.0)
expected_fwd[..., 2] = ndimage.filters.gaussian_filter(
expected_fwd[..., 2], 3.0)
num_bwd = sigma_x_sq * (F - G)
den_bwd = sigma_x_sq * sq_norm_grad_G + sigma_i_sq
expected_bwd = -1 * np.array(grad_G)
expected_bwd[..., 0] *= num_bwd / den_bwd
expected_bwd[..., 1] *= num_bwd / den_bwd
expected_bwd[..., 2] *= num_bwd / den_bwd
# apply Gaussian smoothing
expected_bwd[..., 0] = ndimage.filters.gaussian_filter(
expected_bwd[..., 0], 3.0)
expected_bwd[..., 1] = ndimage.filters.gaussian_filter(
expected_bwd[..., 1], 3.0)
expected_bwd[..., 2] = ndimage.filters.gaussian_filter(
expected_bwd[..., 2], 3.0)
assert_array_almost_equal(actual_forward, expected_fwd)
assert_array_almost_equal(actual_backward, expected_bwd)
if __name__ == '__main__':
test_em_demons_step_2d()
test_em_demons_step_3d()
test_exceptions()
test_EMMetric_image_dynamics()
| villalonreina/dipy | dipy/align/tests/test_metrics.py | Python | bsd-3-clause | 9,239 | [
"Gaussian"
] | 7e1dbedad84ddb71a165218eed7884de6cc3c53eb5613cb2ea72ec1ad89774db |
# -*- coding: utf-8 -*-
# Version: 0.17
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
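For reference, a minimal `[versioneer]` section looks roughly like this (the
values are project-specific placeholders):
    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix =
    parentdir_prefix = myproject-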
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
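For instance, a `setup.py` (or any other script that imports the top-level
`versioneer.py`) could inspect these flavors roughly like this (illustrative
only):
    import versioneer
    info = versioneer.get_versions()
    print(info['version'], info['full-revisionid'], info['dirty'])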
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
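# Illustrative use of the decorator above (the handler shown here is
# hypothetical; the real git handlers are registered further down in this
# file):
#     @register_vcs_handler("git", "get_keywords")
#     def my_get_keywords(versionfile_abs):
#         ...
# After decoration, HANDLERS["git"]["get_keywords"] refers to my_get_keywords.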
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried {}".format(commands))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
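# Example (illustrative) use of run_command, mirroring the calls made by the
# git handlers below:
#     stdout, rc = run_command(["git"], ["rev-parse", "HEAD"], cwd=root)
#     # stdout is the trimmed command output (or None on failure); rc is the
#     # return code (or None if the command could not be started at all).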
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.17 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
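# Worked example (illustrative): with parentdir_prefix="myproject-" and a
# root of ".../myproject-1.2.3", the loop above returns
# {"version": "1.2.3", "full-revisionid": None, "dirty": False,
#  "error": None, "date": None}.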
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.17) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set {} to '{}'".format(filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
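# Worked example (illustrative): pieces = {"closest-tag": "0.11",
# "distance": 2, "short": "1076c97", "dirty": True} renders as
# "0.11+2.g1076c97.dirty"; with distance 0 and a clean tree the result is
# just "0.11".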
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if ".post" in rendered:
# update the existing post tag
start = rendered.index(".post") + 5
if len(rendered) == start:
rendered += "%d" % pieces["distance"]
else:
end = start + 1
while end <= len(rendered) and rendered[start:end].isdigit():
end += 1
end -= 1
distance = pieces["distance"]
if start != end:
distance += int(rendered[start:end])
rendered = rendered[:start] + "%d" % distance + rendered[end:]
else:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
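# Worked example (illustrative): {"closest-tag": "1.2", "distance": 3,
# "short": "abc1234", "dirty": False} renders as "1.2.post3+gabc1234".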
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns a dict with the keys 'version', 'full-revisionid', 'dirty',
'error', and 'date'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file {} {}".format(versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
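# Typical setup.py wiring for the command classes returned above (this mirrors
# the CONFIG_ERROR snippet below):
#     import versioneer
#     setup(version=versioneer.get_version(),
#           cmdclass=versioneer.get_cmdclass(), ...)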
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
| scikit-build/ninja-python-distributions | versioneer.py | Python | apache-2.0 | 69,301 | [
"Brian"
] | 908af127bf45df4a83917278b0c728785cc81ed8c4330d014d44d091d72f0e57 |