repo_name | path | copies | size | content | license
---|---|---|---|---|---|
rolandmansilla/microblog | flask/lib/python2.7/site-packages/flask/ctx.py | 776 | 14266 | # -*- coding: utf-8 -*-
"""
flask.ctx
~~~~~~~~~
Implements the objects required to keep the context.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .module import blueprint_is_module
from .signals import appcontext_pushed, appcontext_popped
class _AppCtxGlobals(object):
"""A plain object."""
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __contains__(self, item):
return item in self.__dict__
def __iter__(self):
return iter(self.__dict__)
def __repr__(self):
top = _app_ctx_stack.top
if top is not None:
return '<flask.g of %r>' % top.app.name
return object.__repr__(self)
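# Editorial usage sketch (not part of Flask): inside an app context, ``flask.g``
# is an instance of this class, so arbitrary attributes can be stored and read
# back defensively, e.g.::
#
#     with app.app_context():              # ``app`` is an assumed Flask instance
#         g.db_handle = connect_db()       # ``connect_db`` is a hypothetical helper
#         g.get('db_handle', None)         # -> the handle, or None if unset
#         'db_handle' in g                 # -> True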
def after_this_request(f):
"""Executes a function after this request. This is useful to modify
response objects. The function is passed the response object and has
to return the same or a new one.
Example::
@app.route('/')
def index():
@after_this_request
def add_header(response):
response.headers['X-Foo'] = 'Parachute'
return response
return 'Hello World!'
This is more useful if a function other than the view function wants to
modify a response. For instance think of a decorator that wants to add
some headers without converting the return value into a response object.
.. versionadded:: 0.9
"""
_request_ctx_stack.top._after_request_functions.append(f)
return f
def copy_current_request_context(f):
"""A helper function that decorates a function to retain the current
request context. This is useful when working with greenlets. The moment
the function is decorated a copy of the request context is created and
then pushed when the function is called.
Example::
import gevent
from flask import copy_current_request_context
@app.route('/')
def index():
@copy_current_request_context
def do_some_work():
# do some work here, it can access flask.request like you
# would otherwise in the view function.
...
gevent.spawn(do_some_work)
return 'Regular response'
.. versionadded:: 0.10
"""
top = _request_ctx_stack.top
if top is None:
raise RuntimeError('This decorator can only be used at local scopes '
'when a request context is on the stack. For instance within '
'view functions.')
reqctx = top.copy()
def wrapper(*args, **kwargs):
with reqctx:
return f(*args, **kwargs)
return update_wrapper(wrapper, f)
def has_request_context():
"""If you have code that wants to test if a request context is there or
not this function can be used. For instance, you may want to take advantage
of request information if the request object is available, but fail
silently if it is unavailable.
::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and has_request_context():
remote_addr = request.remote_addr
self.remote_addr = remote_addr
Alternatively you can also just test any of the context bound objects
(such as :class:`request` or :class:`g` for truthiness)::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and request:
remote_addr = request.remote_addr
self.remote_addr = remote_addr
.. versionadded:: 0.7
"""
return _request_ctx_stack.top is not None
def has_app_context():
"""Works like :func:`has_request_context` but for the application
context. You can also just do a boolean check on the
:data:`current_app` object instead.
.. versionadded:: 0.9
"""
return _app_ctx_stack.top is not None
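# Editorial sketch (not part of Flask) of the two equivalent checks described
# above; ``app`` is an assumed Flask instance and ``current_app`` comes from
# ``flask``::
#
#     def active_app_name():
#         if has_app_context():            # explicit check
#             return current_app.name
#         return None
#
#     # or, relying on the truthiness of the proxy:
#     #     if current_app: ...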
class AppContext(object):
"""The application context binds an application object implicitly
to the current thread or greenlet, similar to how the
:class:`RequestContext` binds request information. The application
context is also implicitly created if a request context is created
but the application is not on top of the individual application
context.
"""
def __init__(self, app):
self.app = app
self.url_adapter = app.create_url_adapter(None)
self.g = app.app_ctx_globals_class()
# Like request context, app contexts can be pushed multiple times
# but there a basic "refcount" is enough to track them.
self._refcnt = 0
def push(self):
"""Binds the app context to the current context."""
self._refcnt += 1
_app_ctx_stack.push(self)
appcontext_pushed.send(self.app)
def pop(self, exc=None):
"""Pops the app context."""
self._refcnt -= 1
if self._refcnt <= 0:
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_appcontext(exc)
rv = _app_ctx_stack.pop()
assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
% (rv, self)
appcontext_popped.send(self.app)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop(exc_value)
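# Editorial usage sketch (not part of Flask): an application context is usually
# entered explicitly in scripts or shells so that ``current_app`` and ``g`` are
# bound outside of a request; ``app`` and ``init_db`` are assumed names::
#
#     with app.app_context():
#         init_db()                        # may freely use current_app / g here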
class RequestContext(object):
"""The request context contains all request relevant information. It is
created at the beginning of the request and pushed to the
`_request_ctx_stack` and removed at the end of it. It will create the
URL adapter and request object for the WSGI environment provided.
Do not attempt to use this class directly, instead use
:meth:`~flask.Flask.test_request_context` and
:meth:`~flask.Flask.request_context` to create this object.
When the request context is popped, it will evaluate all the
functions registered on the application for teardown execution
(:meth:`~flask.Flask.teardown_request`).
The request context is automatically popped at the end of the request
for you. In debug mode the request context is kept around if
exceptions happen so that interactive debuggers have a chance to
introspect the data. With 0.4 this can also be forced for requests
that did not fail and outside of `DEBUG` mode. By setting
``'flask._preserve_context'`` to `True` on the WSGI environment the
context will not pop itself at the end of the request. This is used by
the :meth:`~flask.Flask.test_client` for example to implement the
deferred cleanup functionality.
You might find this helpful for unittests where you need the
information from the context local around for a little longer. Make
sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
that situation, otherwise your unittests will leak memory.
"""
def __init__(self, app, environ, request=None):
self.app = app
if request is None:
request = app.request_class(environ)
self.request = request
self.url_adapter = app.create_url_adapter(self.request)
self.flashes = None
self.session = None
# Request contexts can be pushed multiple times and interleaved with
# other request contexts. Only when the last level is popped do we
# get rid of them. Additionally, if an application context is missing,
# one is created implicitly, so for each level we add this information.
self._implicit_app_ctx_stack = []
# indicator if the context was preserved. Next time another context
# is pushed the preserved context is popped.
self.preserved = False
# remembers the exception for pop if there is one in case the context
# preservation kicks in.
self._preserved_exc = None
# Functions that should be executed after the request on the response
# object. These will be called before the regular "after_request"
# functions.
self._after_request_functions = []
self.match_request()
# XXX: Support for deprecated functionality. This is going away with
# Flask 1.0
blueprint = self.request.blueprint
if blueprint is not None:
# better safe than sorry, we don't want to break code that
# already worked
bp = app.blueprints.get(blueprint)
if bp is not None and blueprint_is_module(bp):
self.request._is_old_module = True
def _get_g(self):
return _app_ctx_stack.top.g
def _set_g(self, value):
_app_ctx_stack.top.g = value
g = property(_get_g, _set_g)
del _get_g, _set_g
def copy(self):
"""Creates a copy of this request context with the same request object.
This can be used to move a request context to a different greenlet.
Because the actual request object is the same this cannot be used to
move a request context to a different thread unless access to the
request object is locked.
.. versionadded:: 0.10
"""
return self.__class__(self.app,
environ=self.request.environ,
request=self.request
)
def match_request(self):
"""Can be overridden by a subclass to hook into the matching
of the request.
"""
try:
url_rule, self.request.view_args = \
self.url_adapter.match(return_rule=True)
self.request.url_rule = url_rule
except HTTPException as e:
self.request.routing_exception = e
def push(self):
"""Binds the request context to the current context."""
# If an exception occurs in debug mode or if context preservation is
# activated under exception situations exactly one context stays
# on the stack. The rationale is that you want to access that
# information under debug situations. However if someone forgets to
# pop that context again we want to make sure that on the next push
# it's invalidated, otherwise we run the risk that something leaks
# memory. This is usually only a problem in the test suite since this
# functionality is not active in production environments.
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop(top._preserved_exc)
# Before we push the request context we have to ensure that there
# is an application context.
app_ctx = _app_ctx_stack.top
if app_ctx is None or app_ctx.app != self.app:
app_ctx = self.app.app_context()
app_ctx.push()
self._implicit_app_ctx_stack.append(app_ctx)
else:
self._implicit_app_ctx_stack.append(None)
_request_ctx_stack.push(self)
# Open the session at the moment that the request context is
# available. This allows a custom open_session method to use the
# request context (e.g. code that accesses database information
# stored on `g` instead of the appcontext).
self.session = self.app.open_session(self.request)
if self.session is None:
self.session = self.app.make_null_session()
def pop(self, exc=None):
"""Pops the request context and unbinds it by doing that. This will
also trigger the execution of functions registered by the
:meth:`~flask.Flask.teardown_request` decorator.
.. versionchanged:: 0.9
Added the `exc` argument.
"""
app_ctx = self._implicit_app_ctx_stack.pop()
clear_request = False
if not self._implicit_app_ctx_stack:
self.preserved = False
self._preserved_exc = None
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_request(exc)
# If this interpreter supports clearing the exception information
# we do that now. This will only go into effect on Python 2.x,
# on 3.x it disappears automatically at the end of the exception
# stack.
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
request_close = getattr(self.request, 'close', None)
if request_close is not None:
request_close()
clear_request = True
rv = _request_ctx_stack.pop()
assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
% (rv, self)
# get rid of circular dependencies at the end of the request
# so that we don't require the GC to be active.
if clear_request:
rv.request.environ['werkzeug.request'] = None
# Get rid of the app as well if necessary.
if app_ctx is not None:
app_ctx.pop(exc)
def auto_pop(self, exc):
if self.request.environ.get('flask._preserve_context') or \
(exc is not None and self.app.preserve_context_on_exception):
self.preserved = True
self._preserved_exc = exc
else:
self.pop(exc)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
# do not pop the request stack if we are in debug mode and an
# exception happened. This will allow the debugger to still
# access the request object in the interactive shell. Furthermore
# the context can be force kept alive for the test client.
# See flask.testing for how this works.
self.auto_pop(exc_value)
def __repr__(self):
return '<%s \'%s\' [%s] of %s>' % (
self.__class__.__name__,
self.request.url,
self.request.method,
self.app.name,
)
| bsd-3-clause |
jaysonkelly/Marlin | buildroot/share/scripts/createTemperatureLookupMarlin.py | 89 | 6252 | #!/usr/bin/python
"""Thermistor Value Lookup Table Generator
Generates a temperature lookup table for use in a microcontroller, in C format, based on:
http://en.wikipedia.org/wiki/Steinhart-Hart_equation
The main use is for Arduino programs that read data from the circuit board described here:
http://reprap.org/wiki/Temperature_Sensor_v2.0
Usage: python createTemperatureLookupMarlin.py [options]
Options:
-h, --help show this help
--rp=... pull-up resistor
--t1=ttt:rrr low temperature temperature:resistance point (around 25 degC)
--t2=ttt:rrr middle temperature temperature:resistance point (around 150 degC)
--t3=ttt:rrr high temperature temperature:resistance point (around 250 degC)
--num-temps=... the number of temperature points to calculate (default: 36)
"""
from math import *
import sys
import getopt
"Constants"
ZERO = 273.15 # zero point of Kelvin scale
VADC = 5 # ADC voltage
VCC = 5 # supply voltage
ARES = pow(2,10) # 10 Bit ADC resolution
VSTEP = VADC / ARES # ADC voltage resolution
TMIN = 0 # lowest temperature in table
TMAX = 350 # highest temperature in table
class Thermistor:
"Class to do the thermistor maths"
def __init__(self, rp, t1, r1, t2, r2, t3, r3):
l1 = log(r1)
l2 = log(r2)
l3 = log(r3)
y1 = 1.0 / (t1 + ZERO) # adjust scale
y2 = 1.0 / (t2 + ZERO)
y3 = 1.0 / (t3 + ZERO)
x = (y2 - y1) / (l2 - l1)
y = (y3 - y1) / (l3 - l1)
c = (y - x) / ((l3 - l2) * (l1 + l2 + l3))
b = x - c * (l1**2 + l2**2 + l1*l2)
a = y1 - (b + l1**2 *c)*l1
if c < 0:
print "//////////////////////////////////////////////////////////////////////////////////////"
print "// WARNING: negative coefficient 'c'! Something may be wrong with the measurements! //"
print "//////////////////////////////////////////////////////////////////////////////////////"
c = -c
self.c1 = a # Steinhart-Hart coefficients
self.c2 = b
self.c3 = c
self.rp = rp # pull-up resistance
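# Editorial note (not part of the original script): __init__ above fits the
# Steinhart-Hart equation
#     1/T = a + b*ln(R) + c*ln(R)**3
# to the three calibration points (t1, r1), (t2, r2), (t3, r3). With
# l_i = ln(r_i) and y_i = 1/(t_i + ZERO), the divided differences x and y
# eliminate 'a'; the remaining algebra then gives c, b and finally a, which
# are stored as self.c1, self.c2 and self.c3.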
def resol(self, adc):
"Convert ADC reading into a resolution"
res = self.temp(adc)-self.temp(adc+1)
return res
def voltage(self, adc):
"Convert ADC reading into a Voltage"
return adc * VSTEP # convert the 10 bit ADC value to a voltage
def resist(self, adc):
"Convert ADC reading into a resistance in Ohms"
r = self.rp * self.voltage(adc) / (VCC - self.voltage(adc)) # resistance of thermistor
return r
def temp(self, adc):
"Convert ADC reading into a temperature in Celcius"
l = log(self.resist(adc))
Tinv = self.c1 + self.c2*l + self.c3* l**3 # inverse temperature
return (1/Tinv) - ZERO # temperature
def adc(self, temp):
"Convert temperature into a ADC reading"
x = (self.c1 - (1.0 / (temp+ZERO))) / (2*self.c3)
y = sqrt((self.c2 / (3*self.c3))**3 + x**2)
r = exp((y-x)**(1.0/3) - (y+x)**(1.0/3))
return (r / (self.rp + r)) * ARES
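# Editorial note (not part of the original script): adc() inverts Steinhart-Hart
# by solving the depressed cubic in u = ln(R):
#     c*u**3 + b*u + (a - 1/T) = 0
# With x = (a - 1/T)/(2c) and y = sqrt((b/(3c))**3 + x**2), the real root is
# u = cbrt(y - x) - cbrt(y + x), which is what the expression for r computes
# before mapping R back to ADC counts through the pull-up divider r/(rp + r).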
def main(argv):
"Default values"
t1 = 25          # low temperature point in degC (25 degC)
r1 = 100000      # resistance at low temperature (100 kOhm)
t2 = 150         # middle temperature point in degC (150 degC)
r2 = 1641.9      # resistance at middle temperature (1.6 kOhm)
t3 = 250         # high temperature point in degC (250 degC)
r3 = 226.15 # resistance at high temperature (226.15 Ohm)
rp = 4700; # pull-up resistor (4.7 kOhm)
num_temps = 36; # number of entries for look-up table
try:
opts, args = getopt.getopt(argv, "h", ["help", "rp=", "t1=", "t2=", "t3=", "num-temps="])
except getopt.GetoptError as err:
print str(err)
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt == "--rp":
rp = int(arg)
elif opt == "--t1":
arg = arg.split(':')
t1 = float(arg[0])
r1 = float(arg[1])
elif opt == "--t2":
arg = arg.split(':')
t2 = float(arg[0])
r2 = float(arg[1])
elif opt == "--t3":
arg = arg.split(':')
t3 = float(arg[0])
r3 = float(arg[1])
elif opt == "--num-temps":
num_temps = int(arg)
t = Thermistor(rp, t1, r1, t2, r2, t3, r3)
increment = int((ARES-1)/(num_temps-1));
step = (TMIN-TMAX) / (num_temps-1)
low_bound = t.temp(ARES-1);
up_bound = t.temp(1);
min_temp = int(TMIN if TMIN > low_bound else low_bound)
max_temp = int(TMAX if TMAX < up_bound else up_bound)
temps = range(max_temp, TMIN+step, step);
print "// Thermistor lookup table for Marlin"
print "// ./createTemperatureLookupMarlin.py --rp=%s --t1=%s:%s --t2=%s:%s --t3=%s:%s --num-temps=%s" % (rp, t1, r1, t2, r2, t3, r3, num_temps)
print "// Steinhart-Hart Coefficients: a=%.15g, b=%.15g, c=%.15g " % (t.c1, t.c2, t.c3)
print "// Theoretical limits of termistor: %.2f to %.2f degC" % (low_bound, up_bound)
print
print "#define NUMTEMPS %s" % (len(temps))
print "const short temptable[NUMTEMPS][2] PROGMEM = {"
for temp in temps:
adc = t.adc(temp)
print " { (short) (%7.2f * OVERSAMPLENR ), %4s }%s // v=%.3f\tr=%.3f\tres=%.3f degC/count" % (adc , temp, \
',' if temp != temps[-1] else ' ', \
t.voltage(adc), \
t.resist( adc), \
t.resol( adc) \
)
print "};"
def usage():
print __doc__
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 |
tanghaibao/jcvi | jcvi/projects/vanilla.py | 1 | 11915 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Plotting scripts for the vanilla genome paper.
"""
import logging
import sys
from jcvi.apps.base import ActionDispatcher, OptionParser
from jcvi.compara.synteny import AnchorFile, check_beds
from jcvi.formats.base import get_number
from jcvi.formats.bed import Bed
from jcvi.graphics.base import normalize_axes, panel_labels, plt, savefig
from jcvi.graphics.glyph import TextCircle
from jcvi.graphics.synteny import Synteny, draw_gene_legend
def main():
actions = (
# Chromosome painting since WGD
("ancestral", "paint 14 chromosomes following alpha WGD (requires data)"),
# main figures in text
("ploidy", "plot vanilla synteny (requires data)"),
# Composite phylogeny - tree and ks
("phylogeny", "create a composite figure with tree and ks"),
("tree", "create a separate figure with tree"),
("ks", "create a separate figure with ks"),
# Composite synteny - wgd and microsynteny
("synteny", "create a composite figure with wgd and microsynteny"),
("wgd", "create separate figures with wgd"),
("microsynteny", "create separate figures with microsynteny"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def phylogeny(args):
"""
%prog phylogeny treefile ks.layout
Create a composite figure with (A) tree and (B) ks.
"""
from jcvi.graphics.tree import parse_tree, LeafInfoFile, WGDInfoFile, draw_tree
p = OptionParser(phylogeny.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="10x12")
(datafile, layoutfile) = args
logging.debug("Load tree file `{0}`".format(datafile))
t, hpd = parse_tree(datafile)
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
ax1 = fig.add_axes([0, 0.4, 1, 0.6])
ax2 = fig.add_axes([0.12, 0.065, 0.8, 0.3])
margin, rmargin = 0.1, 0.2 # Left and right margin
leafinfo = LeafInfoFile("leafinfo.csv").cache
wgdinfo = WGDInfoFile("wgdinfo.csv").cache
outgroup = "ginkgo"
# Panel A
draw_tree(
ax1,
t,
hpd=hpd,
margin=margin,
rmargin=rmargin,
supportcolor=None,
internal=False,
outgroup=outgroup,
reroot=False,
leafinfo=leafinfo,
wgdinfo=wgdinfo,
geoscale=True,
)
from jcvi.apps.ks import Layout, KsPlot, KsFile
# Panel B
ks_min = 0.0
ks_max = 3.0
bins = 60
fill = False
layout = Layout(layoutfile)
print(layout, file=sys.stderr)
kp = KsPlot(ax2, ks_max, bins, legendp="upper right")
for lo in layout:
data = KsFile(lo.ksfile)
data = [x.ng_ks for x in data]
data = [x for x in data if ks_min <= x <= ks_max]
kp.add_data(
data,
lo.components,
label=lo.label,
color=lo.color,
marker=lo.marker,
fill=fill,
fitted=False,
kde=True,
)
kp.draw(filename=None)
normalize_axes([root, ax1])
labels = ((0.05, 0.95, "A"), (0.05, 0.4, "B"))
panel_labels(root, labels)
image_name = "phylogeny.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def tree(args):
"""
%prog tree treefile
Create a tree figure.
"""
from jcvi.graphics.tree import parse_tree, LeafInfoFile, WGDInfoFile, draw_tree
p = OptionParser(tree.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="10x8")
(datafile,) = args
logging.debug("Load tree file `{0}`".format(datafile))
t, hpd = parse_tree(datafile)
fig = plt.figure(1, (iopts.w, iopts.h))
ax1 = fig.add_axes([0, 0, 1, 1])
margin, rmargin = 0.1, 0.2 # Left and right margin
leafinfo = LeafInfoFile("leafinfo.csv").cache
wgdinfo = WGDInfoFile("wgdinfo.csv").cache
outgroup = "ginkgo"
# Panel A
draw_tree(
ax1,
t,
hpd=hpd,
margin=margin,
rmargin=rmargin,
supportcolor=None,
internal=False,
outgroup=outgroup,
reroot=False,
leafinfo=leafinfo,
wgdinfo=wgdinfo,
geoscale=True,
)
normalize_axes([ax1])
image_name = "tree.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def ks(args):
"""
%prog ks ks.layout
Create a ks figure.
"""
p = OptionParser(ks.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="10x4")
(layoutfile,) = args
from jcvi.apps.ks import Layout, KsPlot, KsFile
fig = plt.figure(1, (iopts.w, iopts.h))
ax2 = fig.add_axes([0.12, 0.12, 0.8, 0.8])
# Panel B
ks_min = 0.0
ks_max = 3.0
bins = 60
fill = False
layout = Layout(layoutfile)
print(layout, file=sys.stderr)
kp = KsPlot(ax2, ks_max, bins, legendp="upper right")
for lo in layout:
data = KsFile(lo.ksfile)
data = [x.ng_ks for x in data]
data = [x for x in data if ks_min <= x <= ks_max]
kp.add_data(
data,
lo.components,
label=lo.label,
color=lo.color,
marker=lo.marker,
fill=fill,
fitted=False,
kde=True,
)
kp.draw(filename=None)
image_name = "ks.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def synteny(args):
"""
%prog synteny vplanifoliaA_blocks.bed vplanifoliaA.sizes \
b1.blocks all.bed b1.layout
Create a composite figure with (A) wgd and (B) microsynteny.
"""
from jcvi.graphics.chromosome import draw_chromosomes
p = OptionParser(synteny.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="12x12")
(bedfile, sizesfile, blocksfile, allbedfile, blockslayout) = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
ax1 = fig.add_axes([0, 0.5, 1, 0.5])
ax2 = fig.add_axes([0.02, 0, 0.98, 0.5])
# Panel A
title = r"Genome duplication $\alpha^{O}$ event in $\textit{Vanilla}$"
draw_chromosomes(
ax1,
bedfile,
sizes=sizesfile,
iopts=iopts,
mergedist=200000,
winsize=50000,
imagemap=False,
gauge=True,
legend=False,
title=title,
)
# Panel B
draw_ploidy(fig, ax2, blocksfile, allbedfile, blockslayout)
normalize_axes([root, ax1, ax2])
labels = ((0.05, 0.95, "A"), (0.05, 0.5, "B"))
panel_labels(root, labels)
image_name = "synteny.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def wgd(args):
"""
%prog wgd vplanifoliaA_blocks.bed vplanifoliaA.sizes
Create a wgd figure.
"""
from jcvi.graphics.chromosome import draw_chromosomes
p = OptionParser(wgd.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x5")
(bedfile, sizesfile) = args
fig = plt.figure(1, (iopts.w, iopts.h))
ax1 = fig.add_axes([0, 0, 1, 1])
title = r"Genome duplication $\alpha^{O}$ event in $\textit{Vanilla}$"
draw_chromosomes(
ax1,
bedfile,
sizes=sizesfile,
iopts=iopts,
mergedist=200000,
winsize=50000,
imagemap=False,
gauge=True,
legend=False,
title=title,
)
normalize_axes([ax1])
image_name = "wgd.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def microsynteny(args):
"""
%prog microsynteny b1.blocks all.bed b1.layout
Create a microsynteny figure.
"""
p = OptionParser(microsynteny.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="12x6")
(blocksfile, allbedfile, blockslayout) = args
fig = plt.figure(1, (iopts.w, iopts.h))
ax2 = fig.add_axes([0, 0, 1, 1])
draw_ploidy(fig, ax2, blocksfile, allbedfile, blockslayout)
normalize_axes([ax2])
image_name = "microsynteny.pdf"
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def ancestral(args):
"""
%prog ancestral vplanifoliaA.vplanifoliaA.anchors > vplanifoliaA_blocks.bed
Paint 14 chromosomes following alpha WGD.
"""
p = OptionParser(ancestral.__doc__)
p.set_beds()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(anchorsfile,) = args
qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts)
# We focus on the following chromosome pairs
target_pairs = {
(1, 1),
(1, 6),
(1, 8),
(1, 13),
(2, 4),
(3, 12),
(3, 14),
(5, 6),
(5, 8),
(7, 9),
(7, 11),
(9, 10),
(10, 11),
}
def get_target(achr, bchr):
if "chr" not in achr and "chr" not in bchr:
return None
achr, bchr = get_number(achr), get_number(bchr)
if achr > bchr:
achr, bchr = bchr, achr
if (achr, bchr) in target_pairs:
return achr, bchr
return None
def build_bedline(astart, aend, target_pair):
# target_name = "{:02d}-{:02d}".format(*target_pair)
target_name = [str(x) for x in target_pair if x in (1, 2, 3, 5, 7, 10)][0]
return "\t".join(
str(x) for x in (astart.seqid, astart.start, aend.end, target_name)
)
# Iterate through the blocks, store any regions that has hits to one of the
# target_pairs
ac = AnchorFile(anchorsfile)
blocks = ac.blocks
outbed = Bed()
for i, block in enumerate(blocks):
a, b, scores = zip(*block)
a = [qorder[x] for x in a]
b = [sorder[x] for x in b]
astart, aend = min(a)[1], max(a)[1]
bstart, bend = min(b)[1], max(b)[1]
# Now convert to BED lines with new accn
achr, bchr = astart.seqid, bstart.seqid
target = get_target(achr, bchr)
if target is None:
continue
outbed.add(build_bedline(astart, aend, target))
outbed.add(build_bedline(bstart, bend, target))
outbed.print_to_file(sorted=True)
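# Editorial sketch (values are hypothetical, not from a real run): every anchor
# block hitting a target chromosome pair contributes two BED lines of the form
#   <seqid> <start> <end> <target_name>
# e.g. "chr1  12000  835000  1", so the resulting vplanifoliaA_blocks.bed can
# be painted by draw_chromosomes() in the wgd/synteny figures above.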
def ploidy(args):
"""
%prog ploidy b1.blocks all.bed b1.layout
Build a figure that illustrates the WGD history of the vanilla genome.
"""
p = OptionParser(ploidy.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="12x6")
if len(args) != 3:
sys.exit(not p.print_help())
blocksfile, bedfile, blockslayout = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
draw_ploidy(fig, root, blocksfile, bedfile, blockslayout)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "vanilla-karyotype"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def draw_ploidy(fig, root, blocksfile, bedfile, blockslayout):
switchidsfile = "switch.ids"
Synteny(
fig,
root,
blocksfile,
bedfile,
blockslayout,
scalebar=True,
switch=switchidsfile,
)
# Legend showing the orientation of the genes
draw_gene_legend(root, 0.2, 0.3, 0.53)
# WGD labels
radius = 0.025
tau_color = "#bebada"
alpha_color = "#bc80bd"
label_color = "k"
pad = 0.05
for y in (0.74 + 1.5 * pad, 0.26 - 1.5 * pad):
TextCircle(
root,
0.25,
y,
r"$\alpha^{O}$",
radius=radius,
fc=alpha_color,
color=label_color,
fontweight="bold",
)
TextCircle(
root,
0.75,
y,
r"$\alpha^{O}$",
radius=radius,
fc=alpha_color,
color=label_color,
fontweight="bold",
)
for y in (0.74 + 3 * pad, 0.26 - 3 * pad):
TextCircle(
root, 0.5, y, r"$\tau$", radius=radius, fc=tau_color, color=label_color
)
if __name__ == "__main__":
main()
| bsd-2-clause |
TNosredna/CouchPotatoServer | libs/requests/packages/charade/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
| gpl-3.0 |
xuegang/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/gpstart/test_gpstart.py | 9 | 8432 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import socket
from time import sleep
import unittest2 as unittest
import tinctest
from gppylib.commands.base import Command
from mpp.models import MPPTestCase
from mpp.lib.PSQL import PSQL
from mpp.gpdb.tests.storage.walrepl.lib.verify import StandbyVerify
from mpp.gpdb.tests.storage.walrepl.gpinitstandby import GpinitStandby
from mpp.gpdb.tests.storage.walrepl.lib.pg_util import GpUtility
from mpp.gpdb.tests.storage.walrepl.gpactivatestandby import GpactivateStandby
class GpstartTestCase(MPPTestCase):
'''
testcase for gpstart
gpstart may return status code 1 as well as 0 in the success case. The
difference is whether it produces WARNING or not, but here we don't care.
'''
origin_mdd = os.environ.get('MASTER_DATA_DIRECTORY')
def __init__(self,methodName):
self.gputil = GpUtility()
self.stdby = StandbyVerify()
super(GpstartTestCase,self).__init__(methodName)
def setUp(self):
self.gputil.check_and_start_gpdb()
stdby_presence = self.gputil.check_standby_presence()
# We should forcibly recreate standby, as it might have been promoted.
if stdby_presence:
self.gputil.remove_standby()
self.gputil.install_standby()
def tearDown(self):
self.gputil.remove_standby()
"""
Gpstart test cases in recovery mode
"""
def test_gpstart_from_master(self):
"""
tag
"""
self.gputil.check_and_stop_gpdb()
(rc, stdout) = self.gputil.run('gpstart -a ')
self.assertIn(rc, (0, 1))
self.assertTrue(self.gputil.gpstart_and_verify())
sleep(2)
self.assertTrue(self.stdby.check_gp_segment_config(),'standby master not configured')
self.assertTrue(self.stdby.check_pg_stat_replication(),'standby not in replication status')
self.assertTrue(self.stdby.check_standby_processes(), 'standby processes not running')
(rc, output) = self.gputil.run(command = 'ps -ef|grep "wal sender "|grep -v grep')
self.assertIsNotNone(output)
def test_gpstart_master_only(self):
"""
tag
"""
self.gputil.check_and_stop_gpdb()
(rc, stdout) = self.gputil.run('export GPSTART_INTERNAL_MASTER_ONLY=1; '
'gpstart -a -m ')
self.assertIn(rc, (0, 1))
self.assertTrue(self.gputil.gpstart_and_verify())
(rc,output) = self.gputil.run('PGDATABASE=template1 '
"PGOPTIONS='-c gp_session_role=utility' "
'psql')
self.assertEqual(rc, 0)
(rc, output) = self.gputil.run('psql template1')
# should fail due to master only mode
self.assertEqual(rc, 2)
self.gputil.run('gpstop -a -m')
self.gputil.run('gpstart -a')
def test_gpstart_restricted_mode_master(self):
"""Test -R option with standby."""
self.gputil.check_and_stop_gpdb()
(rc, stdout) = self.gputil.run('gpstart -a -R')
self.assertIn(rc, (0, 1))
self.assertTrue(self.gputil.gpstart_and_verify())
(rc,output) = self.gputil.run(command = 'psql template1')
self.assertIn(rc, (0, 1))
self.gputil.run('gpstop -ar')
def test_gpstart_master_w_timeout(self):
"""Test -t option with standby."""
self.gputil.check_and_stop_gpdb()
(rc, output) = self.gputil.run('gpstart -a -t 30')
self.assertIn(rc, (0, 1))
self.assertTrue(self.gputil.gpstart_and_verify())
self.gputil.run('gpstop -ar')
def test_gpstart_no_standby(self):
"""Test -y with standby configured."""
self.gputil.check_and_stop_gpdb()
(rc, stdout) = self.gputil.run('gpstart -a -y')
self.assertIn(rc, (0, 1))
self.assertTrue(self.gputil.gpstart_and_verify())
self.assertFalse(self.stdby.check_standby_processes(),
'gpstart without standby failed, standby was running')
self.gputil.run('gpstop -ar')
def test_gpstart_wo_standby(self):
"""Test -y without standby configured."""
self.gputil.remove_standby()
self.gputil.check_and_stop_gpdb()
(rc, stdout) = self.gputil.run('gpstart -a -y')
self.assertIn(rc, (0, 1))
self.assertTrue(self.gputil.gpstart_and_verify())
self.assertFalse(self.stdby.check_standby_processes(), 'standby processes presented')
self.gputil.run('gpstop -ar')
"""
Gpstart, test case in failover mode
"""
def test_gpstart_master_only_after_failover(self):
"""
For test purposes, failing back to the old master should
remove the standby from the primary after activating the standby.
"""
tinctest.logger.info("start master only with -m option after failover")
activatestdby = GpactivateStandby()
standby_host = activatestdby.get_current_standby()
standby_mdd = activatestdby.get_standby_dd()
standby_port = activatestdby.get_standby_port()
activatestdby.activate()
self.stdby._run_remote_command(standby_host,command = 'gpstop -a')
stdout = self.stdby._run_remote_command(standby_host,command = 'export GPSTART_INTERNAL_MASTER_ONLY=1; gpstart -a -m')
self.assertNotRegexpMatches(stdout,"ERROR","Start master only after failover failed")
self.assertTrue(self.gputil.gpstart_and_verify(master_dd = standby_mdd, host = standby_host))
self.stdby._run_remote_command(standby_host,command = 'gpstop -a -m')
self.gputil.run(command = 'gpstop -ar')
self.gputil.failback_to_original_master(self.origin_mdd, standby_host, standby_mdd, standby_port)
def test_gpstart_master_after_failover(self):
"""
failover, start from new master, then recover the cluster back to
have the old master active.
"""
tinctest.logger.info("failover, and run gpstart master test")
self.gputil.check_and_start_gpdb()
activatestdby = GpactivateStandby()
standby_host = activatestdby.get_current_standby()
standby_mdd = activatestdby.get_standby_dd()
standby_port = activatestdby.get_standby_port()
activatestdby.activate()
self.stdby._run_remote_command(standby_host, command = 'gpstop -a')
stdout = self.stdby._run_remote_command(standby_host,command = 'gpstart -a')
self.assertNotRegexpMatches(stdout,"FATAL","ERROR")
self.assertTrue(self.gputil.gpstart_and_verify(master_dd = standby_mdd, host = standby_host))
self.gputil.failback_to_original_master(self.origin_mdd, standby_host, standby_mdd, standby_port)
def test_gpstart_original_master_after_promote(self):
"""
failover, start from new master, then recover the cluster back to
have the old master active.
"""
tinctest.logger.info("activate and run gpstart for original master")
activatestdby = GpactivateStandby()
standby_host = activatestdby.get_current_standby()
standby_mdd = activatestdby.get_standby_dd()
standby_port = activatestdby.get_standby_port()
activatestdby.activate()
(rc, stdout) = self.gputil.run('gpstart -a -v')
self.gputil.run('pg_controldata %s' % self.origin_mdd)
self.stdby._run_remote_command(standby_host, command = 'pg_controldata %s' % standby_mdd)
self.assertNotEqual(rc, 0)
# This below error message comes from gpstart product code (if its modified change it here as well.)
self.assertRegexpMatches(stdout,"Standby activated, this node no more can act as master.")
self.gputil.failback_to_original_master(self.origin_mdd, standby_host, standby_mdd, standby_port)
| apache-2.0 |
floraXiao/gooderp_addons | buy/wizard/buy_order_track_wizard.py | 6 | 4873 | # -*- coding: utf-8 -*-
from datetime import date
from odoo import models, fields, api
from odoo.exceptions import UserError
class BuyOrderTrackWizard(models.TransientModel):
_name = 'buy.order.track.wizard'
_description = u'采购订单跟踪表向导'
@api.model
def _default_date_start(self):
return self.env.user.company_id.start_date
@api.model
def _default_date_end(self):
return date.today()
date_start = fields.Date(u'开始日期', default=_default_date_start,
help=u'报表汇总的开始日期,默认为公司启用日期')
date_end = fields.Date(u'结束日期', default=_default_date_end,
help=u'报表汇总的结束日期,默认为当前日期')
partner_id = fields.Many2one('partner', u'供应商',
help=u'只统计选定的供应商')
goods_id = fields.Many2one('goods', u'商品',
help=u'只统计选定的商品')
order_id = fields.Many2one('buy.order', u'订单号',
help=u'只统计选定的订单号')
warehouse_dest_id = fields.Many2one('warehouse', u'仓库',
help=u'只统计选定的仓库')
company_id = fields.Many2one(
'res.company',
string=u'公司',
change_default=True,
default=lambda self: self.env['res.company']._company_default_get())
def _get_domain(self):
'''Return the search domain built from the filter fields on the wizard form'''
domain = [
('order_id.date', '>=', self.date_start),
('order_id.date', '<=', self.date_end)
]
if self.goods_id:
domain.append(('goods_id', '=', self.goods_id.id))
if self.partner_id:
domain.append(('order_id.partner_id', '=', self.partner_id.id))
if self.order_id:
domain.append(('order_id.id', '=', self.order_id.id))
if self.warehouse_dest_id:
domain.append(('order_id.warehouse_dest_id',
'=', self.warehouse_dest_id.id))
return domain
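# Editorial example (hypothetical values): with only a date range and a supplier
# selected, _get_domain() returns an Odoo search domain such as
#   [('order_id.date', '>=', '2016-01-01'),
#    ('order_id.date', '<=', '2016-12-31'),
#    ('order_id.partner_id', '=', 7)]
# which button_ok() below passes to buy.order.line.search().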
def _get_wh_in_date(self, line):
'''For a given buy order line, return its warehouse-in (receipt) date'''
wh_in_date = None
move_line = self.env['wh.move.line']
wh_move_line = move_line.search([
('buy_line_id', '=', line.id),
('state', '=', 'done')
])
if len(wh_move_line) > 1:  # for split receipts, the incoming move lines share the same buy_line_id
wh_in_date = wh_move_line[0].date
else:
wh_in_date = wh_move_line.date
return wh_in_date
def _prepare_track_line(self, line, qty, amount, qty_not_in):
'''Return a tracking-report detail line (not a subtotal line)'''
return {
'goods_code': line.goods_id.code,
'goods_id': line.goods_id.id,
'attribute': line.attribute_id.name,
'uom': line.uom_id.name,
'date': line.order_id.date,
'order_name': line.order_id.name,
'partner_id': line.order_id.partner_id.id,
'warehouse_dest_id': line.order_id.warehouse_dest_id.id,
'goods_state': line.order_id.goods_state,
'qty': qty,
'amount': amount,
'qty_not_in': qty_not_in,
'planned_date': line.order_id.planned_date,
'wh_in_date': self._get_wh_in_date(line),  # warehouse-in date
'note': line.note,
'type': line.order_id.type,
}
@api.multi
def button_ok(self):
self.ensure_one()
res = []
if self.date_end < self.date_start:
raise UserError(u'开始日期不能大于结束日期!')
buy_order_line = self.env['buy.order.line']
for line in buy_order_line.search(self._get_domain(), order='goods_id'):
is_buy = line.order_id.type == 'buy' and 1 or -1  # 1 for purchase orders, -1 for purchase returns
# quantity, purchase amount and not-yet-received quantity on the line; all are negated for returns
qty = is_buy * line.quantity
amount = is_buy * line.subtotal
qty_not_in = is_buy * (line.quantity - line.quantity_in)
# create a tracking-report detail line (not a subtotal line)
track = self.env['buy.order.track'].create(
self._prepare_track_line(line, qty, amount, qty_not_in))
res.append(track.id)
view = self.env.ref('buy.buy_order_track_tree')
return {
'name': u'采购订单跟踪表',
'view_type': 'form',
'view_mode': 'tree',
'view_id': False,
'views': [(view.id, 'tree')],
'res_model': 'buy.order.track',
'type': 'ir.actions.act_window',
'domain': [('id', 'in', res)],
'limit': 65535,
}
| agpl-3.0 |
octavioturra/aritial | google_appengine/google/appengine/tools/dev_appserver_upload.py | 5 | 10654 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper CGI for POST uploads.
Utility library contains the main logic behind simulating the blobstore
uploading mechanism.
Contents:
GenerateBlobKey: Function for generation unique blob-keys.
UploadCGIHandler: Main CGI handler class for post uploads.
"""
import base64
import cStringIO
import datetime
import md5
import random
import time
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api.blobstore import blobstore
try:
from email.mime import base
from email.mime import multipart
from email import generator
except ImportError:
from email import Generator as generator
from email import MIMEBase as base
from email import MIMEMultipart as multipart
STRIPPED_HEADERS = frozenset(('content-length',
'content-md5',
'content-type',
))
class Error(Exception):
"""Base class for upload processing errors."""
class InvalidMIMETypeFormatError(Error):
"""MIME type was formatted incorrectly."""
def GenerateBlobKey(time_func=time.time, random_func=random.random):
"""Generate a unique BlobKey.
BlobKey is generated using the current time stamp combined with a random
number. The two values are subject to an md5 digest and base64 url-safe
encoded. The new key is checked against the possibility of existence within
the datastore and the random number is regenerated until there is no match.
Args:
time_func: Function used for generating the timestamp. Used for
dependency injection. Allows for predictable results during tests.
Must return a floating point UTC timestamp.
random_func: Function used for generating the random number. Used for
dependency injection. Allows for predictable results during tests.
Returns:
String version of BlobKey that is unique within the BlobInfo datastore.
None if there are too many name conflicts.
"""
timestamp = str(time_func())
tries = 0
while tries < 10:
number = str(random_func())
digester = md5.md5()
digester.update(timestamp)
digester.update(number)
blob_key = base64.urlsafe_b64encode(digester.digest())
datastore_key = datastore.Key.from_path(blobstore.BLOB_INFO_KIND,
blob_key,
namespace='')
try:
datastore.Get(datastore_key)
tries += 1
except datastore_errors.EntityNotFoundError:
return blob_key
return None
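# Editorial note (not part of the original module): the key is the URL-safe
# base64 encoding of a 16-byte MD5 digest, so it is always an opaque
# 24-character string ending in '=='. A caller sketch (error type assumed):
#
#     blob_key = GenerateBlobKey()
#     if blob_key is None:
#         raise Error('could not allocate a unique blob key')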
def _SplitMIMEType(mime_type):
"""Split MIME-type in to main and sub type.
Args:
mime_type: full MIME type string.
Returns:
(main, sub):
main: Main part of mime type (application, image, text, etc).
sub: Subtype part of mime type (pdf, png, html, etc).
Raises:
InvalidMIMETypeFormatError: If form item has incorrectly formatted MIME
type.
"""
if mime_type:
mime_type_array = mime_type.split('/')
if len(mime_type_array) == 1:
raise InvalidMIMETypeFormatError('Missing MIME sub-type.')
elif len(mime_type_array) == 2:
main_type, sub_type = mime_type_array
if not(main_type and sub_type):
raise InvalidMIMETypeFormatError(
'Incorrectly formatted MIME type: %s' % mime_type)
return main_type, sub_type
else:
raise InvalidMIMETypeFormatError(
'Incorrectly formatted MIME type: %s' % mime_type)
else:
return 'application', 'octet-stream'
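# Editorial examples (derived from the docstring above, not executed):
#   _SplitMIMEType('image/png')  -> ('image', 'png')
#   _SplitMIMEType('')           -> ('application', 'octet-stream')
#   _SplitMIMEType('plain-text') -> raises InvalidMIMETypeFormatError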
class UploadCGIHandler(object):
"""Class used for handling an upload post.
The main interface to this class is the UploadCGI method. This will receive
the upload form, store the blobs contained in the post and rewrite the blobs
to contain BlobKeys instead of blobs.
"""
def __init__(self,
blob_storage,
generate_blob_key=GenerateBlobKey,
now_func=datetime.datetime.now):
"""Constructor.
Args:
blob_storage: BlobStorage instance where actual blobs are stored.
generate_blob_key: Function used for generating unique blob keys.
now_func: Function that returns the current timestamp.
"""
self.__blob_storage = blob_storage
self.__generate_blob_key = generate_blob_key
self.__now_func = now_func
def StoreBlob(self, form_item, creation):
"""Store form-item to blob storage.
Args:
form_item: FieldStorage instance that represents a specific form field.
This instance should have a non-empty filename attribute, meaning that
it is an uploaded blob rather than a normal form field.
creation: Timestamp to associate with new blobs creation time. This
parameter is provided so that all blobs in the same upload form can have
the same creation date.
Returns:
datastore.Entity('__BlobInfo__') associated with the upload.
"""
main_type, sub_type = _SplitMIMEType(form_item.type)
blob_key = self.__generate_blob_key()
self.__blob_storage.StoreBlob(blob_key, form_item.file)
content_type_formatter = base.MIMEBase(main_type, sub_type,
**form_item.type_options)
blob_entity = datastore.Entity('__BlobInfo__',
name=str(blob_key),
namespace='')
blob_entity['content_type'] = (
content_type_formatter['content-type'].decode('utf-8'))
blob_entity['creation'] = creation
blob_entity['filename'] = form_item.filename.decode('utf-8')
form_item.file.seek(0, 2)
size = form_item.file.tell()
form_item.file.seek(0)
blob_entity['size'] = size
datastore.Put(blob_entity)
return blob_entity
def _GenerateMIMEMessage(self, form, boundary=None):
"""Generate a new post from original form.
Also responsible for storing blobs in the datastore.
Args:
form: Instance of cgi.FieldStorage representing the whole form
derived from original post data.
boundary: Boundary to use for resulting form. Used only in tests so
that the boundary is always consistent.
Returns:
A MIMEMultipart instance representing the new HTTP post which should be
forwarded to the developers actual CGI handler. DO NOT use the return
value of this method to generate a string unless you know what you're
doing and properly handle folding whitespace (from rfc822) properly.
"""
message = multipart.MIMEMultipart('form-data', boundary)
for name, value in form.headers.items():
if name.lower() not in STRIPPED_HEADERS:
message.add_header(name, value)
def IterateForm():
"""Flattens form in to single sequence of cgi.FieldStorage instances.
The resulting cgi.FieldStorage objects are a little bit irregular in
their structure. A single name can have multiple sub-items. In this
case, the root FieldStorage object has a list associated with that field
name. Otherwise, the root FieldStorage object just refers to a single
nested instance.
Lists of FieldStorage instances occur when a form has multiple values
for the same name.
Yields:
cgi.FieldStorage irrespective of their nesting level.
"""
for key in sorted(form):
form_item = form[key]
if isinstance(form_item, list):
for list_item in form_item:
yield list_item
else:
yield form_item
creation = self.__now_func()
for form_item in IterateForm():
disposition_parameters = {'name': form_item.name}
if form_item.filename is None:
variable = base.MIMEBase('text', 'plain')
variable.set_payload(form_item.value)
else:
if not form_item.filename:
continue
disposition_parameters['filename'] = form_item.filename
main_type, sub_type = _SplitMIMEType(form_item.type)
blob_entity = self.StoreBlob(form_item, creation)
variable = base.MIMEBase('message',
'external-body',
access_type=blobstore.BLOB_KEY_HEADER,
blob_key=blob_entity.key().name())
form_item.file.seek(0, 2)
content_length = form_item.file.tell()
form_item.file.seek(0)
external = base.MIMEBase(main_type,
sub_type,
**form_item.type_options)
headers = dict(form_item.headers)
headers['Content-Length'] = str(content_length)
headers[blobstore.UPLOAD_INFO_CREATION_HEADER] = (
blobstore._format_creation(creation))
for key, value in headers.iteritems():
external.add_header(key, value)
external_disposition_parameters = dict(disposition_parameters)
external_disposition_parameters['filename'] = form_item.filename
if not external.get('Content-Disposition'):
external.add_header('Content-Disposition',
'form-data',
**external_disposition_parameters)
variable.set_payload([external])
variable.add_header('Content-Disposition',
'form-data',
**disposition_parameters)
message.attach(variable)
return message
def GenerateMIMEMessageString(self, form, boundary=None):
"""Generate a new post string from original form.
Args:
form: Instance of cgi.FieldStorage representing the whole form
derived from original post data.
boundary: Boundary to use for resulting form. Used only in tests so
that the boundary is always consistent.
Returns:
A string rendering of a MIMEMultipart instance.
"""
message = self._GenerateMIMEMessage(form, boundary=boundary)
message_out = cStringIO.StringIO()
gen = generator.Generator(message_out, maxheaderlen=0)
gen.flatten(message, unixfrom=False)
return message_out.getvalue()
| apache-2.0 |
ovidiu-beldie/closure-linter-tweaked | closure_linter/scopeutil.py | 84 | 5414 | #!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to match goog.scope alias statements."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('[email protected] (Nathan Naze)')
import itertools
from closure_linter import ecmametadatapass
from closure_linter import tokenutil
from closure_linter.javascripttokens import JavaScriptTokenType
def IsGoogScopeBlock(context):
"""Whether the given context is a goog.scope block.
This function only checks that the block is a function block inside
a goog.scope() call.
TODO(nnaze): Implement goog.scope checks that verify the call is
in the root context and contains only a single function literal.
Args:
context: An EcmaContext of type block.
Returns:
Whether the context is a goog.scope block.
"""
if context.type != ecmametadatapass.EcmaContext.BLOCK:
return False
if not _IsFunctionLiteralBlock(context):
return False
# Check that this function is contained by a group
# of form "goog.scope(...)".
parent = context.parent
if parent and parent.type is ecmametadatapass.EcmaContext.GROUP:
last_code_token = parent.start_token.metadata.last_code
if (last_code_token and
last_code_token.type is JavaScriptTokenType.IDENTIFIER and
last_code_token.string == 'goog.scope'):
return True
return False
def _IsFunctionLiteralBlock(block_context):
"""Check if a context is a function literal block (without parameters).
Example function literal block: 'function() {}'
Args:
block_context: An EcmaContext of type block.
Returns:
Whether this context is a function literal block.
"""
previous_code_tokens_iter = itertools.ifilter(
lambda token: token not in JavaScriptTokenType.NON_CODE_TYPES,
reversed(block_context.start_token))
# Ignore the current token
next(previous_code_tokens_iter, None)
# Grab the previous three tokens and put them in correct order.
previous_code_tokens = list(itertools.islice(previous_code_tokens_iter, 3))
previous_code_tokens.reverse()
# There aren't three previous tokens.
if len(previous_code_tokens) != 3:
return False
# Check that the previous three code tokens are "function ()"
previous_code_token_types = [token.type for token in previous_code_tokens]
if (previous_code_token_types == [
JavaScriptTokenType.FUNCTION_DECLARATION,
JavaScriptTokenType.START_PARAMETERS,
JavaScriptTokenType.END_PARAMETERS]):
return True
return False
def IsInClosurizedNamespace(symbol, closurized_namespaces):
"""Match a goog.scope alias.
Args:
symbol: An identifier like 'goog.events.Event'.
closurized_namespaces: Iterable of valid Closurized namespaces (strings).
Returns:
True if symbol is an identifier in a Closurized namespace, otherwise False.
"""
for ns in closurized_namespaces:
if symbol.startswith(ns + '.'):
return True
return False
def MatchAlias(context):
"""Match an alias statement (some identifier assigned to a variable).
Example alias: var MyClass = proj.longNamespace.MyClass.
Args:
context: An EcmaContext of type EcmaContext.VAR.
Returns:
If a valid alias, returns a tuple of alias and symbol, otherwise None.
"""
if context.type != ecmametadatapass.EcmaContext.VAR:
return
# The var's parent is a STATEMENT, which should be directly below goog.scope.
if not IsGoogScopeBlock(context.parent.parent):
return
# Get the tokens in this statement.
if context.start_token and context.end_token:
statement_tokens = tokenutil.GetTokenRange(context.start_token,
context.end_token)
else:
return
# And now just those tokens that are actually code.
is_non_code_type = lambda t: t.type not in JavaScriptTokenType.NON_CODE_TYPES
code_tokens = filter(is_non_code_type, statement_tokens)
# This section identifies statements of the alias form "var alias = symbol".
# Pop off the semicolon if present.
if code_tokens and code_tokens[-1].IsType(JavaScriptTokenType.SEMICOLON):
code_tokens.pop()
if len(code_tokens) < 4:
return
# Verify that this is of the form "var lvalue = identifier;".
# The identifier may span multiple lines and could be multiple tokens.
if (code_tokens[0].IsKeyword('var') and
code_tokens[1].IsType(JavaScriptTokenType.SIMPLE_LVALUE) and
code_tokens[2].IsOperator('=') and
all(t.IsType(JavaScriptTokenType.IDENTIFIER) for t in code_tokens[3:])):
alias, symbol = code_tokens[1], code_tokens[3]
# Mark both tokens as an alias definition to avoid counting them as usages.
alias.metadata.is_alias_definition = True
symbol.metadata.is_alias_definition = True
return alias.string, tokenutil.GetIdentifierForToken(symbol)
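# Editorial example (hypothetical identifiers): inside a goog.scope block the
# statement ``var Event = goog.events.Event;`` is matched by MatchAlias(),
# which returns ('Event', 'goog.events.Event'); both tokens are flagged as
# alias definitions so later passes do not count them as usages.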
| apache-2.0 |
VasuAgrawal/tartanHacks2015 | site/flask/lib/python2.7/site-packages/pbr/tests/test_version.py | 41 | 1137 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pbr.tests import base
from pbr import version
class DeferredVersionTestCase(base.BaseTestCase):
def test_cached_version(self):
class MyVersionInfo(version.VersionInfo):
def _get_version_from_pkg_resources(self):
return "5.5.5.5"
deferred_string = MyVersionInfo("openstack").\
cached_version_string()
self.assertEqual("5.5.5.5", deferred_string)
| mit |
dataxu/ansible | lib/ansible/modules/network/f5/bigip_virtual_server.py | 25 | 53942 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_virtual_server
short_description: Manage LTM virtual servers on a BIG-IP
description:
- Manage LTM virtual servers on a BIG-IP.
version_added: "2.1"
options:
state:
description:
- The virtual server state. If C(absent), delete the virtual server
if it exists. C(present) creates the virtual server and enable it.
If C(enabled), enable the virtual server if it exists. If C(disabled),
create the virtual server if needed, and set state to C(disabled).
default: present
choices:
- present
- absent
- enabled
- disabled
name:
description:
- Virtual server name.
required: True
aliases:
- vs
destination:
description:
- Destination IP of the virtual server.
- Required when C(state) is C(present) and virtual server does not exist.
required: True
aliases:
- address
- ip
source:
description:
- Specifies an IP address or network from which the virtual server accepts traffic.
- The virtual server accepts clients only from one of these IP addresses.
- For this setting to function effectively, specify a value other than 0.0.0.0/0 or ::/0
(that is, any/0, any6/0).
- In order to maximize utility of this setting, specify the most specific address
prefixes covering all customer addresses and no others.
- Specify the IP address in Classless Inter-Domain Routing (CIDR) format; address/prefix,
where the prefix length is in bits. For example, for IPv4, 10.0.0.1/32 or 10.0.0.0/24,
and for IPv6, ffe1::0020/64 or 2001:ed8:77b5:2:10:10:100:42/64.
version_added: 2.5
port:
description:
- Port of the virtual server. Required when C(state) is C(present)
and virtual server does not exist.
- If you do not want to specify a particular port, use the value C(0).
The result is that the virtual server will listen on any port.
profiles:
description:
- List of profiles (HTTP, ClientSSL, ServerSSL, etc) to apply to both sides
of the connection (client-side and server-side).
- If you only want to apply a particular profile to the client-side of
the connection, specify C(client-side) for the profile's C(context).
- If you only want to apply a particular profile to the server-side of
the connection, specify C(server-side) for the profile's C(context).
- If C(context) is not provided, it will default to C(all).
suboptions:
name:
description:
- Name of the profile.
- If this is not specified, then it is assumed that the profile item is
only a name of a profile.
- This must be specified if a context is specified.
required: false
context:
description:
- The side of the connection on which the profile should be applied.
choices:
- all
- server-side
- client-side
default: all
aliases:
- all_profiles
irules:
version_added: "2.2"
description:
- List of rules to be applied in priority order.
- If you want to remove existing iRules, specify a single empty value; C("").
See the documentation for an example.
aliases:
- all_rules
enabled_vlans:
version_added: "2.2"
description:
- List of VLANs to be enabled. When a VLAN named C(all) is used, all
VLANs will be allowed. VLANs can be specified with or without the
leading partition. If the partition is not specified in the VLAN,
then the C(partition) option of this module will be used.
- This parameter is mutually exclusive with the C(disabled_vlans) parameter.
disabled_vlans:
version_added: 2.5
description:
- List of VLANs to be disabled. If the partition is not specified in the VLAN,
then the C(partition) option of this module will be used.
- This parameter is mutually exclusive with the C(enabled_vlans) parameters.
pool:
description:
- Default pool for the virtual server.
- If you want to remove the existing pool, specify an empty value; C("").
See the documentation for an example.
policies:
description:
- Specifies the policies for the virtual server
aliases:
- all_policies
snat:
description:
- Source network address policy.
required: false
choices:
- None
- Automap
- Name of a SNAT pool (eg "/Common/snat_pool_name") to enable SNAT
with the specific pool
default_persistence_profile:
description:
- Default Profile which manages the session persistence.
- If you want to remove the existing default persistence profile, specify an
empty value; C(""). See the documentation for an example.
description:
description:
- Virtual server description.
fallback_persistence_profile:
description:
- Specifies the persistence profile you want the system to use if it
cannot use the specified default persistence profile.
- If you want to remove the existing fallback persistence profile, specify an
empty value; C(""). See the documentation for an example.
version_added: 2.3
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
metadata:
description:
      - Arbitrary key/value pairs that you can attach to a virtual server. This is useful in
        situations where you might want to annotate a virtual to be managed by Ansible.
- Key names will be stored as strings; this includes names that are numbers.
- Values for all of the keys will be stored as strings; this includes values
that are numbers.
- Data will be persisted, not ephemeral.
version_added: 2.5
notes:
- Requires BIG-IP software version >= 11
- Requires the netaddr Python package on the host. This is as easy as pip
install netaddr.
requirements:
- netaddr
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Modify Port of the Virtual Server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
state: present
partition: Common
name: my-virtual-server
port: 8080
delegate_to: localhost
- name: Delete virtual server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
state: absent
partition: Common
name: my-virtual-server
delegate_to: localhost
- name: Add virtual server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
state: present
partition: Common
name: my-virtual-server
destination: 10.10.10.10
port: 443
pool: my-pool
snat: Automap
description: Test Virtual Server
profiles:
- http
- fix
- name: clientssl
context: server-side
- name: ilx
context: client-side
policies:
- my-ltm-policy-for-asm
- ltm-uri-policy
- ltm-policy-2
- ltm-policy-3
enabled_vlans:
- /Common/vlan2
delegate_to: localhost
- name: Add FastL4 virtual server
bigip_virtual_server:
destination: 1.1.1.1
name: fastl4_vs
port: 80
profiles:
- fastL4
state: present
- name: Add iRules to the Virtual Server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
name: my-virtual-server
irules:
- irule1
- irule2
delegate_to: localhost
- name: Remove one iRule from the Virtual Server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
name: my-virtual-server
irules:
- irule2
delegate_to: localhost
- name: Remove all iRules from the Virtual Server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
name: my-virtual-server
irules: ""
delegate_to: localhost
- name: Remove pool from the Virtual Server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
name: my-virtual-server
pool: ""
delegate_to: localhost
- name: Add metadata to virtual
  bigip_virtual_server:
    server: lb.mydomain.com
    user: admin
    password: secret
    state: present
    name: my-virtual-server
partition: Common
metadata:
ansible: 2.4
updated_at: 2017-12-20T17:50:46Z
delegate_to: localhost
'''
RETURN = r'''
description:
description: New description of the virtual server.
returned: changed
type: string
sample: This is my description
default_persistence_profile:
description: Default persistence profile set on the virtual server.
returned: changed
type: string
sample: /Common/dest_addr
destination:
description: Destination of the virtual server.
returned: changed
type: string
sample: 1.1.1.1
disabled:
description: Whether the virtual server is disabled, or not.
returned: changed
type: bool
sample: True
disabled_vlans:
description: List of VLANs that the virtual is disabled for.
returned: changed
type: list
sample: ['/Common/vlan1', '/Common/vlan2']
enabled:
description: Whether the virtual server is enabled, or not.
returned: changed
type: bool
sample: False
enabled_vlans:
description: List of VLANs that the virtual is enabled for.
returned: changed
type: list
sample: ['/Common/vlan5', '/Common/vlan6']
fallback_persistence_profile:
description: Fallback persistence profile set on the virtual server.
returned: changed
type: string
sample: /Common/source_addr
irules:
description: iRules set on the virtual server.
returned: changed
type: list
sample: ['/Common/irule1', '/Common/irule2']
pool:
description: Pool that the virtual server is attached to.
returned: changed
type: string
sample: /Common/my-pool
policies:
description: List of policies attached to the virtual.
returned: changed
type: list
sample: ['/Common/policy1', '/Common/policy2']
port:
description: Port that the virtual server is configured to listen on.
returned: changed
type: int
sample: 80
profiles:
description: List of profiles set on the virtual server.
returned: changed
type: list
sample: [{'name': 'tcp', 'context': 'server-side'}, {'name': 'tcp-legacy', 'context': 'client-side'}]
snat:
description: SNAT setting of the virtual server.
returned: changed
type: string
sample: Automap
source:
description: Source address, in CIDR form, set on the virtual server.
returned: changed
type: string
sample: 1.2.3.4/32
metadata:
description: The new value of the virtual.
returned: changed
type: dict
sample: {'key1': 'foo', 'key2': 'bar'}
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
from collections import namedtuple
try:
# Sideband repository used for dev
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fqdn_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
HAS_DEVEL_IMPORTS = True
except ImportError:
# Upstream Ansible
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
try:
import netaddr
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
class Parameters(AnsibleF5Parameters):
api_map = {
'sourceAddressTranslation': 'snat',
'fallbackPersistence': 'fallback_persistence_profile',
'persist': 'default_persistence_profile',
'vlansEnabled': 'vlans_enabled',
'vlansDisabled': 'vlans_disabled',
'profilesReference': 'profiles',
'policiesReference': 'policies',
'rules': 'irules'
}
api_attributes = [
'description',
'destination',
'disabled',
'enabled',
'fallbackPersistence',
'metadata',
'persist',
'policies',
'pool',
'profiles',
'rules',
'source',
'sourceAddressTranslation',
'vlans',
'vlansEnabled',
'vlansDisabled',
]
updatables = [
'description',
'default_persistence_profile',
'destination',
'disabled_vlans',
'enabled',
'enabled_vlans',
'fallback_persistence_profile',
'irules',
'metadata',
'pool',
'policies',
'port',
'profiles',
'snat',
'source'
]
returnables = [
'description',
'default_persistence_profile',
'destination',
'disabled',
'disabled_vlans',
'enabled',
'enabled_vlans',
'fallback_persistence_profile',
'irules',
'metadata',
'pool',
'policies',
'port',
'profiles',
'snat',
'source',
'vlans',
'vlans_enabled',
'vlans_disabled'
]
profiles_mutex = [
'sip', 'sipsession', 'iiop', 'rtsp', 'http', 'diameter',
'diametersession', 'radius', 'ftp', 'tftp', 'dns', 'pptp', 'fix'
]
def to_return(self):
result = {}
for returnable in self.returnables:
try:
result[returnable] = getattr(self, returnable)
except Exception as ex:
pass
result = self._filter_params(result)
return result
def _fqdn_name(self, value):
if value is not None and not value.startswith('/'):
return '/{0}/{1}'.format(self.partition, value)
return value
def is_valid_ip(self, value):
try:
netaddr.IPAddress(value)
return True
except (netaddr.core.AddrFormatError, ValueError):
return False
def _format_port_for_destination(self, ip, port):
addr = netaddr.IPAddress(ip)
if addr.version == 6:
if port == 0:
result = '.any'
else:
result = '.{0}'.format(port)
else:
result = ':{0}'.format(port)
return result
def _format_destination(self, address, port, route_domain):
if port is None:
if route_domain is None:
result = '{0}'.format(
self._fqdn_name(address)
)
else:
result = '{0}%{1}'.format(
self._fqdn_name(address),
route_domain
)
else:
port = self._format_port_for_destination(address, port)
if route_domain is None:
result = '{0}{1}'.format(
self._fqdn_name(address),
port
)
else:
result = '{0}%{1}{2}'.format(
self._fqdn_name(address),
route_domain,
port
)
return result
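    # Illustrative outputs, assuming the partition is 'Common':
    #   _format_destination('1.1.1.1', 80, None) -> '/Common/1.1.1.1:80'
    #   _format_destination('1.1.1.1', 80, 2)    -> '/Common/1.1.1.1%2:80'
    #   _format_destination('2700::6', 0, None)  -> '/Common/2700::6.any'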
class ApiParameters(Parameters):
@property
def destination(self):
if self._values['destination'] is None:
return None
destination = self.destination_tuple
result = self._format_destination(destination.ip, destination.port, destination.route_domain)
return result
@property
def source(self):
if self._values['source'] is None:
return None
try:
addr = netaddr.IPNetwork(self._values['source'])
result = '{0}/{1}'.format(str(addr.ip), addr.prefixlen)
return result
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The source IP address must be specified in CIDR format: address/prefix"
)
@property
def destination_tuple(self):
Destination = namedtuple('Destination', ['ip', 'port', 'route_domain'])
# Remove the partition
if self._values['destination'] is None:
result = Destination(ip=None, port=None, route_domain=None)
return result
destination = re.sub(r'^/[a-zA-Z0-9_.-]+/', '', self._values['destination'])
if self.is_valid_ip(destination):
result = Destination(
ip=destination,
port=None,
route_domain=None
)
return result
# Covers the following examples
#
# /Common/2700:bc00:1f10:101::6%2.80
# 2700:bc00:1f10:101::6%2.80
# 1.1.1.1%2:80
# /Common/1.1.1.1%2:80
# /Common/2700:bc00:1f10:101::6%2.any
#
pattern = r'(?P<ip>[^%]+)%(?P<route_domain>[0-9]+)[:.](?P<port>[0-9]+|any)'
matches = re.search(pattern, destination)
if matches:
try:
port = int(matches.group('port'))
except ValueError:
# Can be a port of "any". This only happens with IPv6
port = matches.group('port')
if port == 'any':
port = 0
ip = matches.group('ip')
if not self.is_valid_ip(ip):
raise F5ModuleError(
"The provided destination is not a valid IP address"
)
result = Destination(
ip=matches.group('ip'),
port=port,
route_domain=int(matches.group('route_domain'))
)
return result
pattern = r'(?P<ip>[^%]+)%(?P<route_domain>[0-9]+)'
matches = re.search(pattern, destination)
if matches:
ip = matches.group('ip')
if not self.is_valid_ip(ip):
raise F5ModuleError(
"The provided destination is not a valid IP address"
)
result = Destination(
ip=matches.group('ip'),
port=None,
route_domain=int(matches.group('route_domain'))
)
return result
parts = destination.split('.')
if len(parts) == 4:
# IPv4
ip, port = destination.split(':')
if not self.is_valid_ip(ip):
raise F5ModuleError(
"The provided destination is not a valid IP address"
)
result = Destination(
ip=ip,
port=int(port),
route_domain=None
)
return result
elif len(parts) == 2:
# IPv6
ip, port = destination.split('.')
try:
port = int(port)
except ValueError:
# Can be a port of "any". This only happens with IPv6
if port == 'any':
port = 0
if not self.is_valid_ip(ip):
raise F5ModuleError(
"The provided destination is not a valid IP address"
)
result = Destination(
ip=ip,
port=port,
route_domain=None
)
return result
else:
result = Destination(ip=None, port=None, route_domain=None)
return result
@property
def port(self):
destination = self.destination_tuple
self._values['port'] = destination.port
return destination.port
@property
def route_domain(self):
destination = self.destination_tuple
self._values['route_domain'] = destination.route_domain
return destination.route_domain
@property
def profiles(self):
if 'items' not in self._values['profiles']:
return None
result = []
for item in self._values['profiles']['items']:
context = item['context']
name = item['name']
if context in ['all', 'serverside', 'clientside']:
result.append(dict(name=name, context=context, fullPath=item['fullPath']))
else:
raise F5ModuleError(
"Unknown profile context found: '{0}'".format(context)
)
return result
@property
def policies(self):
if 'items' not in self._values['policies']:
return None
result = []
for item in self._values['policies']['items']:
name = item['name']
partition = item['partition']
result.append(dict(name=name, partition=partition))
return result
@property
def default_persistence_profile(self):
if self._values['default_persistence_profile'] is None:
return None
# These persistence profiles are always lists when we get them
# from the REST API even though there can only be one. We'll
# make it a list again when we get to the Difference engine.
return self._values['default_persistence_profile'][0]
@property
def enabled(self):
if 'enabled' in self._values:
return True
else:
return False
@property
def disabled(self):
if 'disabled' in self._values:
return True
return False
@property
def metadata(self):
if self._values['metadata'] is None:
return None
result = []
for md in self._values['metadata']:
tmp = dict(name=str(md['name']))
if 'value' in md:
tmp['value'] = str(md['value'])
else:
tmp['value'] = ''
result.append(tmp)
return result
class ModuleParameters(Parameters):
def _handle_profile_context(self, tmp):
if 'context' not in tmp:
tmp['context'] = 'all'
else:
if 'name' not in tmp:
raise F5ModuleError(
"A profile name must be specified when a context is specified."
)
tmp['context'] = tmp['context'].replace('server-side', 'serverside')
tmp['context'] = tmp['context'].replace('client-side', 'clientside')
def _handle_clientssl_profile_nuances(self, profile):
if profile['name'] != 'clientssl':
return
if profile['context'] != 'clientside':
profile['context'] = 'clientside'
@property
def destination(self):
addr = self._values['destination'].split("%")[0]
if not self.is_valid_ip(addr):
raise F5ModuleError(
"The provided destination is not a valid IP address"
)
result = self._format_destination(addr, self.port, self.route_domain)
return result
@property
def destination_tuple(self):
Destination = namedtuple('Destination', ['ip', 'port', 'route_domain'])
if self._values['destination'] is None:
result = Destination(ip=None, port=None, route_domain=None)
return result
addr = self._values['destination'].split("%")[0]
result = Destination(ip=addr, port=self.port, route_domain=self.route_domain)
return result
@property
def source(self):
if self._values['source'] is None:
return None
try:
addr = netaddr.IPNetwork(self._values['source'])
result = '{0}/{1}'.format(str(addr.ip), addr.prefixlen)
return result
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The source IP address must be specified in CIDR format: address/prefix"
)
@property
def port(self):
if self._values['port'] is None:
return None
if self._values['port'] in ['*', 'any']:
return 0
self._check_port()
return int(self._values['port'])
def _check_port(self):
try:
port = int(self._values['port'])
except ValueError:
raise F5ModuleError(
"The specified port was not a valid integer"
)
if 0 <= port <= 65535:
return port
raise F5ModuleError(
"Valid ports must be in range 0 - 65535"
)
@property
def irules(self):
results = []
if self._values['irules'] is None:
return None
if len(self._values['irules']) == 1 and self._values['irules'][0] == '':
return ''
for irule in self._values['irules']:
result = self._fqdn_name(irule)
results.append(result)
return results
@property
def profiles(self):
if self._values['profiles'] is None:
return None
if len(self._values['profiles']) == 1 and self._values['profiles'][0] == '':
return ''
result = []
for profile in self._values['profiles']:
tmp = dict()
if isinstance(profile, dict):
tmp.update(profile)
self._handle_profile_context(tmp)
if 'name' not in profile:
tmp['name'] = profile
tmp['fullPath'] = self._fqdn_name(tmp['name'])
self._handle_clientssl_profile_nuances(tmp)
else:
tmp['name'] = profile
tmp['context'] = 'all'
tmp['fullPath'] = self._fqdn_name(tmp['name'])
self._handle_clientssl_profile_nuances(tmp)
result.append(tmp)
        mutually_exclusive = [x['name'] for x in result if x['name'] in self.profiles_mutex]
if len(mutually_exclusive) > 1:
raise F5ModuleError(
"Profiles {0} are mutually exclusive".format(
', '.join(self.profiles_mutex).strip()
)
)
return result
@property
def policies(self):
if self._values['policies'] is None:
return None
if len(self._values['policies']) == 1 and self._values['policies'][0] == '':
return ''
result = []
policies = [self._fqdn_name(p) for p in self._values['policies']]
policies = set(policies)
for policy in policies:
parts = policy.split('/')
if len(parts) != 3:
raise F5ModuleError(
"The specified policy '{0}' is malformed".format(policy)
)
tmp = dict(
name=parts[2],
partition=parts[1]
)
result.append(tmp)
return result
@property
def pool(self):
if self._values['pool'] is None:
return None
if self._values['pool'] == '':
return ''
return self._fqdn_name(self._values['pool'])
@property
def vlans_enabled(self):
if self._values['enabled_vlans'] is None:
return None
elif self._values['vlans_enabled'] is False:
# This is a special case for "all" enabled VLANs
return False
if self._values['disabled_vlans'] is None:
return True
return False
@property
def vlans_disabled(self):
if self._values['disabled_vlans'] is None:
return None
elif self._values['vlans_disabled'] is True:
# This is a special case for "all" enabled VLANs
return True
elif self._values['enabled_vlans'] is None:
return True
return False
@property
def enabled_vlans(self):
if self._values['enabled_vlans'] is None:
return None
elif any(x.lower() for x in self._values['enabled_vlans'] if x.lower() in ['all', '*']):
result = [self._fqdn_name('all')]
if result[0].endswith('/all'):
if self._values['__warnings'] is None:
self._values['__warnings'] = []
self._values['__warnings'].append(
dict(
msg="Usage of the 'ALL' value for 'enabled_vlans' parameter is deprecated. Use '*' instead",
version='2.5'
)
)
return result
results = list(set([self._fqdn_name(x) for x in self._values['enabled_vlans']]))
results.sort()
return results
@property
def disabled_vlans(self):
if self._values['disabled_vlans'] is None:
return None
elif any(x.lower() for x in self._values['disabled_vlans'] if x.lower() in ['all', '*']):
raise F5ModuleError(
"You cannot disable all VLANs. You must name them individually."
)
results = list(set([self._fqdn_name(x) for x in self._values['disabled_vlans']]))
results.sort()
return results
@property
def vlans(self):
disabled = self.disabled_vlans
if disabled:
return self.disabled_vlans
return self.enabled_vlans
@property
def state(self):
if self._values['state'] == 'present':
return 'enabled'
return self._values['state']
@property
def snat(self):
if self._values['snat'] is None:
return None
lowercase = self._values['snat'].lower()
if lowercase in ['automap', 'none']:
return dict(type=lowercase)
snat_pool = self._fqdn_name(self._values['snat'])
return dict(pool=snat_pool, type='snat')
@property
def default_persistence_profile(self):
if self._values['default_persistence_profile'] is None:
return None
if self._values['default_persistence_profile'] == '':
return ''
profile = self._fqdn_name(self._values['default_persistence_profile'])
parts = profile.split('/')
if len(parts) != 3:
raise F5ModuleError(
"The specified 'default_persistence_profile' is malformed"
)
result = dict(
name=parts[2],
partition=parts[1]
)
return result
@property
def fallback_persistence_profile(self):
if self._values['fallback_persistence_profile'] is None:
return None
if self._values['fallback_persistence_profile'] == '':
return ''
result = self._fqdn_name(self._values['fallback_persistence_profile'])
return result
@property
def enabled(self):
if self._values['state'] == 'enabled':
return True
elif self._values['state'] == 'disabled':
return False
else:
return None
@property
def disabled(self):
if self._values['state'] == 'enabled':
return False
elif self._values['state'] == 'disabled':
return True
else:
return None
@property
def metadata(self):
if self._values['metadata'] is None:
return None
if self._values['metadata'] == '':
return []
result = []
try:
for k, v in iteritems(self._values['metadata']):
tmp = dict(name=str(k))
if v:
tmp['value'] = str(v)
else:
tmp['value'] = ''
result.append(tmp)
except AttributeError:
raise F5ModuleError(
"The 'metadata' parameter must be a dictionary of key/value pairs."
)
return result
class Changes(Parameters):
pass
class UsableChanges(Changes):
@property
def vlans(self):
if self._values['vlans'] is None:
return None
elif len(self._values['vlans']) == 0:
return []
elif any(x for x in self._values['vlans'] if x.lower() in ['/common/all', 'all']):
return []
return self._values['vlans']
class ReportableChanges(Changes):
@property
def snat(self):
if self._values['snat'] is None:
return None
result = self._values['snat'].get('type', None)
if result == 'automap':
return 'Automap'
elif result == 'none':
return 'none'
result = self._values['snat'].get('pool', None)
return result
@property
def destination(self):
params = ApiParameters(params=dict(destination=self._values['destination']))
result = params.destination_tuple.ip
return result
@property
def port(self):
params = ApiParameters(params=dict(destination=self._values['destination']))
result = params.destination_tuple.port
return result
@property
def default_persistence_profile(self):
if len(self._values['default_persistence_profile']) == 0:
return []
profile = self._values['default_persistence_profile'][0]
result = '/{0}/{1}'.format(profile['partition'], profile['name'])
return result
@property
def policies(self):
if len(self._values['policies']) == 0:
return []
result = ['/{0}/{1}'.format(x['partition'], x['name']) for x in self._values['policies']]
return result
@property
def enabled_vlans(self):
if len(self._values['vlans']) == 0 and self._values['vlans_disabled'] is True:
return 'all'
elif len(self._values['vlans']) > 0 and self._values['vlans_enabled'] is True:
return self._values['vlans']
@property
def disabled_vlans(self):
if len(self._values['vlans']) > 0 and self._values['vlans_disabled'] is True:
return self._values['vlans']
class Difference(object):
def __init__(self, want, have=None):
self.have = have
self.want = want
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
def to_tuple(self, items):
result = []
for x in items:
tmp = [(str(k), str(v)) for k, v in iteritems(x)]
result += tmp
return result
def _diff_complex_items(self, want, have):
if want == [] and have is None:
return None
if want is None:
return None
w = self.to_tuple(want)
h = self.to_tuple(have)
if set(w).issubset(set(h)):
return None
else:
return want
def _update_vlan_status(self, result):
if self.want.vlans_disabled is not None:
if self.want.vlans_disabled != self.have.vlans_disabled:
result['vlans_disabled'] = self.want.vlans_disabled
result['vlans_enabled'] = not self.want.vlans_disabled
elif self.want.vlans_enabled is not None:
if any(x.lower().endswith('/all') for x in self.want.vlans):
if self.have.vlans_enabled is True:
result['vlans_disabled'] = True
result['vlans_enabled'] = False
elif self.want.vlans_enabled != self.have.vlans_enabled:
result['vlans_disabled'] = not self.want.vlans_enabled
result['vlans_enabled'] = self.want.vlans_enabled
@property
def destination(self):
addr_tuple = [self.want.destination, self.want.port, self.want.route_domain]
        if all(x is None for x in addr_tuple):
return None
have = self.have.destination_tuple
if self.want.port is None:
self.want.update({'port': have.port})
if self.want.route_domain is None:
self.want.update({'route_domain': have.route_domain})
if self.want.destination_tuple.ip is None:
address = have.ip
else:
address = self.want.destination_tuple.ip
want = self.want._format_destination(address, self.want.port, self.want.route_domain)
if want != self.have.destination:
return self.want._fqdn_name(want)
@property
def source(self):
if self.want.source is None:
return None
want = netaddr.IPNetwork(self.want.source)
have = netaddr.IPNetwork(self.have.destination_tuple.ip)
if want.version != have.version:
raise F5ModuleError(
"The source and destination addresses for the virtual server must be be the same type (IPv4 or IPv6)."
)
if self.want.source != self.have.source:
return self.want.source
@property
def vlans(self):
if self.want.vlans is None:
return None
elif self.want.vlans == [] and self.have.vlans is None:
return None
elif self.want.vlans == self.have.vlans:
return None
# Specifically looking for /all because the vlans return value will be
# an FQDN list. This means that "all" will be returned as "/partition/all",
# ex, /Common/all.
#
# We do not want to accidentally match values that would end with the word
# "all", like "vlansall". Therefore we look for the forward slash because this
# is a path delimiter.
elif any(x.lower().endswith('/all') for x in self.want.vlans):
if self.have.vlans is None:
return None
else:
return []
else:
return self.want.vlans
@property
def enabled_vlans(self):
return self.vlan_status
@property
def disabled_vlans(self):
return self.vlan_status
@property
def vlan_status(self):
result = dict()
vlans = self.vlans
if vlans is not None:
result['vlans'] = vlans
self._update_vlan_status(result)
return result
@property
def port(self):
result = self.destination
if result is not None:
return dict(
destination=result
)
@property
def profiles(self):
if self.want.profiles is None:
return None
if self.want.profiles == '' and len(self.have.profiles) > 0:
have = set([(p['name'], p['context'], p['fullPath']) for p in self.have.profiles])
if len(self.have.profiles) == 1:
if not any(x[0] in ['tcp', 'udp', 'sctp'] for x in have):
return []
else:
return None
else:
return []
if self.want.profiles == '' and len(self.have.profiles) == 0:
return None
want = set([(p['name'], p['context'], p['fullPath']) for p in self.want.profiles])
have = set([(p['name'], p['context'], p['fullPath']) for p in self.have.profiles])
if len(have) == 0:
return self.want.profiles
elif len(have) == 1:
if want != have:
return self.want.profiles
else:
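            # BIG-IP reports a default protocol profile (tcp/udp/sctp) even when
            # none was requested; presumably those are stripped from 'have' here
            # so an unspecified default does not show up as a change.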
if not any(x[0] == 'tcp' for x in want):
have = set([x for x in have if x[0] != 'tcp'])
if not any(x[0] == 'udp' for x in want):
have = set([x for x in have if x[0] != 'udp'])
if not any(x[0] == 'sctp' for x in want):
have = set([x for x in have if x[0] != 'sctp'])
want = set([(p[2], p[1]) for p in want])
have = set([(p[2], p[1]) for p in have])
if want != have:
return self.want.profiles
@property
def fallback_persistence_profile(self):
if self.want.fallback_persistence_profile is None:
return None
if self.want.fallback_persistence_profile == '' and self.have.fallback_persistence_profile is not None:
return ""
if self.want.fallback_persistence_profile == '' and self.have.fallback_persistence_profile is None:
return None
if self.want.fallback_persistence_profile != self.have.fallback_persistence_profile:
return self.want.fallback_persistence_profile
@property
def default_persistence_profile(self):
if self.want.default_persistence_profile is None:
return None
if self.want.default_persistence_profile == '' and self.have.default_persistence_profile is not None:
return []
if self.want.default_persistence_profile == '' and self.have.default_persistence_profile is None:
return None
if self.have.default_persistence_profile is None:
return [self.want.default_persistence_profile]
w_name = self.want.default_persistence_profile.get('name', None)
w_partition = self.want.default_persistence_profile.get('partition', None)
h_name = self.have.default_persistence_profile.get('name', None)
h_partition = self.have.default_persistence_profile.get('partition', None)
if w_name != h_name or w_partition != h_partition:
return [self.want.default_persistence_profile]
@property
def policies(self):
if self.want.policies is None:
return None
if self.want.policies == '' and self.have.policies is None:
return None
if self.want.policies == '' and len(self.have.policies) > 0:
return []
if not self.have.policies:
return self.want.policies
want = set([(p['name'], p['partition']) for p in self.want.policies])
have = set([(p['name'], p['partition']) for p in self.have.policies])
if not want == have:
return self.want.policies
@property
def snat(self):
if self.want.snat is None:
return None
if self.want.snat['type'] != self.have.snat['type']:
result = dict(snat=self.want.snat)
return result
if self.want.snat.get('pool', None) is None:
return None
if self.want.snat['pool'] != self.have.snat['pool']:
result = dict(snat=self.want.snat)
return result
@property
def enabled(self):
if self.want.state == 'enabled' and self.have.disabled:
result = dict(
enabled=True,
disabled=False
)
return result
elif self.want.state == 'disabled' and self.have.enabled:
result = dict(
enabled=False,
disabled=True
)
return result
@property
def irules(self):
if self.want.irules is None:
return None
if self.want.irules == '' and len(self.have.irules) > 0:
return []
if self.want.irules == '' and len(self.have.irules) == 0:
return None
if sorted(set(self.want.irules)) != sorted(set(self.have.irules)):
return self.want.irules
@property
def pool(self):
if self.want.pool is None:
return None
if self.want.pool == '' and self.have.pool is not None:
return ""
if self.want.pool == '' and self.have.pool is None:
return None
if self.want.pool != self.have.pool:
return self.want.pool
@property
def metadata(self):
if self.want.metadata is None:
return None
elif len(self.want.metadata) == 0 and self.have.metadata is None:
return None
elif len(self.want.metadata) == 0:
return []
elif self.have.metadata is None:
return self.want.metadata
result = self._diff_complex_items(self.want.metadata, self.have.metadata)
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = ApiParameters()
self.want = ModuleParameters(client=self.client, params=self.module.params)
self.changes = UsableChanges()
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource")
return True
def get_reportable_changes(self):
result = ReportableChanges(params=self.changes.to_return())
return result
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exists(self):
result = self.client.api.tm.ltm.virtuals.virtual.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def create(self):
required_resources = ['destination', 'port']
self._set_changed_options()
# This must be changed back to a list to make a valid REST API
# value. The module manipulates this as a normal dictionary
if self.want.default_persistence_profile is not None:
self.want.update({'default_persistence_profile': [self.want.default_persistence_profile]})
if self.want.destination is None:
raise F5ModuleError(
"'destination' must be specified when creating a virtual server"
)
if all(getattr(self.want, v) is None for v in required_resources):
raise F5ModuleError(
"You must specify both of " + ', '.join(required_resources)
)
if self.want.enabled_vlans is not None:
if any(x for x in self.want.enabled_vlans if x.lower() in ['/common/all', 'all']):
self.want.update(
dict(
enabled_vlans=[],
vlans_disabled=True,
vlans_enabled=False
)
)
if self.want.source and self.want.destination:
want = netaddr.IPNetwork(self.want.source)
have = netaddr.IPNetwork(self.want.destination_tuple.ip)
if want.version != have.version:
raise F5ModuleError(
"The source and destination addresses for the virtual server must be be the same type (IPv4 or IPv6)."
)
if self.module.check_mode:
return True
self.create_on_device()
return True
def update_on_device(self):
params = self.changes.api_params()
resource = self.client.api.tm.ltm.virtuals.virtual.load(
name=self.want.name,
partition=self.want.partition
)
resource.modify(**params)
def read_current_from_device(self):
result = self.client.api.tm.ltm.virtuals.virtual.load(
name=self.want.name,
partition=self.want.partition,
requests_params=dict(
params=dict(
expandSubcollections='true'
)
)
)
params = result.attrs
params.update(dict(kind=result.to_dict().get('kind', None)))
result = ApiParameters(params=params)
return result
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.virtuals.virtual.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def remove_from_device(self):
resource = self.client.api.tm.ltm.virtuals.virtual.load(
name=self.want.name,
partition=self.want.partition
)
if resource:
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
state=dict(
default='present',
choices=['present', 'absent', 'disabled', 'enabled']
),
name=dict(
required=True,
aliases=['vs']
),
destination=dict(
aliases=['address', 'ip']
),
port=dict(
type='int'
),
profiles=dict(
type='list',
aliases=['all_profiles'],
options=dict(
name=dict(required=False),
context=dict(default='all', choices=['all', 'server-side', 'client-side'])
)
),
policies=dict(
type='list',
aliases=['all_policies']
),
irules=dict(
type='list',
aliases=['all_rules']
),
enabled_vlans=dict(
type='list'
),
disabled_vlans=dict(
type='list'
),
pool=dict(),
description=dict(),
snat=dict(),
default_persistence_profile=dict(),
fallback_persistence_profile=dict(),
source=dict(),
metadata=dict(type='raw'),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['enabled_vlans', 'disabled_vlans']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
if not HAS_NETADDR:
module.fail_json(msg="The python netaddr module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
RichIsMyName/PicklingToolsRepo | PythonModule/ptools/xmldumper_defs.py | 3 | 3487 |
# Options for dictionaries -> XML
# If XML attributes are being folded up, then you may
# want to prepend a special character to distinguish attributes
# from nested tags: an underscore is the usual default. If
# you don't want a prepend char, use XML_DUMP_NO_PREPEND option
XML_PREPEND_CHAR = '_'
# When dumping, by DEFAULT the keys that start with _ become
# attributes (this is called "unfolding"). You may want to keep
# those keys as tags. Consider:
#
# { 'top': { '_a':'1', '_b': 2 }}
#
# DEFAULT behavior, this becomes:
# <top a="1" b="2"></top> This moves the _names to attributes
#
# But, you may want all _ keys to stay as tags: that's the purpose of this opt
# <top> <_a>1</_a> <_b>2</_b> </top>
XML_DUMP_PREPEND_KEYS_AS_TAGS = 0x100
# Any value that is simple (i.e., contains no nested
# content) will be placed in the attributes bin:
# For examples:
# { 'top': { 'x':'1', 'y': 2 }} -> <top x="1" y="2"></top>
XML_DUMP_SIMPLE_TAGS_AS_ATTRIBUTES = 0x200
# By default, everything dumps as strings (without quotes), but those things
# that are strings lose their "stringedness", which means
# they can't be "evaled" on the way back in. This option makes
# Vals that are strings dump with quotes.
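# For example (presumed behavior): { 's': 'abc' } -> <s>"abc"</s>, so an
# evaluating loader can round-trip the string 'abc' rather than a bare token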
XML_DUMP_STRINGS_AS_STRINGS = 0x400
# Like XML_DUMP_STRINGS_AS_STRINGS, but this one ONLY
# dumps strings with quotes if it thinks Eval will return
# something else. For example in { 's': '123' } : '123' is
# a STRING, not a number. When evalled with an XMLLoader
# with XML_LOAD_EVAL_CONTENT flag, that will become a number.
XML_DUMP_STRINGS_BEST_GUESS = 0x800
# Show nesting when you dump: like "prettyPrint": basically, it shows
# nesting
XML_DUMP_PRETTY = 0x1000
# Arrays of POD (plain old data: ints, real, complex, etc) can
# dump as huge lists: By default they just dump with one tag
# and then a list of numbers. If you set this option, they dump
# as a true XML list (<data>1.0</data><data>2.0</data> ...)
# which is very expensive, but is easier to use with other
# tools (spreadsheets that support lists, etc.).
XML_DUMP_POD_LIST_AS_XML_LIST = 0x2000
# When dumping an empty tag, what do you want it to be?
# I.e., what is <empty></empty>
# Normally (DEFAULT) this is an empty dictionary 'empty': {}
# If you want that to be empty content, as in an empty string,
# set this option: 'empty': ""
# NOTE: You don't need this option if you are using
# XML_DUMP_STRINGS_AS_STRINGS or XML_DUMP_STRINGS_BEST_GUESS
XML_DUMP_PREFER_EMPTY_STRINGS = 0x4000
# When dumping dictionaries in order, a dict BY DEFAULT prints
# out the keys in sorted/alphabetic order and BY DEFAULT an OrderedDict
# prints out in the OrderedDict order. The "unnatural" order
# for a dict is to print out in "random" order (but probably slightly
# faster). The "unnatural" order for an OrderedDict is sorted
# (because normally we use an OrderedDict because we WANTS its
# notion of order)
XML_DUMP_UNNATURAL_ORDER = 0x8000
# Even though illegal XML, allow element names starting with Digits:
# when it does see a starting digit, it turns it into an _digit
# so that it is still legal XML
XML_TAGS_ACCEPTS_DIGITS = 0x80
# Allows digits as starting XML tags, even though illegal XML.
# This preserves the number as a tag.
XML_DIGITS_AS_TAGS = 0x80000
# When dumping XML, the default is to NOT emit the XML header
# <?xml version="1.0">: specifying this option ensures the header
# always precedes all content
XML_STRICT_HDR = 0x10000
| bsd-3-clause |
cwacek/python-jsonschema-objects | python_jsonschema_objects/wrapper_types.py | 1 | 11522 | import collections
import logging
import six
from python_jsonschema_objects import util
from python_jsonschema_objects.validators import registry, ValidationError
from python_jsonschema_objects.util import lazy_format as fmt
logger = logging.getLogger(__name__)
class ArrayWrapper(collections.abc.MutableSequence):
"""A wrapper for array-like structures.
    This implements all of the array-like behavior that one would want,
with a dirty-tracking mechanism to avoid constant validation costs.
"""
@property
def strict(self):
return getattr(self, "_strict_", False)
def __len__(self):
return len(self.data)
def mark_or_revalidate(self):
if self.strict:
self.validate()
else:
self._dirty = True
def __delitem__(self, index):
self.data.pop(index)
self.mark_or_revalidate()
def insert(self, index, value):
self.data.insert(index, value)
self.mark_or_revalidate()
def __setitem__(self, index, value):
self.data[index] = value
self.mark_or_revalidate()
def __getitem__(self, idx):
return self.typed_elems[idx]
def __eq__(self, other):
if isinstance(other, ArrayWrapper):
return self.for_json() == other.for_json()
else:
return self.for_json() == other
def __init__(self, ary):
"""Initialize a wrapper for the array
Args:
ary: (list-like, or ArrayWrapper)
"""
""" Marks whether or not the underlying array has been modified """
self._dirty = True
""" Holds a typed copy of the array """
self._typed = None
if isinstance(ary, (list, tuple, collections.abc.Sequence)):
self.data = ary
else:
raise TypeError("Invalid value given to array validator: {0}".format(ary))
logger.debug(fmt("Initializing ArrayWrapper {} with {}", self, ary))
@property
def typed_elems(self):
logger.debug(fmt("Accessing typed_elems of ArrayWrapper {} ", self))
if self._typed is None or self._dirty is True:
self.validate()
return self._typed
def __repr__(self):
return "<%s=%s>" % (self.__class__.__name__, str(self.data))
@classmethod
def from_json(cls, jsonmsg):
import json
msg = json.loads(jsonmsg)
obj = cls(msg)
obj.validate()
return obj
def serialize(self):
enc = util.ProtocolJSONEncoder()
return enc.encode(self.typed_elems)
def for_json(self):
from python_jsonschema_objects import classbuilder
out = []
for item in self.typed_elems:
if isinstance(
item,
(classbuilder.ProtocolBase, classbuilder.LiteralValue, ArrayWrapper),
):
out.append(item.for_json())
else:
out.append(item)
return out
def validate(self):
if self.strict or self._dirty:
self.validate_items()
self.validate_length()
self.validate_uniqueness()
return True
def validate_uniqueness(self):
if getattr(self, "uniqueItems", False) is True:
testset = set(repr(item) for item in self.data)
if len(testset) != len(self.data):
raise ValidationError(
"{0} has duplicate elements, but uniqueness required".format(
self.data
)
)
def validate_length(self):
if getattr(self, "minItems", None) is not None:
if len(self.data) < self.minItems:
raise ValidationError(
"{1} has too few elements. Wanted {0}.".format(
self.minItems, self.data
)
)
if getattr(self, "maxItems", None) is not None:
if len(self.data) > self.maxItems:
raise ValidationError(
"{1} has too many elements. Wanted {0}.".format(
self.maxItems, self.data
)
)
def validate_items(self):
"""Validates the items in the backing array, including
performing type validation.
Sets the _typed property and clears the dirty flag as a side effect
Returns:
The typed array
"""
logger.debug(fmt("Validating {}", self))
from python_jsonschema_objects import classbuilder
if self.__itemtype__ is None:
return
type_checks = self.__itemtype__
if not isinstance(type_checks, (tuple, list)):
# we were given items = {'type': 'blah'} ; thus ensure the type for all data.
type_checks = [type_checks] * len(self.data)
elif len(type_checks) > len(self.data):
raise ValidationError(
"{1} does not have sufficient elements to validate against {0}".format(
self.__itemtype__, self.data
)
)
typed_elems = []
for elem, typ in zip(self.data, type_checks):
if isinstance(typ, dict):
for param, paramval in six.iteritems(typ):
validator = registry(param)
if validator is not None:
validator(paramval, elem, typ)
typed_elems.append(elem)
elif util.safe_issubclass(typ, classbuilder.LiteralValue):
val = typ(elem)
val.validate()
typed_elems.append(val)
elif util.safe_issubclass(typ, classbuilder.ProtocolBase):
if not isinstance(elem, typ):
try:
if isinstance(
elem, (six.string_types, six.integer_types, float)
):
val = typ(elem)
else:
val = typ(**util.coerce_for_expansion(elem))
except TypeError as e:
raise ValidationError(
"'{0}' is not a valid value for '{1}': {2}".format(
elem, typ, e
)
)
else:
val = elem
val.validate()
typed_elems.append(val)
elif util.safe_issubclass(typ, ArrayWrapper):
val = typ(elem)
val.validate()
typed_elems.append(val)
elif isinstance(typ, (classbuilder.TypeProxy, classbuilder.TypeRef)):
try:
if isinstance(elem, (six.string_types, six.integer_types, float)):
val = typ(elem)
else:
val = typ(**util.coerce_for_expansion(elem))
except TypeError as e:
raise ValidationError(
"'{0}' is not a valid value for '{1}': {2}".format(elem, typ, e)
)
else:
val.validate()
typed_elems.append(val)
self._dirty = False
self._typed = typed_elems
return typed_elems
@staticmethod
def create(name, item_constraint=None, **addl_constraints):
"""Create an array validator based on the passed in constraints.
If item_constraint is a tuple, it is assumed that tuple validation
is being performed. If it is a class or dictionary, list validation
will be performed. Classes are assumed to be subclasses of ProtocolBase,
while dictionaries are expected to be basic types ('string', 'number', ...).
addl_constraints is expected to be key-value pairs of any of the other
constraints permitted by JSON Schema v4.
"""
logger.debug(
fmt(
"Constructing ArrayValidator with {} and {}",
item_constraint,
addl_constraints,
)
)
from python_jsonschema_objects import classbuilder
klassbuilder = addl_constraints.pop(
"classbuilder", None
) # type: python_jsonschema_objects.classbuilder.ClassBuilder
props = {}
if item_constraint is not None:
if isinstance(item_constraint, (tuple, list)):
for i, elem in enumerate(item_constraint):
isdict = isinstance(elem, (dict,))
isklass = isinstance(elem, type) and util.safe_issubclass(
elem, (classbuilder.ProtocolBase, classbuilder.LiteralValue)
)
if not any([isdict, isklass]):
raise TypeError(
"Item constraint (position {0}) is not a schema".format(i)
)
elif isinstance(
item_constraint, (classbuilder.TypeProxy, classbuilder.TypeRef)
):
pass
elif util.safe_issubclass(item_constraint, ArrayWrapper):
pass
else:
isdict = isinstance(item_constraint, (dict,))
isklass = isinstance(item_constraint, type) and util.safe_issubclass(
item_constraint,
(classbuilder.ProtocolBase, classbuilder.LiteralValue),
)
if not any([isdict, isklass]):
raise TypeError("Item constraint is not a schema")
if isdict and "$ref" in item_constraint:
if klassbuilder is None:
raise TypeError(
"Cannot resolve {0} without classbuilder".format(
item_constraint["$ref"]
)
)
item_constraint = klassbuilder.resolve_type(
item_constraint["$ref"], name
)
elif isdict and item_constraint.get("type") == "array":
# We need to create a sub-array validator.
item_constraint = ArrayWrapper.create(
name + "#sub",
item_constraint=item_constraint["items"],
addl_constraints=item_constraint,
)
elif isdict and "oneOf" in item_constraint:
# We need to create a TypeProxy validator
uri = "{0}_{1}".format(name, "<anonymous_list_type>")
type_array = klassbuilder.construct_objects(
item_constraint["oneOf"], uri
)
item_constraint = classbuilder.TypeProxy(type_array)
elif isdict and item_constraint.get("type") == "object":
""" We need to create a ProtocolBase object for this anonymous definition"""
uri = "{0}_{1}".format(name, "<anonymous_list_type>")
item_constraint = klassbuilder.construct(uri, item_constraint)
props["__itemtype__"] = item_constraint
strict = addl_constraints.pop("strict", False)
props["_strict_"] = strict
props.update(addl_constraints)
validator = type(str(name), (ArrayWrapper,), props)
return validator
| mit |
0x0all/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :np.ceil(0.5 * n_pixels)] # Upper half of the faces
y_train = train[:, np.floor(0.5 * n_pixels):] # Lower half of the faces
X_test = test[:, :np.ceil(0.5 * n_pixels)]
y_test = test[:, np.floor(0.5 * n_pixels):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
cloud9209/cloud9209_flask | lib/bs4/tests/test_lxml.py | 273 | 2965 | """Tests to ensure that the lxml tree builder generates good trees."""
import re
import warnings
try:
import lxml.etree
LXML_PRESENT = True
LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError, e:
LXML_PRESENT = False
LXML_VERSION = (0,)
if LXML_PRESENT:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from bs4.testing import skipIf
from bs4.tests import test_htmlparser
from bs4.testing import (
HTMLTreeBuilderSmokeTest,
XMLTreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
not LXML_PRESENT,
"lxml seems not to be present, not testing its tree builder.")
class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
"""See ``HTMLTreeBuilderSmokeTest``."""
@property
def default_builder(self):
return LXMLTreeBuilder()
def test_out_of_range_entity(self):
        self.assertSoupEquals(
            "<p>foo&#10000000000000bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo&#x10000000000000bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo&#1000000bar</p>", "<p>foobar</p>")
# In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
# test if an old version of lxml is installed.
@skipIf(
not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
"Skipping doctype test for old version of lxml to avoid segfault.")
def test_empty_doctype(self):
soup = self.soup("<!DOCTYPE>")
doctype = soup.contents[0]
self.assertEqual("", doctype.strip())
def test_beautifulstonesoup_is_xml_parser(self):
# Make sure that the deprecated BSS class uses an xml builder
# if one is installed.
with warnings.catch_warnings(record=True) as w:
soup = BeautifulStoneSoup("<b />")
self.assertEqual(u"<b/>", unicode(soup.b))
self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message))
def test_real_xhtml_document(self):
"""lxml strips the XML definition from an XHTML doc, which is fine."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(
soup.encode("utf-8").replace(b"\n", b''),
markup.replace(b'\n', b'').replace(
b'<?xml version="1.0" encoding="utf-8"?>', b''))
@skipIf(
not LXML_PRESENT,
"lxml seems not to be present, not testing its XML tree builder.")
class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest):
"""See ``HTMLTreeBuilderSmokeTest``."""
@property
def default_builder(self):
return LXMLTreeBuilderForXML()
| apache-2.0 |
franciscogmm/FinancialAnalysisUsingNLPandMachineLearning | SentimentAnalysis - Polarity - Domain Specific Lexicon.py | 1 | 2667 | import csv
import pandas as pd
import nltk
from nltk import FreqDist,ngrams
from nltk.corpus import stopwords
import string
from os import listdir
from os.path import isfile, join
def ngram_list(file,n):
f = open(file,'rU')
raw = f.read()
raw = raw.replace('\n',' ')
#raw = raw.decode('utf8')
#raw = raw.decode("utf-8", 'ignore')
ngramz = ngrams(raw.split(),n)
return ngramz
def IsNotNull(value):
return value is not None and len(value) > 0
mypath = '/Users/francis/Documents/FORDHAM/2nd Term/Text Analytics/' #path where files are located
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
dict_p = []
f = open('positive.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_p.append(t)
f.close()
dict_n = []
f = open('negative.txt', 'r')
for line in f:
t = line.strip().lower()
if IsNotNull(t):
dict_n.append(t)
f.close()
totallist = []
rowlist = []
qa = 0
qb = 0
counti = 0
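# Score each text file with a simple lexicon lookup: qa counts positive
# lexicon terms that appear in the file, qb counts negative terms, and the
# net score qc = qa - qb decides the sentiment label. Note that
# "word in raw" is a substring test, so each lexicon entry is counted at
# most once per file.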
for i in onlyfiles:
if i.endswith('.txt'):
# get code
j = i.replace('.txt','')
# string filename
file = mypath + str(i)
print i
f = open(file,'rU')
raw = f.read()
#print type(raw)
raw = [w.translate(None, string.punctuation) for w in raw]
raw = ''.join(raw)
raw = raw.replace('\n','')
raw = raw.replace(' ','')
#print raw
qa = 0
qb = 0
for word in dict_p:
if word in raw:
qa += 1
for word in dict_n:
if word in raw:
qb += 1
qc = qa - qb
if qc > 0:
sentiment = 'POSITIVE'
elif qc == 0:
sentiment = 'NEUTRAL'
else:
sentiment = 'NEGATIVE'
rowlist.append(i)
rowlist.append(qa)
rowlist.append(qb)
rowlist.append(qc)
rowlist.append(sentiment)
print counti
counti += 1
totallist.append(rowlist)
rowlist = []
else:
pass
labels = ('file', 'P', 'N', 'NET', 'SENTIMENT')
df = pd.DataFrame.from_records(totallist, columns = labels)
df.to_csv('oursentiment.csv', index = False)
#print dict_p
# allbigrams.append(ngram_list(file,2))
# print i + ' BIGRAM - OK'
# alltrigrams.append(ngram_list(file,3))
# print i + ' TRIGRAM - OK'
# allfourgrams.append(ngram_list(file,4))
# print i + ' FOURGRAM - OK'
# allfivegrams.append(ngram_list(file,5))
# print i + ' TRIGRAM - OK'
# allsixgrams.append(ngram_list(file,6))
# print i + ' SIXGRAM - OK'
# allsevengrams.append(ngram_list(file,7))
# print i + ' SEVENGRAM - OK'
# alleightgrams.append(ngram_list(file,8))
# print i + ' EIGHTGRAM - OK' | mit |
oihane/server-tools | auth_dynamic_groups/model/res_users.py | 14 | 2115 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.models import Model
from openerp.modules.registry import RegistryManager
from openerp import SUPERUSER_ID
class res_users(Model):
_inherit = 'res.users'
def _login(self, db, login, password):
uid = super(res_users, self)._login(db, login, password)
if uid:
self.update_dynamic_groups(uid, db)
return uid
def update_dynamic_groups(self, uid, db):
pool = RegistryManager.get(db)
cr = pool._db.cursor()
user = pool.get('res.users').browse(cr, SUPERUSER_ID, uid)
groups_obj = pool.get('res.groups')
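        # Odoo/OpenERP many2many write commands: (4, id) links a group to the
        # user and (3, id) unlinks it, so each dynamic group is added or
        # removed according to how its condition evaluates for this user.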
user.write(
{
'groups_id': [
(4, dynamic_group.id)
if dynamic_group.eval_dynamic_group_condition(uid=uid)
else (3, dynamic_group.id)
for dynamic_group in groups_obj.browse(
cr, SUPERUSER_ID,
groups_obj.search(cr, SUPERUSER_ID,
[('is_dynamic', '=', True)]))
],
})
cr.commit()
cr.close()
| agpl-3.0 |
DocBO/mubosym | mubosym/simple_tire_model_interface.py | 2 | 8005 | # -*- coding: utf-8 -*-
"""
simple_tire_model_interface
===========================
Created on Wed May 27 18:02:53 2015
@author: oliver
"""
import sys
from sympy import lambdify, symbols
import numpy as np
b = [1.5,0.,1100.,0.,300.,0.,0.,0.,-2.,0.,0.,0.,0.,0.]
a = [1.4,0.,1100.,1100.,10.,0.,0.,-2.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]
def Pacejka_F_long(Fz, slip):
"""
longitudinal force
:param (float) Fz: Force in vertical direction in N
:param (float) slip: relative slip fraction (0..1)
"""
if Fz == 0:
return 0.
slip = slip*100.0
Fz = Fz/1000.0
C = b[0]
D = Fz*(b[1]*Fz+b[2])
BCD = (Fz*(b[3]*Fz+b[4]))*np.exp(-b[5]*Fz)
B = BCD/(C*D)
H = b[9]*Fz+b[10]
V = b[11]*Fz+b[12]
E = ((b[6]*Fz*Fz)+b[7]*Fz+b[8])*(1-(b[13]*np.sign(slip+H)))
Bx1 = B*(slip+H)
Fx = D*np.sin(C*np.arctan(Bx1-E*(Bx1-np.arctan(Bx1))))+V
return Fx
def Pacejka_F_lat(Fz, alpha, camber):
"""
lateral force
:param (float) Fz: Force in vertical direction in N
:param (float) alpha: slip angle in rad
:param (float) camber: camber angle in rad
"""
if Fz == 0:
return 0.
alpha = alpha * 180.0/np.pi
camber = camber * 180.0/np.pi
Fz = Fz/1000.0
C = a[0]
D = Fz*(a[1]*Fz+a[2])*(1-a[15]*np.power(camber,2))
BCD = a[3]*np.sin(np.arctan(Fz/a[4])*2)*(1-a[5]*np.fabs(camber))
B = BCD/(C*D)
H = a[8]*Fz+a[9]+a[10]*camber
V = a[11]*Fz+a[12]+(a[13]*Fz+a[14])*camber*Fz
E = (a[6]*Fz+a[7])*(1-(a[16]*camber+a[17])*np.sign(alpha+H))
Bx1 = B*(alpha+H)
Fy = D*np.sin(C*np.arctan(Bx1-E*(Bx1-np.arctan(Bx1))))+V
return Fy
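# Usage sketch (added for illustration, not part of the original module):
# both helpers expect the vertical load Fz in N, the slip as a fraction and
# the angles in rad; the numbers below are arbitrary example values.
#
#   Fz = 3000.0                        # vertical load [N]
#   Fx = Pacejka_F_long(Fz, 0.08)      # longitudinal force at 8% slip
#   Fy = Pacejka_F_lat(Fz, 0.05, 0.0)  # lateral force at ~2.9 deg slip angle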
class simple_tire_model():
"""
A one body force model consists of:
    * coordinate trafo generalized coords -> body coordinates (given as a list) including pos, vel, orientation, and omega
* force calculator given as a python function with input according to our interface
* some preparation function: lambdifier to include symbolic functions into lambdas
"""
def __init__(self, paras = []):
# setup parameters
self.t = 0.
self.D = 200000.
self.gamma = 200.0
self.y0 = 0.0
self.C_side = 4500.0
self.C_align = 200.0
self.C_slip = 300.0
self.R_tire = 0.33
self.trafo = []
self.F_max = 4500.0
self.gamma_torque = 2.0
self.max_p = 100.0
self.tau = 0.1
self.signals = []
self.signals_values = []
def set_coordinate_trafo(self, tr):
"""
Input function for the coordinate trafo expressions (sympy).
:param tr: the transformation expressions as given in the mbs setup for the body
"""
self.trafo = tr
def set_subs_dicts(self, subs_dicts):
for sd in subs_dicts:
for ii in range(len(self.trafo)):
self.trafo[ii] = self.trafo[ii].subs(sd)
for ii in range(len(self.signals)):
self.signals[ii] = self.signals[ii].subs(sd)
def add_signal(self, expr):
self.signals.append(expr)
def lambdify_trafo(self, generalized_coords):
"""
        This is the core function to lambdify the coordinate trafos in general;
        the trafos must be explicitly set via set_coordinate_trafo, called from MBSCore (see therein).
:param generalized_coords: the generalized coords (symbols) of the final mbs setup (called in kaneify)
"""
if len(self.trafo) < 12:
print("call set_coordinate_trafo first")
sys.exit(0)
# for ii in range(12):
# print ii, self.trafo[ii]
t = symbols('t')
self.lam_t = lambdify(generalized_coords, t)
self.lam_x = lambdify(generalized_coords, self.trafo[0])
self.lam_y = lambdify(generalized_coords, self.trafo[1])
self.lam_z = lambdify(generalized_coords, self.trafo[2])
self.lam_nx = lambdify(generalized_coords, self.trafo[3])
self.lam_ny = lambdify(generalized_coords, self.trafo[4])
self.lam_nz = lambdify(generalized_coords, self.trafo[5])
self.lam_x_pt = lambdify(generalized_coords, self.trafo[6])
self.lam_y_pt = lambdify(generalized_coords, self.trafo[7])
self.lam_z_pt = lambdify(generalized_coords, self.trafo[8])
self.lam_omega_x = lambdify(generalized_coords, self.trafo[9])
self.lam_omega_y = lambdify(generalized_coords, self.trafo[10])
self.lam_omega_z = lambdify(generalized_coords, self.trafo[11])
self.lam_signals = [ lambdify(generalized_coords, expr) for expr in self.signals]
def trafo_lam(self, w):
"""
Just for reference all coordinate trafos as lambdas (not used at the moment).
:param w: the generalized coords (float numbers) of the final mbs setup
"""
return [self.lam_t(*w), self.lam_x(*w), self.lam_y(*w), self.lam_z(*w), \
self.lam_nx(*w), self.lam_ny(*w), self.lam_nz(*w), \
self.lam_x_pt(*w), self.lam_y_pt(*w), self.lam_z_pt(*w), \
self.lam_omega_x(*w), self.lam_omega_y(*w), self.lam_omega_z(*w)]
def force_lam(self, w):
"""
        The model force/torque via lambdified expressions; the input parameter here is always the full state vector t,q,u.
        Output is the force/torque via the model calc-function; the nested input for the calc routine, fully written out, is:
* self.lam_t, self.lam_x, self.lam_y, self.lam_z,
* self.lam_nx, self.lam_ny, self.lam_nz,
* self.lam_x_pt, self.lam_y_pt, self.lam_z_pt,
* self.lam_omega_x self.lam_omega_y, self.lam_omega_z
but can be reduced to a subset
        :param w: the generalized coords (float numbers) of the final mbs setup. The order has to equal the one in calc.
"""
self.signals_values = [x(*w) for x in self.lam_signals]
return self._calc([ self.lam_t(*w), self.lam_y(*w), \
self.lam_x_pt(*w), self.lam_y_pt(*w), self.lam_z_pt(*w),\
self.lam_omega_z(*w) ] )
def _calc(self, inp):
"""
The python function which connects some external model calculation with the mbs model
e.g. tire-model, rail model. It is only called internally by force_lam.
        * the input list inp contains some relevant model coordinates (out of 12 possible): [ x, y, z, nx, ny, nz, x_pt, y_pt, z_pt, omega_x, omega_y, omega_z ] = inp
        * the output list is the force in cartesian world coordinates and the torque in cartesian world coordinates
        :param inp: the subset of all possible coord. of one body (see list), here expected as float numbers. The order has to equal the one in force_lam
"""
signals = self.signals_values
[ t, y , x_pt, y_pt, z_pt, omega_z ] = inp
#print "SSSig: ",signals
eps = 5.0e-1
#preset values
F_x = 0.
F_y = 0.
F_z = 0.
T_x = 0.
T_y = 0.
T_z = 0.
#vertical reaction force
if y<0:
F_y = -self.D*(y-self.y0) - self.gamma*y_pt
else:
F_y = 0.
#side slip angle
alpha = np.arctan2(z_pt,(x_pt+eps)) #in the tire carrier frame
#slip
slip = (omega_z * self.R_tire + x_pt)/np.abs(x_pt+eps)
#######################################################
# Pacejka - Model:
F_z = - Pacejka_F_lat(F_y, alpha, 0.)
F_x = - Pacejka_F_long(F_y, slip)
T_z = F_x * self.R_tire - self.gamma_torque * omega_z
#print F_y
#self.oz += 1./10.*delta_t * T_z
return [F_x, F_y, F_z, T_x, T_y, T_z], [F_x, F_y, F_z, T_z, 1e+2*slip, 180/np.pi*alpha]
def get_signal_length(self):
return 6
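if __name__ == '__main__':
    # Minimal self-check (illustrative only, not in the original module):
    # evaluate _calc directly with hand-picked numbers in the order
    # [t, y, x_pt, y_pt, z_pt, omega_z] that force_lam uses internally.
    model = simple_tire_model()
    forces, extra = model._calc([0.0, -0.01, 10.0, 0.0, 0.5, -30.0])
    print("forces/torques [F_x, F_y, F_z, T_x, T_y, T_z]: %s" % forces)
    print("debug signals [F_x, F_y, F_z, T_z, slip*100, alpha_deg]: %s" % extra)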
| mit |
mdaif/olympia | apps/landfill/tests/test_categories.py | 15 | 1025 | # -*- coding: utf-8 -*-
from nose.tools import eq_, ok_
import amo
import amo.tests
from addons.models import Category
from constants.applications import APPS
from landfill.categories import generate_categories
class CategoriesTests(amo.tests.TestCase):
def test_categories_themes_generation(self):
data = generate_categories()
eq_(len(data), Category.objects.all().count())
eq_(len(data), 15)
def test_categories_themes_translations(self):
with self.activate(locale='es'):
data = generate_categories()
ok_(unicode(data[0].name).startswith(u'(español) '))
def test_categories_addons_generation(self):
data = generate_categories(APPS['android'])
eq_(len(data), Category.objects.all().count())
eq_(len(data), 10)
def test_categories_addons_translations(self):
with self.activate(locale='es'):
data = generate_categories(APPS['android'])
ok_(unicode(data[0].name).startswith(u'(español) '))
| bsd-3-clause |
alex/raven | raven/handlers/logging.py | 3 | 4470 | """
raven.handlers.logging
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import datetime
import logging
import sys
import traceback
from raven.base import Client
from raven.utils.encoding import to_string
from raven.utils.stacks import iter_stack_frames
class SentryHandler(logging.Handler, object):
def __init__(self, *args, **kwargs):
client = kwargs.get('client_cls', Client)
if len(args) == 1:
arg = args[0]
if isinstance(arg, basestring):
self.client = client(dsn=arg)
elif isinstance(arg, Client):
self.client = arg
else:
raise ValueError('The first argument to %s must be either a Client instance or a DSN, got %r instead.' % (
self.__class__.__name__,
arg,
))
elif 'client' in kwargs:
self.client = kwargs['client']
elif len(args) == 2 and not kwargs:
servers, key = args
self.client = client(servers=servers, key=key)
else:
self.client = client(*args, **kwargs)
logging.Handler.__init__(self)
def emit(self, record):
# from sentry.client.middleware import SentryLogMiddleware
# # Fetch the request from a threadlocal variable, if available
# request = getattr(SentryLogMiddleware.thread, 'request', None)
self.format(record)
# Avoid typical config issues by overriding loggers behavior
if record.name.startswith('sentry.errors'):
print >> sys.stderr, to_string(record.message)
return
try:
return self._emit(record)
except Exception:
print >> sys.stderr, "Top level Sentry exception caught - failed creating log record"
print >> sys.stderr, to_string(record.msg)
print >> sys.stderr, to_string(traceback.format_exc())
try:
self.client.capture('Exception')
except Exception:
pass
def _emit(self, record, **kwargs):
data = {}
for k, v in record.__dict__.iteritems():
if '.' not in k and k not in ('culprit',):
continue
data[k] = v
stack = getattr(record, 'stack', None)
if stack is True:
stack = iter_stack_frames()
if stack:
frames = []
started = False
last_mod = ''
for item in stack:
if isinstance(item, (list, tuple)):
frame, lineno = item
else:
frame, lineno = item, item.f_lineno
if not started:
f_globals = getattr(frame, 'f_globals', {})
module_name = f_globals.get('__name__', '')
if last_mod.startswith('logging') and not module_name.startswith('logging'):
started = True
else:
last_mod = module_name
continue
frames.append((frame, lineno))
stack = frames
extra = getattr(record, 'data', {})
# Add in all of the data from the record that we aren't already capturing
for k in record.__dict__.keys():
if k in ('stack', 'name', 'args', 'msg', 'levelno', 'exc_text', 'exc_info', 'data', 'created', 'levelname', 'msecs', 'relativeCreated'):
continue
if k.startswith('_'):
continue
extra[k] = record.__dict__[k]
date = datetime.datetime.utcfromtimestamp(record.created)
# If there's no exception being processed, exc_info may be a 3-tuple of None
# http://docs.python.org/library/sys.html#sys.exc_info
if record.exc_info and all(record.exc_info):
handler = self.client.get_handler('raven.events.Exception')
data.update(handler.capture(exc_info=record.exc_info))
data['checksum'] = handler.get_hash(data)
data['level'] = record.levelno
data['logger'] = record.name
return self.client.capture('Message', message=record.msg, params=record.args,
stack=stack, data=data, extra=extra,
date=date, **kwargs)
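# Typical wiring (sketch, not part of the original module): the handler can be
# attached to a standard library logger; the DSN below is only a placeholder.
#
#   import logging
#   from raven.handlers.logging import SentryHandler
#   logger = logging.getLogger(__name__)
#   logger.addHandler(SentryHandler('http://public:secret@example.com/1'))
#   logger.error('something broke', exc_info=True)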
| bsd-3-clause |
timokoola/finnkinotxt | botocore/vendored/requests/packages/urllib3/poolmanager.py | 678 | 9406 | import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .exceptions import LocationValueError, MaxRetryError
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version')
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = pool_classes_by_scheme[scheme]
kwargs = self.connection_pool_kw
if scheme == 'http':
kwargs = self.connection_pool_kw.copy()
for kw in SSL_KEYWORDS:
kwargs.pop(kw, None)
return pool_cls(host, port, **kwargs)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
if not host:
raise LocationValueError("No host specified.")
scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port)
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 7231, Section 6.4.4
if response.status == 303:
method = 'GET'
retries = kw.get('retries')
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
raise
return response
kw['retries'] = retries
kw['redirect'] = redirect
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
assert proxy.scheme in ("http", "https"), \
'Not supported proxy scheme %s' % proxy.scheme
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
headers = kw.get('headers', self.headers)
kw['headers'] = self._set_proxy_headers(url, headers)
return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
| apache-2.0 |
sassoftware/mint | mint/django_rest/rbuilder/querysets/views/v1/views.py | 1 | 8001 | #!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django import http
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from mint.django_rest.deco import return_xml, requires, access, xObjRequires
from mint.django_rest.rbuilder import service
# from mint.django_rest.rbuilder.querysets import models
from mint.django_rest.rbuilder.rbac.rbacauth import rbac, manual_rbac
from mint.django_rest.rbuilder.errors import PermissionDenied
from mint.django_rest.rbuilder.rbac.manager.rbacmanager import \
READSET, MODSETDEF
def rbac_can_read_queryset(view, request, query_set_id, *args, **kwargs):
obj = view.mgr.getQuerySet(query_set_id)
if obj.is_public:
        # existence of querysets like "All Systems", etc, is not stealthed,
        # but they may vary in size depending on the accessing user's
        # (ReadMember) permissions on their contents.
return True
user = view.mgr.getSessionInfo().user[0]
ok = view.mgr.userHasRbacPermission(user, obj, READSET)
return ok
def rbac_can_write_queryset(view, request, query_set_id, *args, **kwargs):
obj = view.mgr.getQuerySet(query_set_id)
user = view.mgr.getSessionInfo().user[0]
return view.mgr.userHasRbacPermission(user, obj, MODSETDEF)
class BaseQuerySetService(service.BaseService):
pass
class QuerySetsService(BaseQuerySetService):
# rbac is handled semimanually for this function -- show only
# querysets that we have permission to see
# but don't use full rbac code, because that is implemented using querysets
# and is too meta.
@access.authenticated
@return_xml
def rest_GET(self, request):
user = request._authUser
querysets = self.mgr.getQuerySets()
return self.mgr.filterRbacQuerysets(user, querysets, request)
# not used above, but still needed by load_from_href and other
# functions
def get(self):
return self.mgr.getQuerySets()
@access.admin
@requires('query_set', load=True, save=True)
@return_xml
def rest_POST(self, request, query_set):
return self.mgr.addQuerySet(query_set, request._authUser)
class QuerySetService(BaseQuerySetService):
# rbac is handled semimanually for this function -- show only
# querysets that we have permission to see
# but don't use full rbac code, because that is implemented using querysets
# and is too meta.
@rbac(manual_rbac)
@return_xml
def rest_GET(self, request, query_set_id):
user = request._authUser
queryset = self.mgr.getQuerySet(query_set_id)
if not queryset.is_public and not self.mgr.userHasRbacPermission(
user, queryset, READSET, request
):
raise PermissionDenied()
return queryset
# not used above, but still needed by load_from_href and other
# functions
def get(self, query_set_id):
return self.mgr.getQuerySet(query_set_id)
@access.admin
@requires('query_set')
@return_xml
def rest_PUT(self, request, query_set_id, query_set):
oldQuerySet = self.mgr.getQuerySet(query_set_id)
if oldQuerySet.pk != query_set.pk:
raise PermissionDenied(msg='Attempting to reassign ID')
return self.mgr.updateQuerySet(query_set, request._authUser)
@access.admin
def rest_DELETE(self, request, query_set_id):
querySet = self.mgr.getQuerySet(query_set_id)
self.mgr.deleteQuerySet(querySet)
response = http.HttpResponse(status=204)
return response
class QuerySetAllResultService(BaseQuerySetService):
@access.authenticated
@return_xml
def rest_GET(self, request, query_set_id):
return self.mgr.getQuerySetAllResult(query_set_id, for_user=request._authUser)
class QuerySetUniverseResultService(BaseQuerySetService):
'''the parent queryset of all objects of a given type'''
@access.authenticated
@return_xml
def rest_GET(self, request, query_set_id):
self.mgr.getQuerySetUniverseSet(query_set_id)
url = reverse('QuerySetAllResult', args=[query_set_id])
return HttpResponseRedirect(url)
class QuerySetChosenResultService(BaseQuerySetService):
@access.authenticated
@return_xml
def rest_GET(self, request, query_set_id):
return self.mgr.getQuerySetChosenResult(query_set_id, for_user=request._authUser)
@rbac(rbac_can_write_queryset)
    # TODO: source from constant somewhere
@requires(['systems', 'users', 'images', 'targets', 'project_branch_stages', 'projects', 'grants', 'roles'])
@return_xml
def rest_PUT(self, request, query_set_id, *args, **kwargs):
resources = kwargs.items()[0][1]
return self.mgr.addQuerySetChosen(query_set_id, resources, request._authUser)
@rbac(rbac_can_write_queryset)
    # TODO: source from constant somewhere
@requires(['system', 'user', 'image', 'target', 'project_branch_stage', 'project_branch', 'project', 'grant', 'role'])
@return_xml
def rest_POST(self, request, query_set_id, *args, **kwargs):
resource = kwargs.items()[0][1]
self.mgr.updateQuerySetChosen(query_set_id, resource, request._authUser)
return resource
@rbac(rbac_can_write_queryset)
    # TODO: source from constant somewhere
@requires(['system', 'user', 'image', 'target', 'project_branch_stage', 'project_branch', 'project', 'grant', 'role'])
@return_xml
def rest_DELETE(self, request, query_set_id, *args, **kwargs):
resource = kwargs.items()[0][1]
return self.mgr.deleteQuerySetChosen(query_set_id, resource, request._authUser)
class QuerySetFilteredResultService(BaseQuerySetService):
@access.authenticated
@return_xml
def rest_GET(self, request, query_set_id):
return self.mgr.getQuerySetFilteredResult(query_set_id, for_user=request._authUser)
class QuerySetChildResultService(BaseQuerySetService):
@access.authenticated
@return_xml
def rest_GET(self, request, query_set_id):
if rbac_can_read_queryset(self, request, query_set_id):
return self.mgr.getQuerySetChildResult(query_set_id)
else:
return self.mgr.getQuerySetChildResult(query_set_id, for_user=request._authUser)
# this is not expected to be our final API for removing child members
# but serves as a temporary one in case someone needs it. Deleting
# the queryset is not an option to clear it out because associated
# grants would be purged.
@rbac(rbac_can_write_queryset)
@requires('query_set')
@return_xml
def rest_DELETE(self, request, query_set_id, query_set):
return self.mgr.deleteQuerySetChild(query_set_id, query_set, for_user=request._authUser)
class QuerySetJobsService(BaseQuerySetService):
# no way to list running jobs at the moment
# since all jobs run immediately
@rbac(rbac_can_read_queryset)
@xObjRequires('job')
def rest_POST(self, request, query_set_id, job):
'''launch a job on this queryset'''
queryset = self.mgr.getQuerySet(query_set_id)
self.mgr.scheduleQuerySetJobAction(
queryset, job
)
return http.HttpResponse(status=200)
class QuerySetFilterDescriptorService(BaseQuerySetService):
# @access.authenticated
@return_xml
def rest_GET(self, request, query_set_id=None):
return self.mgr.getQuerySetFilterDescriptor(query_set_id)
| apache-2.0 |
dneg/cortex | python/IECoreMaya/TransformationMatrixParameterUI.py | 12 | 5608 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import maya.cmds
import IECore
import IECoreMaya
## The UI for the TransformationMatrixParameter supports the following
## userData()
##
## - "visibleFields" IECore.StringVectorData, A list of fields to
## display in the UI. Possible values are (D marks a default):
## "translate" D
## "rotate", D
## "scale" D
## "shear" D
## "rotatePivot",
## "rotatePivotTranslation",
## "scalePivot"
## "scalePivotTranslation"
class TransformationMatrixParameterUI( IECoreMaya.ParameterUI ) :
_allFields = ( "translate", "rotate", "scale", "shear", "scalePivot", "scalePivotTranslation", "rotatePivot", "rotatePivotTranslation" )
def __init__( self, node, parameter, **kw ) :
self._outerColumn = maya.cmds.columnLayout( adj=True )
IECoreMaya.ParameterUI.__init__( self, node, parameter, self._outerColumn, **kw )
maya.cmds.rowLayout( numberOfColumns=2, parent=self._outerColumn )
self._label = maya.cmds.text(
label = self.label(),
font = "tinyBoldLabelFont",
align = "right",
annotation = self.description()
)
self._manip = maya.cmds.button( label="Manipulate" )
maya.cmds.setParent("..")
maya.cmds.setParent("..")
self._fields = {}
self.__kw = kw.copy()
self.replace( self.node(), self.parameter )
def replace( self, node, parameter ) :
IECoreMaya.ParameterUI.replace( self, node, parameter )
currentParent = maya.cmds.setParent( query=True )
visibleFields = IECore.StringVectorData( ( "translate", "rotate", "scale", "shear" ) )
with IECore.IgnoredExceptions( KeyError ) :
userDataFields = parameter.userData()["UI"]["visibleFields"]
visibleFields = []
for u in userDataFields :
if u not in TransformationMatrixParameterUI._allFields:
IECore.msg(
IECore.Msg.Level.Warning,
"TransformationMatrixParameterUI",
"Invalid field '%s' requested in UI userData for '%s'. Available fields are %s."
% ( u, parameter.name, TransformationMatrixParameterUI._allFields )
)
continue
visibleFields.append( u )
for f in self._fields.keys() :
if f not in visibleFields :
maya.cmds.deleteUI( self._fields[f][0] )
del self._fields[f]
fnPH = IECoreMaya.FnParameterisedHolder( node )
baseName = fnPH.parameterPlugPath( parameter )
self._addPopupMenu( parentUI=self._label, attributeName=baseName )
for f in visibleFields :
if f not in self._fields :
layout = maya.cmds.rowLayout(
numberOfColumns = 4,
parent = self._outerColumn,
columnWidth4 = [ IECoreMaya.ParameterUI.textColumnWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex, IECoreMaya.ParameterUI.singleWidgetWidthIndex ]
)
maya.cmds.text( label=f, font="smallPlainLabelFont", align="right" )
self._fields[f] = ( layout, maya.cmds.floatField(), maya.cmds.floatField(), maya.cmds.floatField() )
maya.cmds.connectControl( self._fields[f][1], "%s%s%i" % ( baseName, f, 0 ) )
maya.cmds.connectControl( self._fields[f][2], "%s%s%i" % ( baseName, f, 1 ) )
maya.cmds.connectControl( self._fields[f][3], "%s%s%i" % ( baseName, f, 2 ) )
maya.cmds.button(
self._manip,
edit = True,
# The manip is currently only registered for float types
visible = isinstance( parameter, IECore.TransformationMatrixfParameter ),
command = self._createCallback( IECore.curry( IECoreMaya.ManipulatorUI.manipulateParameter, node, parameter ) )
)
maya.cmds.setParent( currentParent )
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.TransformationMatrixfParameter, TransformationMatrixParameterUI )
IECoreMaya.ParameterUI.registerUI( IECore.TypeId.TransformationMatrixdParameter, TransformationMatrixParameterUI )
| bsd-3-clause |
simonpatrick/bite-project | deps/gdata-python-client/tests/gdata_tests/client_smoke_test.py | 39 | 1743 | #!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = '[email protected] (Jeff Scudder)'
import unittest
import gdata.test_config as conf
import gdata.analytics.client
import gdata.apps.emailsettings.client
import gdata.blogger.client
import gdata.spreadsheets.client
import gdata.calendar_resource.client
import gdata.contacts.client
import gdata.docs.client
import gdata.projecthosting.client
import gdata.sites.client
class ClientSmokeTest(unittest.TestCase):
def test_check_auth_client_classes(self):
conf.check_clients_with_auth(self, (
gdata.analytics.client.AnalyticsClient,
gdata.apps.emailsettings.client.EmailSettingsClient,
gdata.blogger.client.BloggerClient,
gdata.spreadsheets.client.SpreadsheetsClient,
gdata.calendar_resource.client.CalendarResourceClient,
gdata.contacts.client.ContactsClient,
gdata.docs.client.DocsClient,
gdata.projecthosting.client.ProjectHostingClient,
gdata.sites.client.SitesClient
))
def suite():
return conf.build_suite([ClientSmokeTest])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
msabramo/ansible | lib/ansible/modules/cloud/google/gcdns_record.py | 49 | 28445 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 CallFire Inc.
#
# This file is part of Ansible.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcdns_record
short_description: Creates or removes resource records in Google Cloud DNS
description:
- Creates or removes resource records in Google Cloud DNS.
version_added: "2.2"
author: "William Albert (@walbert947)"
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.19.0"
options:
state:
description:
- Whether the given resource record should or should not be present.
required: false
choices: ["present", "absent"]
default: "present"
record:
description:
- The fully-qualified domain name of the resource record.
required: true
aliases: ['name']
zone:
description:
- The DNS domain name of the zone (e.g., example.com).
- One of either I(zone) or I(zone_id) must be specified as an
option, or the module will fail.
- If both I(zone) and I(zone_id) are specifed, I(zone_id) will be
used.
required: false
zone_id:
description:
- The Google Cloud ID of the zone (e.g., example-com).
- One of either I(zone) or I(zone_id) must be specified as an
option, or the module will fail.
- These usually take the form of domain names with the dots replaced
with dashes. A zone ID will never have any dots in it.
- I(zone_id) can be faster than I(zone) in projects with a large
number of zones.
- If both I(zone) and I(zone_id) are specifed, I(zone_id) will be
used.
required: false
type:
description:
- The type of resource record to add.
required: true
choices: [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
record_data:
description:
- The record_data to use for the resource record.
- I(record_data) must be specified if I(state) is C(present) or
I(overwrite) is C(True), or the module will fail.
- Valid record_data vary based on the record's I(type). In addition,
resource records that contain a DNS domain name in the value
field (e.g., CNAME, PTR, SRV, .etc) MUST include a trailing dot
in the value.
- Individual string record_data for TXT records must be enclosed in
double quotes.
- For resource records that have the same name but different
record_data (e.g., multiple A records), they must be defined as
multiple list entries in a single record.
required: false
aliases: ['value']
ttl:
description:
- The amount of time in seconds that a resource record will remain
cached by a caching resolver.
required: false
default: 300
overwrite:
description:
- Whether an attempt to overwrite an existing record should succeed
or fail. The behavior of this option depends on I(state).
- If I(state) is C(present) and I(overwrite) is C(True), this
module will replace an existing resource record of the same name
with the provided I(record_data). If I(state) is C(present) and
I(overwrite) is C(False), this module will fail if there is an
existing resource record with the same name and type, but
different resource data.
- If I(state) is C(absent) and I(overwrite) is C(True), this
module will remove the given resource record unconditionally.
If I(state) is C(absent) and I(overwrite) is C(False), this
module will fail if the provided record_data do not match exactly
with the existing resource record's record_data.
required: false
choices: [True, False]
default: False
service_account_email:
description:
- The e-mail address for a service account with access to Google
Cloud DNS.
required: false
default: null
pem_file:
description:
- The path to the PEM file associated with the service account
email.
- This option is deprecated and may be removed in a future release.
Use I(credentials_file) instead.
required: false
default: null
credentials_file:
description:
- The path to the JSON file associated with the service account
email.
required: false
default: null
project_id:
description:
- The Google Cloud Platform project ID to use.
required: false
default: null
notes:
- See also M(gcdns_zone).
- This modules's underlying library does not support in-place updates for
DNS resource records. Instead, resource records are quickly deleted and
recreated.
- SOA records are technically supported, but their functionality is limited
to verifying that a zone's existing SOA record matches a pre-determined
value. The SOA record cannot be updated.
- Root NS records cannot be updated.
- NAPTR records are not supported.
'''
EXAMPLES = '''
# Create an A record.
- gcdns_record:
record: 'www1.example.com'
zone: 'example.com'
type: A
value: '1.2.3.4'
# Update an existing record.
- gcdns_record:
record: 'www1.example.com'
zone: 'example.com'
type: A
overwrite: true
value: '5.6.7.8'
# Remove an A record.
- gcdns_record:
record: 'www1.example.com'
zone_id: 'example-com'
state: absent
type: A
value: '5.6.7.8'
# Create a CNAME record.
- gcdns_record:
record: 'www.example.com'
zone_id: 'example-com'
type: CNAME
value: 'www.example.com.' # Note the trailing dot
# Create an MX record with a custom TTL.
- gcdns_record:
record: 'example.com'
zone: 'example.com'
type: MX
ttl: 3600
value: '10 mail.example.com.' # Note the trailing dot
# Create multiple A records with the same name.
- gcdns_record:
record: 'api.example.com'
zone_id: 'example-com'
type: A
record_data:
- '192.0.2.23'
- '10.4.5.6'
- '198.51.100.5'
- '203.0.113.10'
# Change the value of an existing record with multiple record_data.
- gcdns_record:
record: 'api.example.com'
zone: 'example.com'
type: A
overwrite: true
record_data: # WARNING: All values in a record will be replaced
- '192.0.2.23'
- '192.0.2.42' # The changed record
- '198.51.100.5'
- '203.0.113.10'
# Safely remove a multi-line record.
- gcdns_record:
record: 'api.example.com'
zone_id: 'example-com'
state: absent
type: A
record_data: # NOTE: All of the values must match exactly
- '192.0.2.23'
- '192.0.2.42'
- '198.51.100.5'
- '203.0.113.10'
# Unconditionally remove a record.
- gcdns_record:
record: 'api.example.com'
zone_id: 'example-com'
state: absent
overwrite: true # overwrite is true, so no values are needed
type: A
# Create an AAAA record
- gcdns_record:
record: 'www1.example.com'
zone: 'example.com'
type: AAAA
value: 'fd00:db8::1'
# Create a PTR record
- gcdns_record:
record: '10.5.168.192.in-addr.arpa'
zone: '5.168.192.in-addr.arpa'
type: PTR
value: 'api.example.com.' # Note the trailing dot.
# Create an NS record
- gcdns_record:
record: 'subdomain.example.com'
zone: 'example.com'
type: NS
ttl: 21600
record_data:
- 'ns-cloud-d1.googledomains.com.' # Note the trailing dots on values
- 'ns-cloud-d2.googledomains.com.'
- 'ns-cloud-d3.googledomains.com.'
- 'ns-cloud-d4.googledomains.com.'
# Create a TXT record
- gcdns_record:
record: 'example.com'
zone_id: 'example-com'
type: TXT
record_data:
- '"v=spf1 include:_spf.google.com -all"' # A single-string TXT value
- '"hello " "world"' # A multi-string TXT value
'''
RETURN = '''
overwrite:
description: Whether to the module was allowed to overwrite the record
returned: success
type: boolean
sample: True
record:
description: Fully-qualified domain name of the resource record
returned: success
type: string
sample: mail.example.com.
state:
description: Whether the record is present or absent
returned: success
type: string
sample: present
ttl:
description: The time-to-live of the resource record
returned: success
type: int
sample: 300
type:
description: The type of the resource record
returned: success
type: string
sample: A
record_data:
description: The resource record values
returned: success
type: list
sample: ['5.6.7.8', '9.10.11.12']
zone:
description: The dns name of the zone
returned: success
type: string
sample: example.com.
zone_id:
description: The Google Cloud DNS ID of the zone
returned: success
type: string
sample: example-com
'''
################################################################################
# Imports
################################################################################
import socket
from distutils.version import LooseVersion
try:
from libcloud import __version__ as LIBCLOUD_VERSION
from libcloud.common.google import InvalidRequestError
from libcloud.common.types import LibcloudError
from libcloud.dns.types import Provider
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.types import ZoneDoesNotExistError
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
################################################################################
# Constants
################################################################################
# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
# v1 API. Earlier versions contained the beta v1 API, which has since been
# deprecated and decommissioned.
MINIMUM_LIBCLOUD_VERSION = '0.19.0'
# The libcloud Google Cloud DNS provider.
PROVIDER = Provider.GOOGLE
# The records that libcloud's Google Cloud DNS provider supports.
#
# Libcloud has a RECORD_TYPE_MAP dictionary in the provider that also contains
# this information and is the authoritative source on which records are
# supported, but accessing the dictionary requires creating a Google Cloud DNS
# driver object, which is done in a helper module.
#
# I'm hard-coding the supported record types here, because they (hopefully!)
# shouldn't change much, and it allows me to use it as a "choices" parameter
# in an AnsibleModule argument_spec.
SUPPORTED_RECORD_TYPES = [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
################################################################################
# Functions
################################################################################
def create_record(module, gcdns, zone, record):
"""Creates or overwrites a resource record."""
overwrite = module.boolean(module.params['overwrite'])
record_name = module.params['record']
record_type = module.params['type']
ttl = module.params['ttl']
record_data = module.params['record_data']
data = dict(ttl=ttl, rrdatas=record_data)
# Google Cloud DNS wants the trailing dot on all DNS names.
if record_name[-1] != '.':
record_name = record_name + '.'
# If we found a record, we need to check if the values match.
if record is not None:
# If the record matches, we obviously don't have to change anything.
if _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
return False
# The record doesn't match, so we need to check if we can overwrite it.
if not overwrite:
module.fail_json(
msg = 'cannot overwrite existing record, overwrite protection enabled',
changed = False
)
# The record either doesn't exist, or it exists and we can overwrite it.
if record is None and not module.check_mode:
# There's no existing record, so we'll just create it.
try:
gcdns.create_record(record_name, zone, record_type, data)
except InvalidRequestError as error:
if error.code == 'invalid':
# The resource record name and type are valid by themselves, but
# not when combined (e.g., an 'A' record with "www.example.com"
# as its value).
module.fail_json(
msg = 'value is invalid for the given type: ' +
"%s, got value: %s" % (record_type, record_data),
changed = False
)
elif error.code == 'cnameResourceRecordSetConflict':
# We're attempting to create a CNAME resource record when we
# already have another type of resource record with the name
# domain name.
module.fail_json(
msg = "non-CNAME resource record already exists: %s" % record_name,
changed = False
)
else:
# The error is something else that we don't know how to handle,
# so we'll just re-raise the exception.
raise
elif record is not None and not module.check_mode:
# The Google provider in libcloud doesn't support updating a record in
# place, so if the record already exists, we need to delete it and
# recreate it using the new information.
gcdns.delete_record(record)
try:
gcdns.create_record(record_name, zone, record_type, data)
except InvalidRequestError:
# Something blew up when creating the record. This will usually be a
# result of invalid value data in the new record. Unfortunately, we
# already changed the state of the record by deleting the old one,
# so we'll try to roll back before failing out.
try:
gcdns.create_record(record.name, record.zone, record.type, record.data)
module.fail_json(
msg = 'error updating record, the original record was restored',
changed = False
)
except LibcloudError:
# We deleted the old record, couldn't create the new record, and
# couldn't roll back. That really sucks. We'll dump the original
# record to the failure output so the user can resore it if
# necessary.
module.fail_json(
msg = 'error updating record, and could not restore original record, ' +
"original name: %s " % record.name +
"original zone: %s " % record.zone +
"original type: %s " % record.type +
"original data: %s" % record.data,
changed = True)
return True
def remove_record(module, gcdns, record):
"""Remove a resource record."""
overwrite = module.boolean(module.params['overwrite'])
ttl = module.params['ttl']
record_data = module.params['record_data']
# If there is no record, we're obviously done.
if record is None:
return False
# If there is an existing record, do our values match the values of the
# existing record?
if not overwrite:
if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
module.fail_json(
msg = 'cannot delete due to non-matching ttl or record_data: ' +
"ttl: %d, record_data: %s " % (ttl, record_data) +
"original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
changed = False
)
# If we got to this point, we're okay to delete the record.
if not module.check_mode:
gcdns.delete_record(record)
return True
def _get_record(gcdns, zone, record_type, record_name):
"""Gets the record object for a given FQDN."""
# The record ID is a combination of its type and FQDN. For example, the
# ID of an A record for www.example.com would be 'A:www.example.com.'
record_id = "%s:%s" % (record_type, record_name)
try:
return gcdns.get_record(zone.id, record_id)
except RecordDoesNotExistError:
return None
def _get_zone(gcdns, zone_name, zone_id):
"""Gets the zone object for a given domain name."""
if zone_id is not None:
try:
return gcdns.get_zone(zone_id)
except ZoneDoesNotExistError:
return None
# To create a zone, we need to supply a domain name. However, to delete a
# zone, we need to supply a zone ID. Zone ID's are often based on domain
# names, but that's not guaranteed, so we'll iterate through the list of
# zones to see if we can find a matching domain name.
available_zones = gcdns.iterate_zones()
found_zone = None
for zone in available_zones:
if zone.domain == zone_name:
found_zone = zone
break
return found_zone
def _records_match(old_ttl, old_record_data, new_ttl, new_record_data):
"""Checks to see if original and new TTL and values match."""
matches = True
if old_ttl != new_ttl:
matches = False
if old_record_data != new_record_data:
matches = False
return matches
def _sanity_check(module):
"""Run sanity checks that don't depend on info from the zone/record."""
overwrite = module.params['overwrite']
record_name = module.params['record']
record_type = module.params['type']
state = module.params['state']
ttl = module.params['ttl']
record_data = module.params['record_data']
# Apache libcloud needs to be installed and at least the minimum version.
if not HAS_LIBCLOUD:
module.fail_json(
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed = False
)
elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
module.fail_json(
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed = False
)
# A negative TTL is not permitted (how would they even work?!).
if ttl < 0:
module.fail_json(
msg = 'TTL cannot be less than zero, got: %d' % ttl,
changed = False
)
# Deleting SOA records is not permitted.
if record_type == 'SOA' and state == 'absent':
module.fail_json(msg='cannot delete SOA records', changed=False)
# Updating SOA records is not permitted.
if record_type == 'SOA' and state == 'present' and overwrite:
module.fail_json(msg='cannot update SOA records', changed=False)
# Some sanity checks depend on what value was supplied.
if record_data is not None and (state == 'present' or not overwrite):
# A records must contain valid IPv4 addresses.
if record_type == 'A':
for value in record_data:
try:
socket.inet_aton(value)
except socket.error:
module.fail_json(
msg = 'invalid A record value, got: %s' % value,
changed = False
)
# AAAA records must contain valid IPv6 addresses.
if record_type == 'AAAA':
for value in record_data:
try:
socket.inet_pton(socket.AF_INET6, value)
except socket.error:
module.fail_json(
msg = 'invalid AAAA record value, got: %s' % value,
changed = False
)
# CNAME and SOA records can't have multiple values.
if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
module.fail_json(
msg = 'CNAME or SOA records cannot have more than one value, ' +
"got: %s" % record_data,
changed = False
)
# Google Cloud DNS does not support wildcard NS records.
if record_type == 'NS' and record_name[0] == '*':
module.fail_json(
msg = "wildcard NS records not allowed, got: %s" % record_name,
changed = False
)
# Values for txt records must begin and end with a double quote.
if record_type == 'TXT':
for value in record_data:
if value[0] != '"' and value[-1] != '"':
module.fail_json(
msg = 'TXT record_data must be enclosed in double quotes, ' +
'got: %s' % value,
changed = False
)
def _additional_sanity_checks(module, zone):
"""Run input sanity checks that depend on info from the zone/record."""
overwrite = module.params['overwrite']
record_name = module.params['record']
record_type = module.params['type']
state = module.params['state']
# CNAME records are not allowed to have the same name as the root domain.
if record_type == 'CNAME' and record_name == zone.domain:
module.fail_json(
msg = 'CNAME records cannot match the zone name',
changed = False
)
# The root domain must always have an NS record.
if record_type == 'NS' and record_name == zone.domain and state == 'absent':
module.fail_json(
msg = 'cannot delete root NS records',
changed = False
)
# Updating NS records with the name as the root domain is not allowed
# because libcloud does not support in-place updates and root domain NS
# records cannot be removed.
if record_type == 'NS' and record_name == zone.domain and overwrite:
module.fail_json(
msg = 'cannot update existing root NS records',
changed = False
)
# SOA records with names that don't match the root domain are not permitted
# (and wouldn't make sense anyway).
if record_type == 'SOA' and record_name != zone.domain:
module.fail_json(
msg = 'non-root SOA records are not permitted, got: %s' % record_name,
changed = False
)
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent'], type='str'),
record = dict(required=True, aliases=['name'], type='str'),
zone = dict(type='str'),
zone_id = dict(type='str'),
type = dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
record_data = dict(aliases=['value'], type='list'),
ttl = dict(default=300, type='int'),
overwrite = dict(default=False, type='bool'),
service_account_email = dict(type='str'),
pem_file = dict(type='path'),
credentials_file = dict(type='path'),
project_id = dict(type='str')
),
required_if = [
('state', 'present', ['record_data']),
('overwrite', False, ['record_data'])
],
required_one_of = [['zone', 'zone_id']],
supports_check_mode = True
)
_sanity_check(module)
record_name = module.params['record']
record_type = module.params['type']
state = module.params['state']
ttl = module.params['ttl']
zone_name = module.params['zone']
zone_id = module.params['zone_id']
json_output = dict(
state = state,
record = record_name,
zone = zone_name,
zone_id = zone_id,
type = record_type,
record_data = module.params['record_data'],
ttl = ttl,
overwrite = module.boolean(module.params['overwrite'])
)
# Google Cloud DNS wants the trailing dot on all DNS names.
if zone_name is not None and zone_name[-1] != '.':
zone_name = zone_name + '.'
if record_name[-1] != '.':
record_name = record_name + '.'
# Build a connection object that we can use to connect with Google Cloud
# DNS.
gcdns = gcdns_connect(module, provider=PROVIDER)
# We need to check that the zone we're creating a record for actually
# exists.
zone = _get_zone(gcdns, zone_name, zone_id)
if zone is None and zone_name is not None:
module.fail_json(
msg = 'zone name was not found: %s' % zone_name,
changed = False
)
elif zone is None and zone_id is not None:
module.fail_json(
msg = 'zone id was not found: %s' % zone_id,
changed = False
)
# Populate the returns with the actual zone information.
json_output['zone'] = zone.domain
json_output['zone_id'] = zone.id
# We also need to check if the record we want to create or remove actually
# exists.
try:
record = _get_record(gcdns, zone, record_type, record_name)
except InvalidRequestError:
# We gave Google Cloud DNS an invalid DNS record name.
module.fail_json(
msg = 'record name is invalid: %s' % record_name,
changed = False
)
_additional_sanity_checks(module, zone)
diff = dict()
# Build the 'before' diff
if record is None:
diff['before'] = ''
diff['before_header'] = '<absent>'
else:
diff['before'] = dict(
record = record.data['name'],
type = record.data['type'],
record_data = record.data['rrdatas'],
ttl = record.data['ttl']
)
diff['before_header'] = "%s:%s" % (record_type, record_name)
# Create, remove, or modify the record.
if state == 'present':
diff['after'] = dict(
record = record_name,
type = record_type,
record_data = module.params['record_data'],
ttl = ttl
)
diff['after_header'] = "%s:%s" % (record_type, record_name)
changed = create_record(module, gcdns, zone, record)
elif state == 'absent':
diff['after'] = ''
diff['after_header'] = '<absent>'
changed = remove_record(module, gcdns, record)
module.exit_json(changed=changed, diff=diff, **json_output)
from ansible.module_utils.basic import *
from ansible.module_utils.gcdns import *
if __name__ == '__main__':
main()
| gpl-3.0 |
Anonymike/pasta-bot | plugins/google_broken.py | 1 | 3457 | import random
from util import hook, http, text, database, web
import re
def api_get(kind, query):
"""Use the RESTful Google Search API"""
url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \
'v=1.0&safe=off'
return http.get_json(url % kind, q=query)
@hook.command('search')
@hook.command('g')
@hook.command
def google(inp,db=None,chan=None):
"""google <query> -- Returns first google search result for <query>."""
trimlength = database.get(db,'channels','trimlength','chan',chan)
if not trimlength: trimlength = 9999
parsed = api_get('web', inp)
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error searching for pages: {}: {}'.format(parsed['responseStatus'], ''))
if not parsed['responseData']['results']:
return 'No results found.'
result = parsed['responseData']['results'][0]
title = http.unescape(result['titleNoFormatting'])
content = http.unescape(result['content'])
if not content: content = "No description available."
else: content = http.html.fromstring(content.replace('\n', '')).text_content()
return u'{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)
# @hook.command('image')
@hook.command('gis')
@hook.command('gi')
@hook.command('image')
@hook.command
def googleimage(inp):
"""gis <query> -- Returns first Google Image result for <query>."""
parsed = api_get('images', inp)
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error searching for images: {}: {}'.format(parsed['responseStatus'], ''))
if not parsed['responseData']['results']:
return 'no images found'
return random.choice(parsed['responseData']['results'][:10])['unescapedUrl']
@hook.command
def gcalc(inp):
"gcalc <term> -- Calculate <term> with Google Calc."
soup = http.get_soup('http://www.google.com/search', q=inp)
result = soup.find('span', {'class': 'cwcot'})
formula = soup.find('span', {'class': 'cwclet'})
if not result:
return "Could not calculate '{}'".format(inp)
return u"{} {}".format(formula.contents[0].strip(),result.contents[0].strip())
@hook.regex(r'^\>(.*\.(gif|GIF|jpg|JPG|jpeg|JPEG|png|PNG|tiff|TIFF|bmp|BMP))\s?(\d+)?')
@hook.command
def implying(inp):
""">laughing girls.gif <num> -- Returns first Google Image result for <query>."""
try: search = inp.group(1)
except: search = inp
try: num = int(inp.group(3))
except: num = 0
if 'http' in search: return
parsed = api_get('images', search)
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error searching for images: {}: {}'.format(parsed['responseStatus'], ''))
if not parsed['responseData']['results']:
return 'no images found'
try: return u'\x033\x02>{}\x02\x03 {}'.format(search, parsed['responseData']['results'][:10][num]['unescapedUrl'])
except: return u'\x033\x02>{}\x02\x03 {}'.format(search, parsed['responseData']['results'][:10][0]['unescapedUrl'])
#return random.choice(parsed['responseData']['results'][:10])['unescapedUrl']
@hook.command('nym')
@hook.command('littleanon')
@hook.command('gfy')
@hook.command
def lmgtfy(inp, bot=None):
"lmgtfy [phrase] - Posts a google link for the specified phrase"
link = "http://lmgtfy.com/?q=%s" % http.quote_plus(inp)
try:
return web.isgd(link)
except (web.ShortenError, http.HTTPError):
return link
| gpl-3.0 |
rhurkes/chasegame | venv/lib/python2.7/site-packages/flask/sessions.py | 428 | 13107 | # -*- coding: utf-8 -*-
"""
flask.sessions
~~~~~~~~~~~~~~
Implements cookie based sessions based on itsdangerous.
:copyright: (c) 2012 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import uuid
import hashlib
from base64 import b64encode, b64decode
from datetime import datetime
from werkzeug.http import http_date, parse_date
from werkzeug.datastructures import CallbackDict
from . import Markup, json
from ._compat import iteritems, text_type
from itsdangerous import URLSafeTimedSerializer, BadSignature
def total_seconds(td):
return td.days * 60 * 60 * 24 + td.seconds
class SessionMixin(object):
"""Expands a basic dictionary with an accessors that are expected
by Flask extensions and users for the session.
"""
def _get_permanent(self):
return self.get('_permanent', False)
def _set_permanent(self, value):
self['_permanent'] = bool(value)
#: this reflects the ``'_permanent'`` key in the dict.
permanent = property(_get_permanent, _set_permanent)
del _get_permanent, _set_permanent
#: some session backends can tell you if a session is new, but that is
#: not necessarily guaranteed. Use with caution. The default mixin
#: implementation just hardcodes `False` in.
new = False
#: for some backends this will always be `True`, but some backends will
#: default this to false and detect changes in the dictionary for as
#: long as changes do not happen on mutable structures in the session.
#: The default mixin implementation just hardcodes `True` in.
modified = True
class TaggedJSONSerializer(object):
"""A customized JSON serializer that supports a few extra types that
we take for granted when serializing (tuples, markup objects, datetime).
"""
def dumps(self, value):
def _tag(value):
if isinstance(value, tuple):
return {' t': [_tag(x) for x in value]}
elif isinstance(value, uuid.UUID):
return {' u': value.hex}
elif isinstance(value, bytes):
return {' b': b64encode(value).decode('ascii')}
elif callable(getattr(value, '__html__', None)):
return {' m': text_type(value.__html__())}
elif isinstance(value, list):
return [_tag(x) for x in value]
elif isinstance(value, datetime):
return {' d': http_date(value)}
elif isinstance(value, dict):
return dict((k, _tag(v)) for k, v in iteritems(value))
elif isinstance(value, str):
try:
return text_type(value)
except UnicodeError:
raise UnexpectedUnicodeError(u'A byte string with '
u'non-ASCII data was passed to the session system '
u'which can only store unicode strings. Consider '
u'base64 encoding your string (String was %r)' % value)
return value
return json.dumps(_tag(value), separators=(',', ':'))
def loads(self, value):
def object_hook(obj):
if len(obj) != 1:
return obj
the_key, the_value = next(iteritems(obj))
if the_key == ' t':
return tuple(the_value)
elif the_key == ' u':
return uuid.UUID(the_value)
elif the_key == ' b':
return b64decode(the_value)
elif the_key == ' m':
return Markup(the_value)
elif the_key == ' d':
return parse_date(the_value)
return obj
return json.loads(value, object_hook=object_hook)
session_json_serializer = TaggedJSONSerializer()
class SecureCookieSession(CallbackDict, SessionMixin):
"""Baseclass for sessions based on signed cookies."""
def __init__(self, initial=None):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.modified = False
class NullSession(SecureCookieSession):
"""Class used to generate nicer error messages if sessions are not
available. Will still allow read-only access to the empty session
but fail on setting.
"""
def _fail(self, *args, **kwargs):
raise RuntimeError('the session is unavailable because no secret '
'key was set. Set the secret_key on the '
'application to something unique and secret.')
__setitem__ = __delitem__ = clear = pop = popitem = \
update = setdefault = _fail
del _fail
class SessionInterface(object):
"""The basic interface you have to implement in order to replace the
default session interface which uses werkzeug's securecookie
implementation. The only methods you have to implement are
:meth:`open_session` and :meth:`save_session`, the others have
useful defaults which you don't need to change.
The session object returned by the :meth:`open_session` method has to
provide a dictionary like interface plus the properties and methods
from the :class:`SessionMixin`. We recommend just subclassing a dict
and adding that mixin::
class Session(dict, SessionMixin):
pass
If :meth:`open_session` returns `None` Flask will call into
:meth:`make_null_session` to create a session that acts as replacement
if the session support cannot work because some requirement is not
fulfilled. The default :class:`NullSession` class that is created
will complain that the secret key was not set.
To replace the session interface on an application all you have to do
is to assign :attr:`flask.Flask.session_interface`::
app = Flask(__name__)
app.session_interface = MySessionInterface()
.. versionadded:: 0.8
"""
#: :meth:`make_null_session` will look here for the class that should
#: be created when a null session is requested. Likewise the
#: :meth:`is_null_session` method will perform a typecheck against
#: this type.
null_session_class = NullSession
#: A flag that indicates if the session interface is pickle based.
#: This can be used by flask extensions to make a decision in regards
#: to how to deal with the session object.
#:
#: .. versionadded:: 0.10
pickle_based = False
def make_null_session(self, app):
"""Creates a null session which acts as a replacement object if the
real session support could not be loaded due to a configuration
error. This mainly aids the user experience because the job of the
null session is to still support lookup without complaining but
modifications are answered with a helpful error message of what
failed.
This creates an instance of :attr:`null_session_class` by default.
"""
return self.null_session_class()
def is_null_session(self, obj):
"""Checks if a given object is a null session. Null sessions are
not asked to be saved.
This checks if the object is an instance of :attr:`null_session_class`
by default.
"""
return isinstance(obj, self.null_session_class)
def get_cookie_domain(self, app):
"""Helpful helper method that returns the cookie domain that should
be used for the session cookie if session cookies are used.
"""
if app.config['SESSION_COOKIE_DOMAIN'] is not None:
return app.config['SESSION_COOKIE_DOMAIN']
if app.config['SERVER_NAME'] is not None:
            # chop off the port, which is usually not supported by browsers
rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]
# Google chrome does not like cookies set to .localhost, so
# we just go with no domain then. Flask documents anyways that
# cross domain cookies need a fully qualified domain name
if rv == '.localhost':
rv = None
# If we infer the cookie domain from the server name we need
# to check if we are in a subpath. In that case we can't
# set a cross domain cookie.
if rv is not None:
path = self.get_cookie_path(app)
if path != '/':
rv = rv.lstrip('.')
return rv
def get_cookie_path(self, app):
"""Returns the path for which the cookie should be valid. The
        default implementation uses the value from the ``SESSION_COOKIE_PATH``
config var if it's set, and falls back to ``APPLICATION_ROOT`` or
uses ``/`` if it's `None`.
"""
return app.config['SESSION_COOKIE_PATH'] or \
app.config['APPLICATION_ROOT'] or '/'
def get_cookie_httponly(self, app):
"""Returns True if the session cookie should be httponly. This
currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
config var.
"""
return app.config['SESSION_COOKIE_HTTPONLY']
def get_cookie_secure(self, app):
"""Returns True if the cookie should be secure. This currently
just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
"""
return app.config['SESSION_COOKIE_SECURE']
def get_expiration_time(self, app, session):
"""A helper method that returns an expiration date for the session
or `None` if the session is linked to the browser session. The
default implementation returns now + the permanent session
lifetime configured on the application.
"""
if session.permanent:
return datetime.utcnow() + app.permanent_session_lifetime
def open_session(self, app, request):
"""This method has to be implemented and must either return `None`
in case the loading failed because of a configuration error or an
instance of a session object which implements a dictionary like
interface + the methods and attributes on :class:`SessionMixin`.
"""
raise NotImplementedError()
def save_session(self, app, session, response):
"""This is called for actual sessions returned by :meth:`open_session`
at the end of the request. This is still called during a request
context so if you absolutely need access to the request you can do
that.
"""
raise NotImplementedError()
class SecureCookieSessionInterface(SessionInterface):
"""The default session interface that stores sessions in signed cookies
through the :mod:`itsdangerous` module.
"""
#: the salt that should be applied on top of the secret key for the
#: signing of cookie based sessions.
salt = 'cookie-session'
#: the hash function to use for the signature. The default is sha1
digest_method = staticmethod(hashlib.sha1)
#: the name of the itsdangerous supported key derivation. The default
#: is hmac.
key_derivation = 'hmac'
#: A python serializer for the payload. The default is a compact
#: JSON derived serializer with support for some extra Python types
#: such as datetime objects or tuples.
serializer = session_json_serializer
session_class = SecureCookieSession
def get_signing_serializer(self, app):
if not app.secret_key:
return None
signer_kwargs = dict(
key_derivation=self.key_derivation,
digest_method=self.digest_method
)
return URLSafeTimedSerializer(app.secret_key, salt=self.salt,
serializer=self.serializer,
signer_kwargs=signer_kwargs)
def open_session(self, app, request):
s = self.get_signing_serializer(app)
if s is None:
return None
val = request.cookies.get(app.session_cookie_name)
if not val:
return self.session_class()
max_age = total_seconds(app.permanent_session_lifetime)
try:
data = s.loads(val, max_age=max_age)
return self.session_class(data)
except BadSignature:
return self.session_class()
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
path = self.get_cookie_path(app)
if not session:
if session.modified:
response.delete_cookie(app.session_cookie_name,
domain=domain, path=path)
return
httponly = self.get_cookie_httponly(app)
secure = self.get_cookie_secure(app)
expires = self.get_expiration_time(app, session)
val = self.get_signing_serializer(app).dumps(dict(session))
response.set_cookie(app.session_cookie_name, val,
expires=expires, httponly=httponly,
domain=domain, path=path, secure=secure)
from flask.debughelpers import UnexpectedUnicodeError
| mit |
aequitas/home-assistant | homeassistant/components/homematicip_cloud/sensor.py | 2 | 12041 | """Support for HomematicIP Cloud sensors."""
import logging
from homematicip.aio.device import (
AsyncBrandSwitchMeasuring, AsyncFullFlushSwitchMeasuring,
AsyncHeatingThermostat, AsyncHeatingThermostatCompact, AsyncLightSensor,
AsyncMotionDetectorIndoor, AsyncMotionDetectorOutdoor,
AsyncMotionDetectorPushButton, AsyncPlugableSwitchMeasuring,
AsyncPresenceDetectorIndoor, AsyncTemperatureHumiditySensorDisplay,
AsyncTemperatureHumiditySensorOutdoor,
AsyncTemperatureHumiditySensorWithoutDisplay, AsyncWeatherSensor,
AsyncWeatherSensorPlus, AsyncWeatherSensorPro)
from homematicip.aio.home import AsyncHome
from homematicip.base.enums import ValveState
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_ILLUMINANCE, DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE, POWER_WATT, TEMP_CELSIUS)
from homeassistant.core import HomeAssistant
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
_LOGGER = logging.getLogger(__name__)
ATTR_TEMPERATURE_OFFSET = 'temperature_offset'
ATTR_WIND_DIRECTION = 'wind_direction'
ATTR_WIND_DIRECTION_VARIATION = 'wind_direction_variation_in_degree'
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the HomematicIP Cloud sensors devices."""
pass
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry,
async_add_entities) -> None:
"""Set up the HomematicIP Cloud sensors from a config entry."""
home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home
devices = [HomematicipAccesspointStatus(home)]
for device in home.devices:
if isinstance(device, (AsyncHeatingThermostat,
AsyncHeatingThermostatCompact)):
devices.append(HomematicipHeatingThermostat(home, device))
devices.append(HomematicipTemperatureSensor(home, device))
if isinstance(device, (AsyncTemperatureHumiditySensorDisplay,
AsyncTemperatureHumiditySensorWithoutDisplay,
AsyncTemperatureHumiditySensorOutdoor,
AsyncWeatherSensor,
AsyncWeatherSensorPlus,
AsyncWeatherSensorPro)):
devices.append(HomematicipTemperatureSensor(home, device))
devices.append(HomematicipHumiditySensor(home, device))
if isinstance(device, (AsyncLightSensor, AsyncMotionDetectorIndoor,
AsyncMotionDetectorOutdoor,
AsyncMotionDetectorPushButton,
AsyncPresenceDetectorIndoor,
AsyncWeatherSensor,
AsyncWeatherSensorPlus,
AsyncWeatherSensorPro)):
devices.append(HomematicipIlluminanceSensor(home, device))
if isinstance(device, (AsyncPlugableSwitchMeasuring,
AsyncBrandSwitchMeasuring,
AsyncFullFlushSwitchMeasuring)):
devices.append(HomematicipPowerSensor(home, device))
if isinstance(device, (AsyncWeatherSensor,
AsyncWeatherSensorPlus,
AsyncWeatherSensorPro)):
devices.append(HomematicipWindspeedSensor(home, device))
if isinstance(device, (AsyncWeatherSensorPlus,
AsyncWeatherSensorPro)):
devices.append(HomematicipTodayRainSensor(home, device))
if devices:
async_add_entities(devices)
class HomematicipAccesspointStatus(HomematicipGenericDevice):
"""Representation of an HomeMaticIP Cloud access point."""
def __init__(self, home: AsyncHome) -> None:
"""Initialize access point device."""
super().__init__(home, home)
@property
def device_info(self):
"""Return device specific attributes."""
# Adds a sensor to the existing HAP device
return {
'identifiers': {
# Serial numbers of Homematic IP device
(HMIPC_DOMAIN, self._device.id)
}
}
@property
def icon(self) -> str:
"""Return the icon of the access point device."""
return 'mdi:access-point-network'
@property
def state(self) -> float:
"""Return the state of the access point."""
return self._home.dutyCycle
@property
def available(self) -> bool:
"""Device available."""
return self._home.connected
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return '%'
class HomematicipHeatingThermostat(HomematicipGenericDevice):
"""Represenation of a HomematicIP heating thermostat device."""
def __init__(self, home: AsyncHome, device) -> None:
"""Initialize heating thermostat device."""
super().__init__(home, device, 'Heating')
@property
def icon(self) -> str:
"""Return the icon."""
if super().icon:
return super().icon
if self._device.valveState != ValveState.ADAPTION_DONE:
return 'mdi:alert'
return 'mdi:radiator'
@property
def state(self) -> int:
"""Return the state of the radiator valve."""
if self._device.valveState != ValveState.ADAPTION_DONE:
return self._device.valveState
return round(self._device.valvePosition*100)
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return '%'
class HomematicipHumiditySensor(HomematicipGenericDevice):
"""Represenation of a HomematicIP Cloud humidity device."""
def __init__(self, home: AsyncHome, device) -> None:
"""Initialize the thermometer device."""
super().__init__(home, device, 'Humidity')
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_HUMIDITY
@property
def state(self) -> int:
"""Return the state."""
return self._device.humidity
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return '%'
class HomematicipTemperatureSensor(HomematicipGenericDevice):
"""Representation of a HomematicIP Cloud thermometer device."""
def __init__(self, home: AsyncHome, device) -> None:
"""Initialize the thermometer device."""
super().__init__(home, device, 'Temperature')
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_TEMPERATURE
@property
def state(self) -> float:
"""Return the state."""
if hasattr(self._device, 'valveActualTemperature'):
return self._device.valveActualTemperature
return self._device.actualTemperature
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return TEMP_CELSIUS
@property
def device_state_attributes(self):
"""Return the state attributes of the windspeed sensor."""
attr = super().device_state_attributes
if hasattr(self._device, 'temperatureOffset') and \
self._device.temperatureOffset:
attr[ATTR_TEMPERATURE_OFFSET] = self._device.temperatureOffset
return attr
class HomematicipIlluminanceSensor(HomematicipGenericDevice):
"""Represenation of a HomematicIP Illuminance device."""
def __init__(self, home: AsyncHome, device) -> None:
"""Initialize the device."""
super().__init__(home, device, 'Illuminance')
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_ILLUMINANCE
@property
def state(self) -> float:
"""Return the state."""
if hasattr(self._device, 'averageIllumination'):
return self._device.averageIllumination
return self._device.illumination
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return 'lx'
class HomematicipPowerSensor(HomematicipGenericDevice):
"""Represenation of a HomematicIP power measuring device."""
def __init__(self, home: AsyncHome, device) -> None:
"""Initialize the device."""
super().__init__(home, device, 'Power')
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_POWER
@property
def state(self) -> float:
"""Represenation of the HomematicIP power comsumption value."""
return self._device.currentPowerConsumption
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return POWER_WATT
class HomematicipWindspeedSensor(HomematicipGenericDevice):
"""Represenation of a HomematicIP wind speed sensor."""
def __init__(self, home: AsyncHome, device) -> None:
"""Initialize the device."""
super().__init__(home, device, 'Windspeed')
@property
def state(self) -> float:
"""Represenation of the HomematicIP wind speed value."""
return self._device.windSpeed
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return 'km/h'
@property
def device_state_attributes(self):
"""Return the state attributes of the wind speed sensor."""
attr = super().device_state_attributes
if hasattr(self._device, 'windDirection') and \
self._device.windDirection:
attr[ATTR_WIND_DIRECTION] = \
_get_wind_direction(self._device.windDirection)
if hasattr(self._device, 'windDirectionVariation') and \
self._device.windDirectionVariation:
attr[ATTR_WIND_DIRECTION_VARIATION] = \
self._device.windDirectionVariation
return attr
class HomematicipTodayRainSensor(HomematicipGenericDevice):
"""Represenation of a HomematicIP rain counter of a day sensor."""
def __init__(self, home: AsyncHome, device) -> None:
"""Initialize the device."""
super().__init__(home, device, 'Today Rain')
@property
def state(self) -> float:
"""Represenation of the HomematicIP todays rain value."""
return round(self._device.todayRainCounter, 2)
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return 'mm'
def _get_wind_direction(wind_direction_degree: float) -> str:
"""Convert wind direction degree to named direction."""
if 11.25 <= wind_direction_degree < 33.75:
return 'NNE'
if 33.75 <= wind_direction_degree < 56.25:
return 'NE'
if 56.25 <= wind_direction_degree < 78.75:
return 'ENE'
if 78.75 <= wind_direction_degree < 101.25:
return 'E'
if 101.25 <= wind_direction_degree < 123.75:
return 'ESE'
if 123.75 <= wind_direction_degree < 146.25:
return 'SE'
if 146.25 <= wind_direction_degree < 168.75:
return 'SSE'
if 168.75 <= wind_direction_degree < 191.25:
return 'S'
if 191.25 <= wind_direction_degree < 213.75:
return 'SSW'
if 213.75 <= wind_direction_degree < 236.25:
return 'SW'
if 236.25 <= wind_direction_degree < 258.75:
return 'WSW'
if 258.75 <= wind_direction_degree < 281.25:
return 'W'
if 281.25 <= wind_direction_degree < 303.75:
return 'WNW'
if 303.75 <= wind_direction_degree < 326.25:
return 'NW'
if 326.25 <= wind_direction_degree < 348.75:
return 'NNW'
return 'N'
| apache-2.0 |
boshnivolo/TIY-GitHub | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py | 395 | 65937 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalDependencies_excluded': 'file3',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'Pure',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Manifest/notgood3',
'Warning: for Manifest/GenerateCatalogFiles, '
"expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure that inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Manifest:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
| mit |
vhaupert/mitmproxy | test/pathod/tservers.py | 4 | 3619 | import os
import tempfile
import re
import shutil
import requests
import io
import urllib
from mitmproxy.net import tcp
from mitmproxy.utils import data
from pathod import language
from pathod import pathoc
from pathod import pathod
from pathod import test
from pathod.pathod import CA_CERT_NAME
cdata = data.Data(__name__)
def treader(bytes):
"""
    Construct a tcp.Reader object from bytes.
"""
fp = io.BytesIO(bytes)
return tcp.Reader(fp)
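# A minimal usage sketch of the helper above (hypothetical, not used by the test
# classes below); the returned tcp.Reader wraps the BytesIO and exposes a
# file-like read interface:
#   rdr = treader(b"GET / HTTP/1.1\r\n")
#   start = rdr.read(5)   # -> b"GET /"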
class DaemonTests:
nohang = False
ssl = False
timeout = None
hexdump = False
ssloptions = None
nocraft = False
explain = True
@classmethod
def setup_class(cls):
opts = cls.ssloptions or {}
cls.confdir = tempfile.mkdtemp()
opts["confdir"] = cls.confdir
so = pathod.SSLOptions(**opts)
cls.d = test.Daemon(
staticdir=cdata.path("data"),
anchors=[
(re.compile("/anchor/.*"), "202:da")
],
ssl=cls.ssl,
ssloptions=so,
sizelimit=1 * 1024 * 1024,
nohang=cls.nohang,
timeout=cls.timeout,
hexdump=cls.hexdump,
nocraft=cls.nocraft,
logreq=True,
logresp=True,
explain=cls.explain
)
@classmethod
def teardown_class(cls):
cls.d.shutdown()
shutil.rmtree(cls.confdir)
def teardown(self):
self.d.wait_for_silence()
self.d.clear_log()
def _getpath(self, path, params=None):
scheme = "https" if self.ssl else "http"
resp = requests.get(
"%s://localhost:%s/%s" % (
scheme,
self.d.port,
path
),
verify=os.path.join(self.d.thread.server.ssloptions.confdir, CA_CERT_NAME),
params=params
)
return resp
def getpath(self, path, params=None):
logfp = io.StringIO()
c = pathoc.Pathoc(
("localhost", self.d.port),
ssl=self.ssl,
fp=logfp,
)
with c.connect():
if params:
path = path + "?" + urllib.parse.urlencode(params)
resp = c.request("get:%s" % path)
return resp
def get(self, spec):
logfp = io.StringIO()
c = pathoc.Pathoc(
("localhost", self.d.port),
ssl=self.ssl,
fp=logfp,
)
with c.connect():
resp = c.request(
"get:/p/%s" % urllib.parse.quote(spec)
)
return resp
def pathoc(
self,
specs,
timeout=None,
connect_to=None,
ssl=None,
ws_read_limit=None,
use_http2=False,
):
"""
Returns a (messages, text log) tuple.
"""
if ssl is None:
ssl = self.ssl
logfp = io.StringIO()
c = pathoc.Pathoc(
("localhost", self.d.port),
ssl=ssl,
ws_read_limit=ws_read_limit,
timeout=timeout,
fp=logfp,
use_http2=use_http2,
)
with c.connect(connect_to):
ret = []
for i in specs:
resp = c.request(i)
if resp:
ret.append(resp)
for frm in c.wait():
ret.append(frm)
c.stop()
return ret, logfp.getvalue()
def render(r, settings=language.Settings()):
r = r.resolve(settings)
s = io.BytesIO()
assert language.serve(r, s, settings)
return s.getvalue()
| mit |
doduytrung/odoo-8.0 | addons/project_timesheet/__openerp__.py | 260 | 2151 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Bill Time on Tasks',
'version': '1.0',
'category': 'Project Management',
'description': """
Synchronization of project task work entries with timesheet entries.
====================================================================
This module keeps the work entries recorded under Project Management tasks in
sync with the timesheet line entries for a given date and user: creating,
editing or deleting an entry on one side is reflected on the other.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/project-management',
'depends': ['resource', 'project', 'hr_timesheet_sheet', 'hr_timesheet_invoice', 'account_analytic_analysis', 'procurement'],
'data': [
'security/ir.model.access.csv',
'security/project_timesheet_security.xml',
'report/task_report_view.xml',
'project_timesheet_view.xml',
],
'demo': ['project_timesheet_demo.xml'],
'test': [
'test/worktask_entry_to_timesheetline_entry.yml',
'test/work_timesheet.yml',
],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
enthought/traitsgui | enthought/pyface/message_dialog.py | 3 | 1716 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" The implementation of a dialog that displays a message. """
# Convenience functions.
def information(parent, message, title='Information'):
""" Convenience function to show an information message dialog. """
dialog = MessageDialog(
parent=parent, message=message, title=title, severity='information'
)
dialog.open()
return
def warning(parent, message, title='Warning'):
""" Convenience function to show a warning message dialog. """
dialog = MessageDialog(
parent=parent, message=message, title=title, severity='warning'
)
dialog.open()
return
def error(parent, message, title='Error'):
""" Convenience function to show an error message dialog. """
dialog = MessageDialog(
parent=parent, message=message, title=title, severity='error'
)
dialog.open()
return
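# A minimal usage sketch of the convenience functions above (assumes a GUI toolkit
# backend is available; passing None as the parent is accepted):
#   information(None, 'Export finished successfully')
#   warning(None, 'Disk space is running low')
#   error(None, 'Could not open the data file', title='File Error')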
# Import the toolkit specific version.
from toolkit import toolkit_object
MessageDialog = toolkit_object('message_dialog:MessageDialog')
#### EOF ######################################################################
| bsd-3-clause |
Asquera/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/sunf90.py | 61 | 2180 | """SCons.Tool.sunf90
Tool-specific initialization for sunf90, the Sun Studio F90 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf90.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf90', 'f90']
def generate(env):
"""Add Builders and construction variables for sun f90 compiler to an
Environment."""
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f90'
env['FORTRAN'] = fcomp
env['F90'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF90'] = '$F90'
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF90FLAGS'] = SCons.Util.CLVar('$F90FLAGS -KPIC')
def exists(env):
return env.Detect(compilers)
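# Hypothetical SConstruct usage sketch (illustrative only, not part of this tool
# module): selecting the tool pulls in the FORTRAN/F90 variables set by generate().
#   env = Environment(tools=['sunf90'])
#   env.Program('hello', 'hello.f90')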
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
rally12/deep-learning | language-translation/problem_unittests.py | 98 | 13080 | import numpy as np
import tensorflow as tf
import itertools
import collections
import helper
def _print_success_message():
print('Tests Passed')
def test_text_to_ids(text_to_ids):
test_source_text = 'new jersey is sometimes quiet during autumn , and it is snowy in april .\nthe united states is usually chilly during july , and it is usually freezing in november .\ncalifornia is usually quiet during march , and it is usually hot in june .\nthe united states is sometimes mild during june , and it is cold in september .'
test_target_text = 'new jersey est parfois calme pendant l\' automne , et il est neigeux en avril .\nles états-unis est généralement froid en juillet , et il gèle habituellement en novembre .\ncalifornia est généralement calme en mars , et il est généralement chaud en juin .\nles états-unis est parfois légère en juin , et il fait froid en septembre .'
test_source_text = test_source_text.lower()
test_target_text = test_target_text.lower()
source_vocab_to_int, source_int_to_vocab = helper.create_lookup_tables(test_source_text)
target_vocab_to_int, target_int_to_vocab = helper.create_lookup_tables(test_target_text)
test_source_id_seq, test_target_id_seq = text_to_ids(test_source_text, test_target_text, source_vocab_to_int, target_vocab_to_int)
assert len(test_source_id_seq) == len(test_source_text.split('\n')),\
'source_id_text has wrong length, it should be {}.'.format(len(test_source_text.split('\n')))
assert len(test_target_id_seq) == len(test_target_text.split('\n')), \
'target_id_text has wrong length, it should be {}.'.format(len(test_target_text.split('\n')))
target_not_iter = [type(x) for x in test_source_id_seq if not isinstance(x, collections.Iterable)]
assert not target_not_iter,\
        'Element in source_id_text is not iterable. Found type {}'.format(target_not_iter[0])
target_not_iter = [type(x) for x in test_target_id_seq if not isinstance(x, collections.Iterable)]
assert not target_not_iter, \
        'Element in target_id_text is not iterable. Found type {}'.format(target_not_iter[0])
source_changed_length = [(words, word_ids)
for words, word_ids in zip(test_source_text.split('\n'), test_source_id_seq)
if len(words.split()) != len(word_ids)]
assert not source_changed_length,\
'Source text changed in size from {} word(s) to {} id(s): {}'.format(
len(source_changed_length[0][0].split()), len(source_changed_length[0][1]), source_changed_length[0][1])
target_missing_end = [word_ids for word_ids in test_target_id_seq if word_ids[-1] != target_vocab_to_int['<EOS>']]
assert not target_missing_end,\
'Missing <EOS> id at the end of {}'.format(target_missing_end[0])
target_bad_size = [(words.split(), word_ids)
for words, word_ids in zip(test_target_text.split('\n'), test_target_id_seq)
if len(word_ids) != len(words.split()) + 1]
assert not target_bad_size,\
'Target text incorrect size. {} should be length {}'.format(
target_bad_size[0][1], len(target_bad_size[0][0]) + 1)
source_bad_id = [(word, word_id)
for word, word_id in zip(
[word for sentence in test_source_text.split('\n') for word in sentence.split()],
itertools.chain.from_iterable(test_source_id_seq))
if source_vocab_to_int[word] != word_id]
assert not source_bad_id,\
'Source word incorrectly converted from {} to id {}.'.format(source_bad_id[0][0], source_bad_id[0][1])
target_bad_id = [(word, word_id)
for word, word_id in zip(
[word for sentence in test_target_text.split('\n') for word in sentence.split()],
[word_id for word_ids in test_target_id_seq for word_id in word_ids[:-1]])
if target_vocab_to_int[word] != word_id]
assert not target_bad_id,\
'Target word incorrectly converted from {} to id {}.'.format(target_bad_id[0][0], target_bad_id[0][1])
_print_success_message()
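# A minimal reference sketch of the kind of text_to_ids implementation that
# test_text_to_ids above expects; the graded notebook solution may differ, and
# the name below is hypothetical and unused by these tests.
def _example_text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    # One list of word ids per source sentence, same length as the sentence.
    source_id_text = [[source_vocab_to_int[word] for word in line.split()]
                      for line in source_text.split('\n')]
    # Each target sentence additionally gets an <EOS> id appended.
    target_id_text = [[target_vocab_to_int[word] for word in line.split()] +
                      [target_vocab_to_int['<EOS>']]
                      for line in target_text.split('\n')]
    return source_id_text, target_id_text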
def test_model_inputs(model_inputs):
with tf.Graph().as_default():
input_data, targets, lr, keep_prob = model_inputs()
# Check type
assert input_data.op.type == 'Placeholder',\
'Input is not a Placeholder.'
assert targets.op.type == 'Placeholder',\
'Targets is not a Placeholder.'
assert lr.op.type == 'Placeholder',\
'Learning Rate is not a Placeholder.'
assert keep_prob.op.type == 'Placeholder', \
'Keep Probability is not a Placeholder.'
# Check name
assert input_data.name == 'input:0',\
'Input has bad name. Found name {}'.format(input_data.name)
assert keep_prob.name == 'keep_prob:0', \
'Keep Probability has bad name. Found name {}'.format(keep_prob.name)
assert tf.assert_rank(input_data, 2, message='Input data has wrong rank')
assert tf.assert_rank(targets, 2, message='Targets has wrong rank')
assert tf.assert_rank(lr, 0, message='Learning Rate has wrong rank')
assert tf.assert_rank(keep_prob, 0, message='Keep Probability has wrong rank')
_print_success_message()
def test_encoding_layer(encoding_layer):
rnn_size = 512
batch_size = 64
num_layers = 3
with tf.Graph().as_default():
rnn_inputs = tf.placeholder(tf.float32, [batch_size, 22, 1000])
keep_prob = tf.placeholder(tf.float32)
states = encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob)
assert len(states) == num_layers,\
'Found {} state(s). It should be {} states.'.format(len(states), num_layers)
bad_types = [type(state) for state in states if not isinstance(state, tf.contrib.rnn.LSTMStateTuple)]
assert not bad_types,\
'Found wrong type: {}'.format(bad_types[0])
bad_shapes = [state_tensor.get_shape()
for state in states
for state_tensor in state
if state_tensor.get_shape().as_list() not in [[None, rnn_size], [batch_size, rnn_size]]]
assert not bad_shapes,\
'Found wrong shape: {}'.format(bad_shapes[0])
_print_success_message()
def test_decoding_layer(decoding_layer):
batch_size = 64
vocab_size = 1000
embedding_size = 200
sequence_length = 22
rnn_size = 512
num_layers = 3
target_vocab_to_int = {'<EOS>': 1, '<GO>': 3}
with tf.Graph().as_default():
dec_embed_input = tf.placeholder(tf.float32, [batch_size, 22, embedding_size])
dec_embeddings = tf.placeholder(tf.float32, [vocab_size, embedding_size])
keep_prob = tf.placeholder(tf.float32)
state = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(tf.float32, [None, rnn_size]),
tf.placeholder(tf.float32, [None, rnn_size]))
encoder_state = (state, state, state)
train_output, inf_output = decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size,
sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)
assert isinstance(train_output, tf.Tensor),\
'Train Logits is wrong type: {}'.format(type(train_output))
assert isinstance(inf_output, tf.Tensor), \
'Inference Logits is wrong type: {}'.format(type(inf_output))
assert train_output.get_shape().as_list() == [batch_size, None, vocab_size],\
'Train Logits is the wrong shape: {}'.format(train_output.get_shape())
assert inf_output.get_shape().as_list() == [None, None, vocab_size], \
'Inference Logits is the wrong shape: {}'.format(inf_output.get_shape())
_print_success_message()
def test_seq2seq_model(seq2seq_model):
batch_size = 64
target_vocab_size = 300
sequence_length = 22
rnn_size = 512
num_layers = 3
target_vocab_to_int = {'<EOS>': 1, '<GO>': 3}
with tf.Graph().as_default():
input_data = tf.placeholder(tf.int32, [64, 22])
target_data = tf.placeholder(tf.int32, [64, 22])
keep_prob = tf.placeholder(tf.float32)
train_output, inf_output = seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length,
200, target_vocab_size, 64, 80, rnn_size, num_layers, target_vocab_to_int)
assert isinstance(train_output, tf.Tensor),\
'Train Logits is wrong type: {}'.format(type(train_output))
assert isinstance(inf_output, tf.Tensor), \
'Inference Logits is wrong type: {}'.format(type(inf_output))
assert train_output.get_shape().as_list() == [batch_size, None, target_vocab_size],\
'Train Logits is the wrong shape: {}'.format(train_output.get_shape())
assert inf_output.get_shape().as_list() == [None, None, target_vocab_size], \
'Inference Logits is the wrong shape: {}'.format(inf_output.get_shape())
_print_success_message()
def test_sentence_to_seq(sentence_to_seq):
sentence = 'this is a test sentence'
vocab_to_int = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, 'this': 3, 'is': 6, 'a': 5, 'sentence': 4}
output = sentence_to_seq(sentence, vocab_to_int)
assert len(output) == 5,\
'Wrong length. Found a length of {}'.format(len(output))
assert output[3] == 2,\
'Missing <UNK> id.'
assert np.array_equal(output, [3, 6, 5, 2, 4]),\
        'Incorrect output. Found {}'.format(output)
_print_success_message()
def test_process_decoding_input(process_decoding_input):
batch_size = 2
seq_length = 3
target_vocab_to_int = {'<GO>': 3}
with tf.Graph().as_default():
target_data = tf.placeholder(tf.int32, [batch_size, seq_length])
dec_input = process_decoding_input(target_data, target_vocab_to_int, batch_size)
assert dec_input.get_shape() == (batch_size, seq_length),\
'Wrong shape returned. Found {}'.format(dec_input.get_shape())
test_target_data = [[10, 20, 30], [40, 18, 23]]
with tf.Session() as sess:
test_dec_input = sess.run(dec_input, {target_data: test_target_data})
assert test_dec_input[0][0] == target_vocab_to_int['<GO>'] and\
test_dec_input[1][0] == target_vocab_to_int['<GO>'],\
'Missing GO Id.'
_print_success_message()
def test_decoding_layer_train(decoding_layer_train):
batch_size = 64
vocab_size = 1000
embedding_size = 200
sequence_length = 22
rnn_size = 512
num_layers = 3
with tf.Graph().as_default():
with tf.variable_scope("decoding") as decoding_scope:
dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
dec_embed_input = tf.placeholder(tf.float32, [batch_size, 22, embedding_size])
keep_prob = tf.placeholder(tf.float32)
state = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(tf.float32, [None, rnn_size]),
tf.placeholder(tf.float32, [None, rnn_size]))
encoder_state = (state, state, state)
train_logits = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length,
decoding_scope, output_fn, keep_prob)
assert train_logits.get_shape().as_list() == [batch_size, None, vocab_size], \
'Wrong shape returned. Found {}'.format(train_logits.get_shape())
_print_success_message()
def test_decoding_layer_infer(decoding_layer_infer):
vocab_size = 1000
sequence_length = 22
embedding_size = 200
rnn_size = 512
num_layers = 3
with tf.Graph().as_default():
with tf.variable_scope("decoding") as decoding_scope:
dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
dec_embeddings = tf.placeholder(tf.float32, [vocab_size, embedding_size])
keep_prob = tf.placeholder(tf.float32)
state = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(tf.float32, [None, rnn_size]),
tf.placeholder(tf.float32, [None, rnn_size]))
encoder_state = (state, state, state)
infer_logits = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, 10, 20,
sequence_length, vocab_size, decoding_scope, output_fn, keep_prob)
assert infer_logits.get_shape().as_list() == [None, None, vocab_size], \
'Wrong shape returned. Found {}'.format(infer_logits.get_shape())
_print_success_message()
| mit |
earshel/PokeyPyManager | POGOProtos/Networking/Responses/CollectDailyDefenderBonusResponse_pb2.py | 16 | 5285 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/CollectDailyDefenderBonusResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/CollectDailyDefenderBonusResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\nGPOGOProtos/Networking/Responses/CollectDailyDefenderBonusResponse.proto\x12\x1fPOGOProtos.Networking.Responses\"\x97\x02\n!CollectDailyDefenderBonusResponse\x12Y\n\x06result\x18\x01 \x01(\x0e\x32I.POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.Result\x12\x15\n\rcurrency_type\x18\x02 \x03(\t\x12\x18\n\x10\x63urrency_awarded\x18\x03 \x03(\x05\x12\x17\n\x0f\x64\x65\x66\x65nders_count\x18\x04 \x01(\x05\"M\n\x06Result\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x0b\n\x07\x46\x41ILURE\x10\x02\x12\x0c\n\x08TOO_SOON\x10\x03\x12\x10\n\x0cNO_DEFENDERS\x10\x04\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_COLLECTDAILYDEFENDERBONUSRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSET', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAILURE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TOO_SOON', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NO_DEFENDERS', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=311,
serialized_end=388,
)
_sym_db.RegisterEnumDescriptor(_COLLECTDAILYDEFENDERBONUSRESPONSE_RESULT)
_COLLECTDAILYDEFENDERBONUSRESPONSE = _descriptor.Descriptor(
name='CollectDailyDefenderBonusResponse',
full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='currency_type', full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.currency_type', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='currency_awarded', full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.currency_awarded', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='defenders_count', full_name='POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse.defenders_count', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_COLLECTDAILYDEFENDERBONUSRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=388,
)
_COLLECTDAILYDEFENDERBONUSRESPONSE.fields_by_name['result'].enum_type = _COLLECTDAILYDEFENDERBONUSRESPONSE_RESULT
_COLLECTDAILYDEFENDERBONUSRESPONSE_RESULT.containing_type = _COLLECTDAILYDEFENDERBONUSRESPONSE
DESCRIPTOR.message_types_by_name['CollectDailyDefenderBonusResponse'] = _COLLECTDAILYDEFENDERBONUSRESPONSE
CollectDailyDefenderBonusResponse = _reflection.GeneratedProtocolMessageType('CollectDailyDefenderBonusResponse', (_message.Message,), dict(
DESCRIPTOR = _COLLECTDAILYDEFENDERBONUSRESPONSE,
__module__ = 'POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.CollectDailyDefenderBonusResponse)
))
_sym_db.RegisterMessage(CollectDailyDefenderBonusResponse)
# @@protoc_insertion_point(module_scope)
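# Hypothetical usage sketch of the generated message type (illustrative only, not
# part of the protoc output); field and enum names follow the descriptor above:
#   resp = CollectDailyDefenderBonusResponse()
#   resp.result = CollectDailyDefenderBonusResponse.SUCCESS
#   resp.defenders_count = 3
#   wire_bytes = resp.SerializeToString()
#   same = CollectDailyDefenderBonusResponse.FromString(wire_bytes)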
| mit |
LeeKamentsky/CellProfiler | cellprofiler/modules/tests/test_identifyprimaryobjects.py | 2 | 158497 | """test_identifyprimaryobjects.py: test the IdentifyPrimaryObjects module
CellProfiler is distributed under the GNU General Public License.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
"""
import os
import base64
import unittest
import numpy as np
import scipy.ndimage
import tempfile
import StringIO
import zlib
import cellprofiler.modules.identifyprimaryobjects as ID
import cellprofiler.modules.identify as I
import cellprofiler.cpmath.threshold as T
from cellprofiler.modules.injectimage import InjectImage
import cellprofiler.settings
import cellprofiler.cpimage as cpi
import cellprofiler.objects as cpo
import cellprofiler.measurements as cpmeas
import cellprofiler.pipeline
from cellprofiler.workspace import Workspace
from cellprofiler.modules.tests import read_example_image
IMAGE_NAME = "my_image"
OBJECTS_NAME = "my_objects"
BINARY_IMAGE_NAME = "binary_image"
MASKING_OBJECTS_NAME = "masking_objects"
MEASUREMENT_NAME = "my_measurement"
class test_IdentifyPrimaryObjects(unittest.TestCase):
def load_error_handler(self, caller, event):
if isinstance(event, cellprofiler.pipeline.LoadExceptionEvent):
self.fail(event.error.message)
def make_workspace(self, image,
mask = None,
labels = None,
binary_image = None):
'''Make a workspace and IdentifyPrimaryObjects module
image - the intensity image for thresholding
mask - if present, the "don't analyze" mask of the intensity image
labels - if thresholding per-object, the labels matrix needed
binary_image - if thresholding using a binary image, the image
'''
module = ID.IdentifyPrimaryObjects()
module.module_num = 1
module.image_name.value = IMAGE_NAME
module.object_name.value = OBJECTS_NAME
module.binary_image.value = BINARY_IMAGE_NAME
module.masking_objects.value = MASKING_OBJECTS_NAME
pipeline = cellprofiler.pipeline.Pipeline()
pipeline.add_module(module)
m = cpmeas.Measurements()
cpimage = cpi.Image(image, mask = mask)
m.add(IMAGE_NAME, cpimage)
if binary_image is not None:
m.add(BINARY_IMAGE_NAME, cpi.Image(binary_image))
object_set = cpo.ObjectSet()
if labels is not None:
o = cpo.Objects()
o.segmented = labels
object_set.add_objects(o, MASKING_OBJECTS_NAME)
workspace = cellprofiler.workspace.Workspace(
pipeline, module, m, object_set, m, None)
return workspace, module
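    # Illustrative use of the helper above (hypothetical; mirrors the pattern of the
    # tests below, most of which build their pipeline and image set by hand):
    #   workspace, module = self.make_workspace(np.zeros((40, 40)))
    #   module.run(workspace)
    #   objects = workspace.object_set.get_objects(OBJECTS_NAME)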
def test_00_00_init(self):
x = ID.IdentifyPrimaryObjects()
def test_02_000_test_zero_objects(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.threshold_range.min =.1
x.threshold_range.max = 1
x.watershed_method.value = ID.WA_NONE
img = np.zeros((25,25))
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
self.assertEqual(len(object_set.object_names),1)
self.assertTrue("my_object" in object_set.object_names)
objects = object_set.get_objects("my_object")
segmented = objects.segmented
self.assertTrue(np.all(segmented == 0))
self.assertTrue("Image" in measurements.get_object_names())
self.assertTrue("my_object" in measurements.get_object_names())
self.assertTrue("Threshold_FinalThreshold_my_object" in measurements.get_feature_names("Image"))
self.assertTrue("Count_my_object" in measurements.get_feature_names("Image"))
count = measurements.get_current_measurement("Image","Count_my_object")
self.assertEqual(count,0)
self.assertTrue("Location_Center_X" in measurements.get_feature_names("my_object"))
location_center_x = measurements.get_current_measurement("my_object","Location_Center_X")
self.assertTrue(isinstance(location_center_x,np.ndarray))
self.assertEqual(np.product(location_center_x.shape),0)
self.assertTrue("Location_Center_Y" in measurements.get_feature_names("my_object"))
location_center_y = measurements.get_current_measurement("my_object","Location_Center_Y")
self.assertTrue(isinstance(location_center_y,np.ndarray))
self.assertEqual(np.product(location_center_y.shape),0)
def test_02_001_test_zero_objects_wa_in_lo_in(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.threshold_range.min = .1
x.threshold_range.max = 1
x.watershed_method.value = ID.WA_INTENSITY
x.unclump_method.value = ID.UN_INTENSITY
img = np.zeros((25,25))
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
self.assertEqual(len(object_set.object_names),1)
self.assertTrue("my_object" in object_set.object_names)
objects = object_set.get_objects("my_object")
segmented = objects.segmented
self.assertTrue(np.all(segmented == 0))
def test_02_002_test_zero_objects_wa_di_lo_in(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.threshold_range.min = .1
x.threshold_range.max = 1
x.watershed_method.value = ID.WA_SHAPE
x.unclump_method.value = ID.UN_INTENSITY
img = np.zeros((25,25))
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
self.assertEqual(len(object_set.object_names),1)
self.assertTrue("my_object" in object_set.object_names)
objects = object_set.get_objects("my_object")
segmented = objects.segmented
self.assertTrue(np.all(segmented == 0))
def test_02_003_test_zero_objects_wa_in_lo_sh(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.threshold_range.min = .1
x.threshold_range.max = 1
x.watershed_method.value = ID.WA_INTENSITY
x.unclump_method.value = ID.UN_SHAPE
img = np.zeros((25,25))
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
self.assertEqual(len(object_set.object_names),1)
self.assertTrue("my_object" in object_set.object_names)
objects = object_set.get_objects("my_object")
segmented = objects.segmented
self.assertTrue(np.all(segmented == 0))
def test_02_004_test_zero_objects_wa_di_lo_sh(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.threshold_range.min = .1
x.threshold_range.max = 1
x.watershed_method.value = ID.WA_SHAPE
x.unclump_method.value = ID.UN_SHAPE
img = np.zeros((25,25))
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
self.assertEqual(len(object_set.object_names),1)
self.assertTrue("my_object" in object_set.object_names)
objects = object_set.get_objects("my_object")
segmented = objects.segmented
self.assertTrue(np.all(segmented == 0))
def test_02_01_test_one_object(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = False
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_OTSU
x.threshold_smoothing_choice.value = I.TSM_NONE
img = one_cell_image()
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
self.assertEqual(len(object_set.object_names),1)
self.assertTrue("my_object" in object_set.object_names)
objects = object_set.get_objects("my_object")
segmented = objects.segmented
self.assertTrue(np.all(segmented[img>0] == 1))
self.assertTrue(np.all(img[segmented==1] > 0))
self.assertTrue("Image" in measurements.get_object_names())
self.assertTrue("my_object" in measurements.get_object_names())
self.assertTrue("Threshold_FinalThreshold_my_object" in measurements.get_feature_names("Image"))
threshold = measurements.get_current_measurement("Image","Threshold_FinalThreshold_my_object")
self.assertTrue(threshold < .5)
self.assertTrue("Count_my_object" in measurements.get_feature_names("Image"))
count = measurements.get_current_measurement("Image","Count_my_object")
self.assertEqual(count,1)
self.assertTrue("Location_Center_Y" in measurements.get_feature_names("my_object"))
location_center_y = measurements.get_current_measurement("my_object","Location_Center_Y")
self.assertTrue(isinstance(location_center_y,np.ndarray))
self.assertEqual(np.product(location_center_y.shape),1)
self.assertTrue(location_center_y[0]>8)
self.assertTrue(location_center_y[0]<12)
self.assertTrue("Location_Center_X" in measurements.get_feature_names("my_object"))
location_center_x = measurements.get_current_measurement("my_object","Location_Center_X")
self.assertTrue(isinstance(location_center_x,np.ndarray))
self.assertEqual(np.product(location_center_x.shape),1)
self.assertTrue(location_center_x[0]>13)
self.assertTrue(location_center_x[0]<16)
columns = x.get_measurement_columns(pipeline)
for object_name in (cpmeas.IMAGE, "my_object"):
ocolumns =[x for x in columns if x[0] == object_name]
features = measurements.get_feature_names(object_name)
self.assertEqual(len(ocolumns), len(features))
self.assertTrue(all([column[1] in features for column in ocolumns]))
def test_02_02_test_two_objects(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = False
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_OTSU
x.threshold_smoothing_choice.value = I.TSM_NONE
img = two_cell_image()
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
self.assertEqual(len(object_set.object_names),1)
self.assertTrue("my_object" in object_set.object_names)
objects = object_set.get_objects("my_object")
self.assertTrue("Image" in measurements.get_object_names())
self.assertTrue("my_object" in measurements.get_object_names())
self.assertTrue("Threshold_FinalThreshold_my_object" in measurements.get_feature_names("Image"))
threshold = measurements.get_current_measurement("Image","Threshold_FinalThreshold_my_object")
self.assertTrue(threshold < .6)
self.assertTrue("Count_my_object" in measurements.get_feature_names("Image"))
count = measurements.get_current_measurement("Image","Count_my_object")
self.assertEqual(count,2)
self.assertTrue("Location_Center_Y" in measurements.get_feature_names("my_object"))
location_center_y = measurements.get_current_measurement("my_object","Location_Center_Y")
self.assertTrue(isinstance(location_center_y,np.ndarray))
self.assertEqual(np.product(location_center_y.shape),2)
self.assertTrue(location_center_y[0]>8)
self.assertTrue(location_center_y[0]<12)
self.assertTrue(location_center_y[1]>28)
self.assertTrue(location_center_y[1]<32)
self.assertTrue("Location_Center_Y" in measurements.get_feature_names("my_object"))
location_center_x = measurements.get_current_measurement("my_object","Location_Center_X")
self.assertTrue(isinstance(location_center_x,np.ndarray))
self.assertEqual(np.product(location_center_x.shape),2)
self.assertTrue(location_center_x[0]>33)
self.assertTrue(location_center_x[0]<37)
self.assertTrue(location_center_x[1]>13)
self.assertTrue(location_center_x[1]<16)
def test_02_03_test_threshold_range(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.threshold_range.min = .7
x.threshold_range.max = 1
x.threshold_correction_factor.value = .95
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = I.TM_MCT
x.exclude_size.value = False
x.watershed_method.value = ID.WA_NONE
img = two_cell_image()
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
self.assertEqual(len(object_set.object_names),1)
self.assertTrue("my_object" in object_set.object_names)
objects = object_set.get_objects("my_object")
self.assertTrue("Image" in measurements.get_object_names())
self.assertTrue("my_object" in measurements.get_object_names())
self.assertTrue("Threshold_FinalThreshold_my_object" in measurements.get_feature_names("Image"))
threshold = measurements.get_current_measurement("Image","Threshold_FinalThreshold_my_object")
self.assertTrue(threshold < .8)
self.assertTrue(threshold > .6)
self.assertTrue("Count_my_object" in measurements.get_feature_names("Image"))
count = measurements.get_current_measurement("Image","Count_my_object")
self.assertEqual(count,1)
self.assertTrue("Location_Center_Y" in measurements.get_feature_names("my_object"))
location_center_y = measurements.get_current_measurement("my_object","Location_Center_Y")
self.assertTrue(isinstance(location_center_y,np.ndarray))
self.assertEqual(np.product(location_center_y.shape),1)
self.assertTrue(location_center_y[0]>8)
self.assertTrue(location_center_y[0]<12)
self.assertTrue("Location_Center_X" in measurements.get_feature_names("my_object"))
location_center_x = measurements.get_current_measurement("my_object","Location_Center_X")
self.assertTrue(isinstance(location_center_x,np.ndarray))
self.assertEqual(np.product(location_center_x.shape),1)
self.assertTrue(location_center_x[0]>33)
self.assertTrue(location_center_x[0]<36)
def test_02_04_fill_holes(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = False
x.fill_holes.value = True
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_OTSU
x.threshold_smoothing_choice.value = I.TSM_NONE
img = np.zeros((40,40))
draw_circle(img, (10,10), 7, .5)
draw_circle(img, (30,30), 7, .5)
img[10,10] = 0
img[30,30] = 0
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertTrue(objects.segmented[10,10] > 0)
self.assertTrue(objects.segmented[30,30] > 0)
def test_02_05_dont_fill_holes(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.threshold_range.min = .7
x.threshold_range.max = 1
x.exclude_size.value = False
x.fill_holes.value = False
x.smoothing_filter_size.value = 0
x.automatic_smoothing.value = False
x.watershed_method.value = ID.WA_NONE
img = np.zeros((40,40))
draw_circle(img, (10,10), 7, .5)
draw_circle(img, (30,30), 7, .5)
img[10,10] = 0
img[30,30] = 0
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertTrue(objects.segmented[10,10] == 0)
self.assertTrue(objects.segmented[30,30] == 0)
def test_02_05_01_fill_holes_within_holes(self):
'Regression test of img-1431'
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.size_range.min = 1
x.size_range.max = 2
x.exclude_size.value = False
x.fill_holes.value = ID.FH_DECLUMP
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_OTSU
x.threshold_smoothing_choice.value = I.TSM_NONE
img = np.zeros((40,40))
draw_circle(img, (20,20), 10, .5)
draw_circle(img, (20,20), 4, 0)
img[20,20] = 1
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertTrue(objects.segmented[20,20] == 1)
self.assertTrue(objects.segmented[22,20] == 1)
self.assertTrue(objects.segmented[26,20] == 1)
def test_02_06_test_watershed_shape_shape(self):
"""Identify by local_maxima:shape & intensity:shape
Create an object whose intensity is high near the middle
but has an hourglass shape, then segment it using shape/shape
"""
x = ID.IdentifyPrimaryObjects()
x.image_name.value = "my_image"
x.object_name.value = "my_object"
x.exclude_size.value = False
x.size_range.value = (2,10)
x.fill_holes.value = False
x.maxima_suppression_size.value = 3
x.automatic_suppression.value = False
x.unclump_method.value = ID.UN_SHAPE
x.watershed_method.value = ID.WA_SHAPE
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_OTSU
x.threshold_smoothing_choice.value = I.TSM_NONE
img = np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0, 0,.6,.6,.6,.6,.6,.6,.6,.6,.6,.6, 0, 0, 0],
[ 0, 0, 0, 0,.7,.7,.7,.7,.7,.7,.7,.7, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.8,.9, 1, 1,.9,.8, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0,.7,.7,.7,.7,.7,.7,.7,.7, 0, 0, 0, 0],
[ 0, 0, 0,.6,.6,.6,.6,.6,.6,.6,.6,.6,.6, 0, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertEqual(np.max(objects.segmented),2)
def test_02_07_test_watershed_shape_intensity(self):
"""Identify by local_maxima:shape & watershed:intensity
Create an object with an hourglass shape to get two maxima, but
set the intensities so that one maximum gets the middle portion
"""
x = ID.IdentifyPrimaryObjects()
x.image_name.value = "my_image"
x.object_name.value = "my_object"
x.exclude_size.value = False
x.size_range.value = (2,10)
x.fill_holes.value = False
x.maxima_suppression_size.value = 3
x.automatic_suppression.value = False
x.unclump_method.value = ID.UN_SHAPE
x.watershed_method.value = ID.WA_INTENSITY
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_OTSU
x.threshold_smoothing_choice.value = I.TSM_NONE
img = np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0],
[ 0, 0, 0,.4,.4,.4,.5,.5,.5,.4,.4,.4,.4, 0, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0, 0],
[ 0, 0, 0, 0, 0,.4,.4,.4,.4,.4,.4, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertEqual(np.max(objects.segmented),2)
self.assertEqual(objects.segmented[7,11],objects.segmented[7,4])
def test_02_08_test_watershed_intensity_distance_single(self):
"""Identify by local_maxima:intensity & watershed:shape - one object
Create an object with an hourglass shape and a peak in the middle.
It should be segmented into a single object.
"""
x = ID.IdentifyPrimaryObjects()
x.image_name.value = "my_image"
x.object_name.value = "my_object"
x.exclude_size.value = False
x.size_range.value = (4,10)
x.fill_holes.value = False
x.maxima_suppression_size.value = 3.6
x.automatic_suppression.value = False
x.unclump_method.value = ID.UN_INTENSITY
x.watershed_method.value = ID.WA_SHAPE
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_OTSU
x.threshold_smoothing_choice.value = I.TSM_NONE
img = np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.6,.6,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0, 0,.5,.5,.5,.6,.7,.7,.6,.5,.5,.5, 0, 0, 0],
[ 0, 0, 0, 0,.5,.6,.7,.8,.8,.7,.6,.5, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.7,.8,.9,.9,.8,.7, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0,.5,.6,.7,.8,.8,.7,.6,.5, 0, 0, 0, 0],
[ 0, 0, 0,.5,.5,.5,.6,.7,.7,.6,.5,.5,.5, 0, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.6,.6,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
# We do a little blur here so that there's some monotonic decrease
# from the central peak
img = scipy.ndimage.gaussian_filter(img, .25, mode='constant')
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertEqual(np.max(objects.segmented),1)
def test_02_08_test_watershed_intensity_distance_triple(self):
"""Identify by local_maxima:intensity & watershed:shape - 3 objects w/o filter
Create an object with an hourglass shape and three intensity peaks
(top, middle and bottom). Without smoothing, it should be segmented
into three objects.
"""
x = ID.IdentifyPrimaryObjects()
x.image_name.value = "my_image"
x.object_name.value = "my_object"
x.exclude_size.value = False
x.size_range.value = (2,10)
x.fill_holes.value = False
x.smoothing_filter_size.value = 0
x.automatic_smoothing.value = False
x.maxima_suppression_size.value = 7.1
x.automatic_suppression.value = False
x.unclump_method.value = ID.UN_INTENSITY
x.watershed_method.value = ID.WA_SHAPE
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_OTSU
x.threshold_smoothing_choice.value = I.TSM_NONE
img = np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.8,.8,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.6,.6,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0, 0,.5,.5,.5,.6,.7,.7,.6,.5,.5,.5, 0, 0, 0],
[ 0, 0, 0, 0,.5,.6,.7,.8,.8,.7,.6,.5, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.7,.8,.9,.9,.8,.7, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0,.5,.6,.7,.8,.8,.7,.6,.5, 0, 0, 0, 0],
[ 0, 0, 0,.5,.5,.5,.6,.7,.7,.6,.5,.5,.5, 0, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.6,.6,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.8,.8,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertEqual(np.max(objects.segmented),3)
def test_02_09_test_watershed_intensity_distance_filter(self):
"""Identify by local_maxima:intensity & watershed:shape - filtered
Create an object with an hourglass shape and a peak in the middle.
It should be segmented into a single object.
"""
x = ID.IdentifyPrimaryObjects()
x.image_name.value = "my_image"
x.object_name.value = "my_object"
x.exclude_size.value = False
x.size_range.value = (2,10)
x.fill_holes.value = False
x.smoothing_filter_size.value = 1
x.automatic_smoothing.value = 1
x.maxima_suppression_size.value = 3.6
x.automatic_suppression.value = False
x.unclump_method.value = ID.UN_INTENSITY
x.watershed_method.value = ID.WA_SHAPE
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_OTSU
x.threshold_smoothing_choice.value = I.TSM_NONE
img = np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.6,.6,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0, 0,.5,.5,.5,.6,.7,.7,.6,.5,.5,.5, 0, 0, 0],
[ 0, 0, 0, 0,.5,.6,.7,.8,.8,.7,.6,.5, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.7,.8,.9,.9,.8,.7, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0,.5,.6,.7,.8,.8,.7,.6,.5, 0, 0, 0, 0],
[ 0, 0, 0,.5,.5,.5,.6,.7,.7,.6,.5,.5,.5, 0, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.6,.6,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertEqual(np.max(objects.segmented),1)
def test_02_10_test_watershed_intensity_distance_double(self):
"""Identify by local_maxima:intensity & watershed:shape - two objects
Create an object with an hourglass shape and peaks in the top and
bottom, but with a distribution of values that's skewed so that,
by intensity, one of the objects would occupy the middle. The middle
should be shared because the watershed is done using the distance
transform.
"""
x = ID.IdentifyPrimaryObjects()
x.image_name.value = "my_image"
x.object_name.value = "my_object"
x.exclude_size.value = False
x.size_range.value = (2,10)
x.fill_holes.value = False
x.smoothing_filter_size.value = 0
x.automatic_smoothing.value = 0
x.maxima_suppression_size.value = 3.6
x.automatic_suppression.value = False
x.unclump_method.value = ID.UN_INTENSITY
x.watershed_method.value = ID.WA_SHAPE
img = np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.9,.9,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0],
[ 0, 0, 0,.4,.4,.4,.5,.5,.5,.4,.4,.4,.4, 0, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.9,.9,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0, 0],
[ 0, 0, 0, 0, 0,.4,.4,.4,.4,.4,.4, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
# We do a little blur here so that there's some monotonic decrease
# from the central peak
img = scipy.ndimage.gaussian_filter(img, .5, mode='constant')
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertEqual(np.max(objects.segmented),2)
self.assertNotEqual(objects.segmented[12,7],objects.segmented[4,7])
def test_02_11_propagate(self):
"""Test the propagate unclump method"""
x = ID.IdentifyPrimaryObjects()
x.image_name.value = "my_image"
x.object_name.value = "my_object"
x.exclude_size.value = False
x.size_range.value = (2,10)
x.fill_holes.value = False
x.smoothing_filter_size.value = 0
x.automatic_smoothing.value = 0
x.maxima_suppression_size.value = 7
x.automatic_suppression.value = False
x.manual_threshold.value = .3
x.unclump_method.value = ID.UN_INTENSITY
x.watershed_method.value = ID.WA_PROPAGATE
x.threshold_scope.value = I.TS_MANUAL
x.threshold_smoothing_choice.value = I.TSM_NONE
img = np.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0],
[ 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5,.5,.9,.9,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0,.5,.5,.5,.5, 0, 0, 0, 0, 0, 0, 0,.5, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,.5, 0, 0],
[ 0, 0, 0, 0, 0, 0,.5,.5,.5,.5,.5,.5,.5,.5, 0, 0],
[ 0, 0, 0, 0, 0,.5,.5,.5,.5, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0,.5,.5,.5,.5,.5, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0,.4,.4,.4,.5,.5,.5,.4,.4,.4,.4, 0, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.9,.9,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0],
[ 0, 0, 0,.4,.4,.4,.4,.4,.4,.4,.4,.4,.4, 0, 0, 0],
[ 0, 0, 0, 0, 0,.4,.4,.4,.4,.4,.4, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
# We do a little blur here so that there's some monotonic decrease
# from the central peak
img = scipy.ndimage.gaussian_filter(img, .5, mode='constant')
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertEqual(np.max(objects.segmented),2)
# This point is closer to the upper object as the crow flies, but it should
# end up in the lower object because propagation follows the serpentine path
self.assertEqual(objects.segmented[14,9],objects.segmented[9,9])
def test_02_12_fly(self):
'''Run identify on the fly image'''
data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:9722
IdentifyPrimaryObjects:[module_num:1|svn_version:\'9633\'|variable_revision_number:6|show_window:True|notes:\x5B\x5D]
Select the input image:CropBlue
Name the primary objects to be identified:Nuclei
Typical diameter of objects, in pixel units (Min,Max):15,40
Discard objects outside the diameter range?:Yes
Try to merge too small objects with nearby larger objects?:No
Discard objects touching the border of the image?:Yes
Select the thresholding method:MoG Global
Threshold correction factor:1.6
Lower and upper bounds on threshold:0,1
Approximate fraction of image covered by objects?:0.2
Method to distinguish clumped objects:Intensity
Method to draw dividing lines between clumped objects:Intensity
Size of smoothing filter:10
Suppress local maxima that are closer than this minimum allowed distance:5
Speed up by using lower-resolution image to find local maxima?:Yes
Name the outline image:None
Fill holes in identified objects?:Yes
Automatically calculate size of smoothing filter?:Yes
Automatically calculate minimum allowed distance between local maxima?:Yes
Manual threshold:0.0
Select binary image:MoG Global
Retain outlines of the identified objects?:No
Automatically calculate the threshold using the Otsu method?:Yes
Enter Laplacian of Gaussian threshold:.5
Two-class or three-class thresholding?:Two classes
Minimize the weighted variance or the entropy?:Weighted variance
Assign pixels in the middle intensity class to the foreground or the background?:Foreground
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:Yes
Enter LoG filter diameter:5
Handling of objects if excessive number of objects identified:Continue
Maximum number of objects:500
"""
pipeline = cellprofiler.pipeline.Pipeline()
def callback(pipeline, event):
self.assertFalse(isinstance(event, (cellprofiler.pipeline.RunExceptionEvent,
cellprofiler.pipeline.LoadExceptionEvent)))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(data))
x = pipeline.modules()[0]
self.assertTrue(isinstance(x, ID.IdentifyPrimaryObjects))
img = fly_image()[300:600,300:600]
image = cpi.Image(img)
#
# Make sure it runs both regular and with reduced image
#
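# (No output assertions here; the test simply verifies that run() completes
# without raising for every combination of settings below.)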
for min_size in (9, 15):
#
# Exercise clumping / declumping options
#
x.size_range.min = min_size
for unclump_method in (ID.UN_INTENSITY, ID.UN_SHAPE, ID.UN_LOG):
x.unclump_method.value = unclump_method
for watershed_method in (ID.WA_INTENSITY, ID.WA_SHAPE, ID.WA_PROPAGATE):
x.watershed_method.value = watershed_method
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.add(x.image_name.value, image)
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
def test_02_13_maxima_suppression_zero(self):
# Regression test for issue #877
# if maxima_suppression_size = 1 or 0, use a 4-connected structuring
# element.
#
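# For reference, a 4-connected footprint is the cross-shaped element
#   [[0, 1, 0],
#    [1, 1, 1],
#    [0, 1, 0]]
# so maxima that touch only diagonally are not suppressed against each other.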
img = np.array(
[[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, .1, 0, 0, .1, 0, 0, .1, 0, 0],
[ 0, .1, 0, 0, 0, .2, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, .1, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
expected = np.array(
[[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 1, 0, 0, 2, 0, 0, 3, 0, 0],
[ 0, 1, 0, 0, 0, 2, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 4, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
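# expected encodes the intent of the assertions below: pixels marked 0 must
# remain background, four objects should be found in total, and the two
# pixels labelled 1 should belong to the same object.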
for distance in (0, 1):
x = ID.IdentifyPrimaryObjects()
x.image_name.value = "my_image"
x.object_name.value = "my_object"
x.exclude_size.value = False
x.size_range.value = (2,10)
x.fill_holes.value = False
x.smoothing_filter_size.value = 0
x.automatic_smoothing.value = 0
x.maxima_suppression_size.value = distance
x.automatic_suppression.value = False
x.manual_threshold.value = .05
x.unclump_method.value = ID.UN_INTENSITY
x.watershed_method.value = ID.WA_INTENSITY
x.threshold_scope.value = I.TS_MANUAL
x.threshold_smoothing_choice.value = I.TSM_NONE
pipeline = cellprofiler.pipeline.Pipeline()
x.module_num = 1
pipeline.add_module(x)
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
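# measurements doubles as the image set here (it is passed in the image_set
# slot of Workspace below), so the input image is added to it directly.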
measurements.add(x.image_name.value, cpi.Image(img))
x.run(Workspace(pipeline, x, measurements, object_set, measurements,
None))
output = object_set.get_objects(x.object_name.value)
self.assertEqual(output.count, 4)
self.assertTrue(np.all(output.segmented[expected == 0] == 0))
self.assertEqual(len(np.unique(output.segmented[expected == 1])), 1)
def test_02_14_automatic(self):
# Regression test of issue 1071 - automatic should yield same
# threshold regardless of manual parameters
#
r = np.random.RandomState()
r.seed(214)
image = r.uniform(size = (20, 20))
workspace, module = self.make_workspace(image)
assert isinstance(module, ID.IdentifyPrimaryObjects)
module.threshold_scope.value = I.TS_AUTOMATIC
module.run(workspace)
m = workspace.measurements
orig_threshold = m[cpmeas.IMAGE, I.FF_FINAL_THRESHOLD % OBJECTS_NAME]
workspace, module = self.make_workspace(image)
module.threshold_scope.value = I.TS_AUTOMATIC
module.threshold_method.value = I.TM_OTSU
module.threshold_smoothing_choice.value = I.TSM_MANUAL
module.threshold_smoothing_scale.value = 100
module.threshold_correction_factor.value = .1
module.threshold_range.min = .8
module.threshold_range.max = .81
module.run(workspace)
m = workspace.measurements
threshold = m[cpmeas.IMAGE, I.FF_FINAL_THRESHOLD % OBJECTS_NAME]
self.assertEqual(threshold, orig_threshold)
def test_04_01_load_matlab_12(self):
"""Test loading a Matlab version 12 IdentifyPrimAutomatic pipeline
"""
old_r12_file = 'TUFUTEFCIDUuMCBNQVQtZmlsZSwgUGxhdGZvcm06IFBDV0lOLCBDcmVhdGVkIG9uOiBXZWQgRGVjIDMxIDExOjQxOjUxIDIwMDggICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAABSU0PAAAAuAEAAHicxVRdT8IwFO3GWEQNEYmJvu3RB2K2xAcfNTFRHgQihujjBoXUbC3ZWgM++TP8Of4Uf4ot7LMSNqfgTZpx7u45p/eytg4AMGsA6Py5w5cKllENsZJaAvchpQhPgirQwHGY/+BrYPvIdlw4sF0GAxBHlG/jMXmYT+NXd2TEXNixvXQxjw7zHOgH3XFEDF/30Ay6ffQKQTaisnv4ggJEcMgP9eVs7Euo5Fvn61NL5qCsmEMzlRf1lyCp11bU76bqD0J8TQxMqMECmOhc5Ojoko6+mNPQhagYvyrxBbbM1rkZ+ps5/EqGXwFPfHZFeGqGp4IO+Z1f3rz3pD4F7tKAGTcucWw3nneev5LRUYBVck5myyrE0zI8DZhnplWk35rUr8BtTCEOEJ2H+f/WuWKUeDZFww3obOo7Knpu/0pn2+fvTVl/zzVS+bJ9Is+ewIlP2DTRuc3RaUg6AhPnGQ7pQshAeASnqX1t+5m3/0Np/wITRl2E4bcGhN4MrP8f0vdQEf8jyV/g9ghiisbzno+89BmSvx89x1/lv5oreGXvzyJ++yV4Gme+nyx5jz+c7+ma+iii/BfqTY0Q'
pipeline = cellprofiler.modules.tests.load_pipeline(self, old_r12_file)
pipeline.add_listener(self.load_error_handler)
self.assertEqual(len(pipeline.modules()),1)
module = pipeline.module(1)
self.assertTrue(isinstance(module,ID.IdentifyPrimaryObjects))
self.assertEqual(module.threshold_algorithm,T.TM_OTSU)
self.assertEqual(module.threshold_modifier,T.TM_GLOBAL)
self.assertAlmostEqual(float(module.object_fraction.value),.01)
self.assertEqual(module.object_name.value,"Nuclei")
self.assertEqual(module.image_name.value,"Do not use")
self.assertTrue(module.exclude_size.value)
self.assertEqual(module.fill_holes.value, ID.FH_THRESHOLDING)
self.assertTrue(module.exclude_border_objects.value)
self.assertTrue(module.automatic_smoothing.value)
self.assertTrue(module.automatic_suppression.value)
self.assertFalse(module.merge_objects.value)
self.assertTrue(module.image_name == cellprofiler.settings.DO_NOT_USE)
self.assertFalse(module.should_save_outlines.value)
self.assertEqual(module.save_outlines.value, "None")
self.assertAlmostEqual(module.threshold_range.min, 0)
self.assertAlmostEqual(module.threshold_range.max, 1)
self.assertAlmostEqual(module.threshold_correction_factor.value, 1)
self.assertEqual(module.watershed_method.value, "Intensity")
self.assertEqual(module.unclump_method.value, "Intensity")
self.assertAlmostEqual(module.maxima_suppression_size.value, 5)
def test_04_001_load_matlab_regression(self):
'''A regression test on a pipeline that misloaded the outlines variable'''
data = ('eJzzdQzxcXRSMNUzUPB1DNFNy8xJ1VEIyEksScsvyrVSCHAO9/TTUX'
'AuSk0sSU1RyM+zUvDNz1PwKs1TMLBQMDS1MjayMjJTMDIwsFQgGTAw'
'evryMzAwbGNiYKiY8zbCMf+ygUjZpWVaOVrJzJ3O/JZFEsqiMhabMj'
'mUNi5Luqyiopf3SqxZOrwzeOsfqTo29zqpwtlL+m5KXed9zRexac3z'
'Pd9/7j1/Xt8viqHhpjCD1MkbPrs4p531SnV+EbPPpedhgkjkAr55Sz'
'/vn1zH68zzmyXWWWgxxxPd2eXNintn+X9yFy8REL7SmhxomXm34o57'
'4hNe48NfCvnPC+w8Yi+gsc3nrfCsRxyXFbb6f3x6syb21JLSaM/63d'
'sfHZxQsUL1r8eM+BfNU+v+st3jY/nbvCV+oWT1xzy22rR+xc/7i+aY'
'q1r4crafjutwT+e8qvVtWsr5p8ZMze8zZfw6a/cmxLM/X24bnnq3bY'
've9N0b/QXCHq9Xvbm9qFo/jYW9hrv8aPxxy7q3DFstvqlW68UfmOnb'
'biZ3+KLS0tACOS+LGLvlZQ4zZd1fHgy4eT6KcTmbnbrLq2MPfQM9Ht'
'y56yqTxnicJXbV9PORcm9m/V/1U/vwzckFO95s1Nh2X/hWu8rxlbfW'
'G9X1MPUxWll/cr6n/nxH8IfkyxZxmrdO/nw5x2Ju7JPjzEBn5x0IEE'
'g0E/9z8hi/akW/qo3e44SG5RUCzpvWtE5sCN9av+ury/H+yzMuPmHt'
'r+W1f7LH8mZTf2ndiwe9Thb9NR4TGjbn7v0d/l77avGCV+15dSvuJZ'
'f85Ig75PUtMVrO6Hfn1n9yutcac1/fWpTR4yTlv+r4Sbe5u9x+359w'
'XqyhLOjxhZRmi/xd6RdTlz2Re1VXv+ZRzK7S2/vMVfasSa1YlqDeH/'
'qzNP7x5aM/5c/fPVJ8//imqiKOrj2FkTb/kxwFC2cfe1savu7/rtJP'
'yq3M4TtWrDzyOeTQw03WDoyHD1fqH0n+2Lfo0XVlzv7TL8sz/jnpnl'
'afyW88ka9/zdp9/max52+Z//9VH5gW7l+6b8veb+e/Fd2NT9hcW7/P'
'zT67fOl/9tZZsgEA6Ux4DA==')
pipeline = cellprofiler.pipeline.Pipeline()
pipeline.load(StringIO.StringIO(zlib.decompress(base64.b64decode(data))))
self.assertEqual(len(pipeline.modules()),3)
module = pipeline.modules()[1]
self.assertTrue(isinstance(module, ID.IdentifyPrimaryObjects))
self.assertTrue(module.should_save_outlines.value)
self.assertEqual(module.save_outlines.value, "NucleiOutlines")
def test_04_02_load_v1(self):
file = 'TUFUTEFCIDUuMCBNQVQtZmlsZSBQbGF0Zm9ybTogbnQsIENyZWF0ZWQgb246IE1vbiBBcHIgMDYgMTI6MzQ6MjQgMjAwOQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABSU0OAAAAoA0AAAYAAAAIAAAAAgAAAAAAAAAFAAAACAAAAAEAAAABAAAAAQAAAAgAAABTZXR0aW5ncwUABAAYAAAAAQAAAMAAAABWYXJpYWJsZVZhbHVlcwAAAAAAAAAAAABWYXJpYWJsZUluZm9UeXBlcwAAAAAAAABNb2R1bGVOYW1lcwAAAAAAAAAAAAAAAABOdW1iZXJzT2ZWYXJpYWJsZXMAAAAAAABQaXhlbFNpemUAAAAAAAAAAAAAAAAAAABWYXJpYWJsZVJldmlzaW9uTnVtYmVycwBNb2R1bGVSZXZpc2lvbk51bWJlcnMAAABNb2R1bGVOb3RlcwAAAAAAAAAAAAAAAAAOAAAAYAUAAAYAAAAIAAAAAQAAAAAAAAAFAAAACAAAAAEAAAAWAAAAAQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAAEAAAAAQAAAAAAAAAQAAQATm9uZQ4AAAA4AAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAYAAAABAAAAAAAAABAAAAAGAAAATnVjbGVpAAAOAAAAOAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAAFAAAAAQAAAAAAAAAQAAAABQAAADEwLDQwAAAADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAAAwAAAAEAAAAAAAAAEAADAFllcwAOAAAAMAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAACAAAAAQAAAAAAAAAQAAIATm8AAA4AAAAwAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAMAAAABAAAAAAAAABAAAwBZZXMADgAAAEAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAACwAAAAEAAAAAAAAAEAAAAAsAAABPdHN1IEdsb2JhbAAAAAAADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAAAQAAAAEAAAAAAAAAEAABADEAAAAOAAAASAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAARAAAAAQAAAAAAAAAQAAAAEQAAADAuMDAwMDAwLDEuMDAwMDAwAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAQAAAABAAAAAAAAABAABAAwLjAxDgAAAEAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAACQAAAAEAAAAAAAAAEAAAAAkAAABJbnRlbnNpdHkAAAAAAAAADgAAAEAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAACQAAAAEAAAAAAAAAEAAAAAkAAABJbnRlbnNpdHkAAAAAAAAADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAAAgAAAAEAAAAAAAAAEAACADEwAAAOAAAAMAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAABAAAAAQAAAAAAAAAQAAEANwAAAA4AAAAwAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAMAAAABAAAAAAAAABAAAwBZZXMADgAAAEAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAACgAAAAEAAAAAAAAAEAAAAAoAAABEbyBub3QgdXNlAAAAAAAADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAAAwAAAAEAAAAAAAAAEAADAFllcwAOAAAAMAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAADAAAAAQAAAAAAAAAQAAMAWWVzAA4AAAAwAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAMAAAABAAAAAAAAABAAAwBZZXMADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAAAwAAAAEAAAAAAAAAEAADADAuMAAOAAAAMAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAAEAAAAAQAAAAAAAAAQAAQATm9uZQ4AAAAwAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAIAAAABAAAAAAAAABAAAgBObwAADgAAAEgFAAAGAAAACAAAAAEAAAAAAAAABQAAAAgAAAABAAAAFgAAAAEAAAAAAAAADgAAAEAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAACgAAAAEAAAAAAAAAEAAAAAoAAABpbWFnZWdyb3VwAAAAAAAADgAAAEgAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAAEQAAAAEAAAAAAAAAEAAAABEAAABvYmplY3Rncm91cCBpbmRlcAAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAA
IAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAABIAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAABIAAAABAAAAAAAAABAAAAASAAAAb3V0bGluZWdyb3VwIGluZGVwAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAQAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAAKAAAAAQAAAAAAAAAQAAAACgAAAGltYWdlZ3JvdXAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAACgAAAABgAAAAgAAAABAAAAAAAAAAUAAAAIAAAAAQAAAAEAAAABAAAAAAAAAA4AAABwAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAEAAAAABAAAAAAAAABAAAABAAAAAY2VsbHByb2ZpbGVyLm1vZHVsZXMuaWRlbnRpZnlwcmltYXV0b21hdGljLklkZW50aWZ5UHJpbUF1dG9tYXRpYw4AAAAwAAAABgAAAAgAAAAJAAAAAAAAAAUAAAAIAAAAAQAAAAEAAAABAAAAAAAAAAIAAQAWAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAAAAAABAAAAAAAAAAkAAAAIAAAAAAAAAAAA8D8OAAAAMAAAAAYAAAAIAAAACQAAAAAAAAAFAAAACAAAAAEAAAABAAAAAQAAAAAAAAACAAEAAQAAAA4AAAAwAAAABgAAAAgAAAALAAAAAAAAAAUAAAAIAAAAAQAAAAEAAAABAAAAAAAAAAQAAgAAAAAADgAAAFgAAAAGAAAACAAAAAEAAAAAAAAABQAAAAgAAAABAAAAAQAAAAEAAAAAAAAADgAAACgAAAAGAAAACAAAAAEAAAAAAAAABQAAAAgAAAAAAAAAAQAAAAEAAAAAAAAA'
pipeline = cellprofiler.modules.tests.load_pipeline(self,file)
pipeline.add_listener(self.load_error_handler)
self.assertEqual(len(pipeline.modules()),1)
module = pipeline.module(1)
self.assertTrue(isinstance(module,ID.IdentifyPrimaryObjects))
self.assertEqual(module.threshold_algorithm,T.TM_OTSU)
self.assertEqual(module.threshold_modifier,T.TM_GLOBAL)
self.assertTrue(module.image_name == 'None')
def test_04_03_load_v3(self):
data = ('eJztWVtP2zAUTktBMMbG9rJJaJIfYWurpIAGaAI6uku3tlTQcRFim9'
'u6rSfXrhKH0U1Ie9zP2k/aT1gc0jY1l4T0IpgaFKXn5HznO+f4Ehtn'
'k4VM8jVYjqsgmyzEKpggkCeQV5heXwOUR8GWjiBHZcDoGsgyCj6YFK'
'grQEusLS6taQmQUNVVJdgVSmcfWI+DZ4oyYT0nrTvsvBp35JDrFvIu'
'4hzTqjGuRJSnjv6Pde9BHcMiQXuQmMjoULT0aVphhWaj/SrLyiZBOV'
'h3G1tXzqwXkW5sV1pA53UenyKyi38gKYWW2Q46wQZm1ME7/mVtm5dx'
'iVfUoTHTqUNIqoOoy5xLL+zfKx37yCV1e+Syn3VkTMv4BJdNSACuw2'
'o7CuFP9fA31uVvTEnlkjZu0wM3K8Uh7gI65bE3p7DEQR3yUk34WfHw'
'MyH5EXLOLBGE/cUf6sKHlEUnby/ecYlXyJoaXVJ7wKczmU/ZgHU/tF'
'rNDy7chQsrOeaP7yqcV397IuUp5BSqQJNwkBadDaSwjkqc6c2+5T0h'
'4VpXCzflPP3002kpfiFvc8ME7wgrQtL2M6j2knFaXO2JL8j8oMZV+4'
'pqzg9X/bz6+aTkT8hbNUgpIgk/eUS68BERizbIeWlKilfIacoRNTBv'
'uvK+6byiKf76W7/45brlGEVBxrmmnvP98sB9lOIW8uf5jfwrsXBA6/'
'EXC1+EtI8I2WHf14+SsfzxQkuzxYhZp+tHamz1+KcWTZydG+9iC2kr'
'FwLX/aWDq3ngVqT4hSxiOERQdwJbOluICZW14OE1R5dwdCnY7Ghu4z'
'x2T8pPyCkGKOPANFDHT1D+Yec74uvmUy/5LvSTz8980k8+P+uU/6v9'
'lgc6/meU7vEv5EJNRwiUCDQMe83fC3+QdcU+wtWa2EaeiA0TLSGXv2'
'HOg2+Zjqo6M2m54+fg/s32XcOM196kiYAbvfMHaTdW/Gat2O0AgLV3'
'RI0+1GGEG+FGuN5xmy6c3/+7dOaT8+F8l/Id4W4Hzus78ljp7ndCZi'
'YnmKILH5K7lPcIN9z5a9DroRFuhBsGbjJ09f5C3v8K+6/K9ePiudI9'
'LoRcQoQ0dCbO7/R43T5kMuKEwfL5KU88Y/1Muw587PMmD55NiWfzKh'
'5cRpTjSrOhW2wmZ3XIcSmedrR5S5tsaeU6Tl3C665H2Pp7OHd9/eW6'
'd9rj70YQvvDYRb5pD1zEqaDA/VZu1t7z19i3cgtq/w8+vUjz')
fd = StringIO.StringIO(zlib.decompress(base64.b64decode(data)))
pipeline = cellprofiler.pipeline.Pipeline()
pipeline.add_listener(self.load_error_handler)
pipeline.load(fd)
self.assertEqual(len(pipeline.modules()),2)
module = pipeline.modules()[1]
self.assertTrue(isinstance(module,ID.IdentifyPrimaryObjects))
self.assertEqual(module.threshold_algorithm,T.TM_OTSU)
self.assertEqual(module.threshold_modifier,T.TM_GLOBAL)
self.assertEqual(module.two_class_otsu.value, I.O_THREE_CLASS)
self.assertEqual(module.use_weighted_variance.value,
I.O_WEIGHTED_VARIANCE)
self.assertEqual(module.assign_middle_to_foreground.value,
I.O_FOREGROUND)
def test_04_04_load_v4(self):
data = ('eJztWd1u0zAUdrtu2piGBjdwM+RLhLYo2VYxekPLyqCo7SZWhrjDTd3WyLWr'
'xJlWnoBLHolLHodHIO6SNvG6JUvaiaGkitLj+Dvf+bFP7KRRadUrb2BR02Gj'
'0trpEorhCUWiy61BCTKxDQ8tjATuQM5K8Mgi8IND4e4+NIolQy/t7cFdXX8F'
'kh25WuOhe/n5DIAV97rqnnnv1rIn5wKnlE+xEIT17GVQAE+99l/ueYYsgtoU'
'nyHqYHtK4bfXWJe3RsPJrQbvOBQ30SDY2T2azqCNLfu46wO92yfkAtNT8h0r'
'LvjdPuJzYhPOPLynX22d8HKh8Mo4/N6YxiGnxEHGZSvQLvu/B9P+hRlxexTo'
'v+nJhHXIOek4iEIyQL2JFVKfHqFvKaRvCVSblTGuHIHbVOyQZwtfiJ23F8gU'
'cICE2Zd6DiL0rCh6pNx0TIpJPPtzIXwO7Hl+R/EuK7xSNvTtfT0Fvlavf2ok'
'jPsXN2txcPkQLg+aPB7fdbio8fZE8VPKVdxFDhWwJgcbrBILm4Jbo7n5vaLg'
'/MPHrXnXOON0XbFfysfCduA7ytuITvQsKl8qztD0VHxJ6oOu6eNj2/D+BOK3'
'KL8LIVxB2mDEmVerIGy/lA/7iDFMd+Pke03BS7nGBGY2EaMUfseti/PiV+ua'
'EROnznNDj4dT89XkDCex82VMO5PyzWs8+nzlCNwDEM6nlKscMi6gY3sLhzT1'
'667rZcYX5tNn1ON58sUZ5/Pki7M++L/yV1zo+mEDhOe/lFt9C2NoUmTb47V2'
'Gv4kz/PPmPT6cvt2LjcqzMQBfYuKw6w6eMQt3LO4wzrp+f+1caY+14oe7uCW'
'+7m7zMd48ycTMkzPn2Rc8vY3dycwNgC6e1I8nEMcMlyGy3D3D1cO4OK+P5rW'
'r8vycZ/8zXDJniOPQXgcSJk7ghKGrzxI7pPfGe5u68mi10MZLsNluPS41dz1'
'+yf1/YXs/zXAM2vevwDheS9lE1M6tLj87mlpg/HHOVujHHUuv45pdfdvLfCh'
'TPIMI3jKCk/5Oh7SwUyQ7mhouWyO4AMkiKnVvNYTt7Xit6pxXJvBG4xH3v1t'
'bt0cfzXu03z8eZ2Eb6lwlW89AlfwIihxP8Dt8v38hv6+b0n7/wXQ1Cms')
pipeline = cellprofiler.pipeline.Pipeline()
def callback(caller,event):
self.assertFalse(
isinstance(event, cellprofiler.pipeline.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(
StringIO.StringIO(zlib.decompress(base64.b64decode(data))))
self.assertEqual(len(pipeline.modules()),2)
module = pipeline.modules()[1]
self.assertTrue(isinstance(module,ID.IdentifyPrimaryObjects))
self.assertEqual(module.threshold_algorithm,T.TM_OTSU)
self.assertEqual(module.threshold_modifier,T.TM_GLOBAL)
self.assertEqual(module.two_class_otsu.value, I.O_THREE_CLASS)
self.assertEqual(module.use_weighted_variance.value,
I.O_WEIGHTED_VARIANCE)
self.assertEqual(module.assign_middle_to_foreground.value,
I.O_FOREGROUND)
def test_04_05_load_v5(self):
data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:9008
LoadImages:[module_num:1|svn_version:\'8947\'|variable_revision_number:4|show_window:True|notes:\x5B\x5D]
What type of files are you loading?:individual images
How do you want to load these files?:Text-Exact match
How many images are there in each group?:3
Type the text that the excluded images have in common:Do not use
Analyze all subfolders within the selected folder?:No
Image location:Default Image Folder
Enter the full path to the images:
Do you want to check image sets for missing or duplicate files?:Yes
Do you want to group image sets by metadata?:No
Do you want to exclude certain files?:No
What metadata fields do you want to group by?:
Type the text that these images have in common (case-sensitive):
What do you want to call this image in CellProfiler?:DNA
What is the position of this image in each group?:1
Do you want to extract metadata from the file name, the subfolder path or both?:None
Type the regular expression that finds metadata in the file name\x3A:^(?P<Plate>.*)_(?P<Well>\x5BA-P\x5D\x5B0-9\x5D{2})_s(?P<Site>\x5B0-9\x5D)
Type the regular expression that finds metadata in the subfolder path\x3A:.*\x5B\\\\/\x5D(?P<Date>.*)\x5B\\\\/\x5D(?P<Run>.*)$
IdentifyPrimaryObjects:[module_num:2|svn_version:\'8981\'|variable_revision_number:5|show_window:True|notes:\x5B\x5D]
Select the input image:DNA
Name the identified primary objects:MyObjects
Typical diameter of objects, in pixel units (Min,Max)\x3A:12,42
Discard objects outside the diameter range?:Yes
Try to merge too small objects with nearby larger objects?:No
Discard objects touching the border of the image?:Yes
Select the thresholding method:RobustBackground Global
Threshold correction factor:1.2
Lower and upper bounds on threshold\x3A:0.1,0.6
Approximate fraction of image covered by objects?:0.01
Method to distinguish clumped objects:Shape
Method to draw dividing lines between clumped objects:Distance
Size of smoothing filter\x3A:10
Suppress local maxima within this distance\x3A:7
Speed up by using lower-resolution image to find local maxima?:Yes
Name the outline image:MyOutlines
Fill holes in identified objects?:Yes
Automatically calculate size of smoothing filter?:Yes
Automatically calculate minimum size of local maxima?:Yes
Enter manual threshold\x3A:0.0
Select binary image\x3A:MyBinaryImage
Save outlines of the identified objects?:No
Calculate the Laplacian of Gaussian threshold automatically?:Yes
Enter Laplacian of Gaussian threshold\x3A:0.5
Two-class or three-class thresholding?:Two classes
Minimize the weighted variance or the entropy?:Weighted variance
Assign pixels in the middle intensity class to the foreground or the background?:Foreground
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:Yes
Enter LoG filter diameter\x3A :5
How do you want to handle images with large numbers of objects?:Truncate
Maximum # of objects\x3A:305
IdentifyPrimaryObjects:[module_num:3|svn_version:\'8981\'|variable_revision_number:5|show_window:True|notes:\x5B\x5D]
Select the input image:DNA
Name the identified primary objects:MyObjects
Typical diameter of objects, in pixel units (Min,Max)\x3A:12,42
Discard objects outside the diameter range?:No
Try to merge too small objects with nearby larger objects?:Yes
Discard objects touching the border of the image?:No
Select the thresholding method:Otsu Adaptive
Threshold correction factor:1.2
Lower and upper bounds on threshold\x3A:0.1,0.6
Approximate fraction of image covered by objects?:0.01
Method to distinguish clumped objects:Intensity
Method to draw dividing lines between clumped objects:Propagate
Size of smoothing filter\x3A:10
Suppress local maxima within this distance\x3A:7
Speed up by using lower-resolution image to find local maxima?:No
Name the outline image:MyOutlines
Fill holes in identified objects?:No
Automatically calculate size of smoothing filter?:No
Automatically calculate minimum size of local maxima?:No
Enter manual threshold\x3A:0.0
Select binary image\x3A:MyBinaryImage
Save outlines of the identified objects?:Yes
Calculate the Laplacian of Gaussian threshold automatically?:No
Enter Laplacian of Gaussian threshold\x3A:0.5
Two-class or three-class thresholding?:Three classes
Minimize the weighted variance or the entropy?:Entropy
Assign pixels in the middle intensity class to the foreground or the background?:Background
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:No
Enter LoG filter diameter\x3A :5
How do you want to handle images with large numbers of objects?:Erase
Maximum # of objects\x3A:305
"""
pipeline = cellprofiler.pipeline.Pipeline()
def callback(caller,event):
self.assertFalse(
isinstance(event, cellprofiler.pipeline.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(data))
self.assertEqual(len(pipeline.modules()),3)
module = pipeline.modules()[1]
self.assertTrue(isinstance(module, ID.IdentifyPrimaryObjects))
self.assertEqual(module.image_name, "DNA")
self.assertEqual(module.object_name, "MyObjects")
self.assertEqual(module.size_range.min, 12)
self.assertEqual(module.size_range.max, 42)
self.assertTrue(module.exclude_size)
self.assertFalse(module.merge_objects)
self.assertTrue(module.exclude_border_objects)
self.assertEqual(module.threshold_algorithm, T.TM_ROBUST_BACKGROUND)
self.assertEqual(module.threshold_modifier, T.TM_GLOBAL)
self.assertAlmostEqual(module.threshold_correction_factor.value, 1.2)
self.assertAlmostEqual(module.threshold_range.min, 0.1)
self.assertAlmostEqual(module.threshold_range.max, 0.6)
self.assertEqual(module.object_fraction.value, "0.01")
self.assertEqual(module.unclump_method, ID.UN_SHAPE)
self.assertEqual(module.watershed_method, ID.WA_SHAPE)
self.assertEqual(module.smoothing_filter_size, 10)
self.assertEqual(module.maxima_suppression_size, 7)
self.assertFalse(module.should_save_outlines)
self.assertEqual(module.save_outlines, "MyOutlines")
self.assertEqual(module.fill_holes, ID.FH_THRESHOLDING)
self.assertTrue(module.automatic_smoothing)
self.assertTrue(module.automatic_suppression)
self.assertEqual(module.manual_threshold, 0)
self.assertEqual(module.binary_image, "MyBinaryImage")
self.assertTrue(module.wants_automatic_log_threshold)
self.assertAlmostEqual(module.manual_log_threshold.value, 0.5)
self.assertEqual(module.two_class_otsu, I.O_TWO_CLASS)
self.assertEqual(module.use_weighted_variance, I.O_WEIGHTED_VARIANCE)
self.assertEqual(module.assign_middle_to_foreground, I.O_FOREGROUND)
self.assertTrue(module.wants_automatic_log_diameter)
self.assertEqual(module.log_diameter, 5)
self.assertEqual(module.limit_choice, ID.LIMIT_TRUNCATE)
self.assertEqual(module.maximum_object_count, 305)
module = pipeline.modules()[2]
self.assertTrue(isinstance(module, ID.IdentifyPrimaryObjects))
self.assertEqual(module.image_name, "DNA")
self.assertEqual(module.object_name, "MyObjects")
self.assertEqual(module.size_range.min, 12)
self.assertEqual(module.size_range.max, 42)
self.assertFalse(module.exclude_size)
self.assertTrue(module.merge_objects)
self.assertFalse(module.exclude_border_objects)
self.assertEqual(module.threshold_algorithm, T.TM_OTSU)
self.assertEqual(module.threshold_modifier, T.TM_ADAPTIVE)
self.assertAlmostEqual(module.threshold_correction_factor.value, 1.2)
self.assertAlmostEqual(module.threshold_range.min, 0.1)
self.assertAlmostEqual(module.threshold_range.max, 0.6)
self.assertEqual(module.object_fraction.value, "0.01")
self.assertEqual(module.unclump_method, ID.UN_INTENSITY)
self.assertEqual(module.watershed_method, ID.WA_PROPAGATE)
self.assertEqual(module.smoothing_filter_size, 10)
self.assertEqual(module.maxima_suppression_size, 7)
self.assertTrue(module.should_save_outlines)
self.assertEqual(module.save_outlines, "MyOutlines")
self.assertEqual(module.fill_holes, ID.FH_NEVER)
self.assertFalse(module.automatic_smoothing)
self.assertFalse(module.automatic_suppression)
self.assertEqual(module.manual_threshold, 0)
self.assertEqual(module.binary_image, "MyBinaryImage")
self.assertFalse(module.wants_automatic_log_threshold)
self.assertAlmostEqual(module.manual_log_threshold.value, 0.5)
self.assertEqual(module.two_class_otsu, I.O_THREE_CLASS)
self.assertEqual(module.use_weighted_variance, I.O_ENTROPY)
self.assertEqual(module.assign_middle_to_foreground, I.O_BACKGROUND)
self.assertFalse(module.wants_automatic_log_diameter)
self.assertEqual(module.log_diameter, 5)
self.assertEqual(module.limit_choice, ID.LIMIT_ERASE)
self.assertEqual(module.maximum_object_count, 305)
# Missing tests for versions 6-9 (!)
def test_04_10_load_v10(self):
# Sorry about this overly-long pipeline; it seemed like we needed to
# revisit many of the choices.
data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:3
DateRevision:20130226215424
ModuleCount:11
HasImagePlaneDetails:False
Images:[module_num:1|svn_version:\'Unknown\'|variable_revision_number:1|show_window:False|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
:
Filter based on rules:No
Filter:or (file does contain "")
Metadata:[module_num:2|svn_version:\'Unknown\'|variable_revision_number:2|show_window:False|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
Extract metadata?:Yes
Extraction method count:2
Extraction method:Automatic
Source:From file name
Regular expression:^(?P<Plate>.*)_(?P<Well>\x5BA-P\x5D\x5B0-9\x5D{2})_s(?P<Site>\x5B0-9\x5D)_w(?P<ChannelNumber>\x5B0-9\x5D)
Regular expression:(?P<Date>\x5B0-9\x5D{4}_\x5B0-9\x5D{2}_\x5B0-9\x5D{2})$
Filter images:All images
:or (file does contain "")
Metadata file location\x3A:
Match file and image metadata:\x5B\x5D
Case insensitive matching:No
Extraction method:Manual
Source:From file name
Regular expression:^(?P<StackName>\x5B^.\x5D+).flex
Regular expression:(?P<Date>\x5B0-9\x5D{4}_\x5B0-9\x5D{2}_\x5B0-9\x5D{2})$
Filter images:All images
:or (file does contain "")
Metadata file location\x3A:
Match file and image metadata:\x5B\x5D
Case insensitive matching:No
NamesAndTypes:[module_num:3|svn_version:\'Unknown\'|variable_revision_number:1|show_window:False|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
Assignment method:Assign all images
Load as:Grayscale image
Image name:Channel0
:\x5B\x5D
Assign channels by:Order
Assignments count:1
Match this rule:or (metadata does StackName "")
Image name:DNA
Objects name:Cell
Load as:Grayscale image
Groups:[module_num:4|svn_version:\'Unknown\'|variable_revision_number:2|show_window:False|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
Do you want to group your images?:Yes
grouping metadata count:1
Metadata category:StackName
IdentifyPrimaryObjects:[module_num:5|svn_version:\'Unknown\'|variable_revision_number:10|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
Select the input image:Channel0
Name the primary objects to be identified:Cells
Typical diameter of objects, in pixel units (Min,Max):15,45
Discard objects outside the diameter range?:Yes
Try to merge too small objects with nearby larger objects?:No
Discard objects touching the border of the image?:Yes
Method to distinguish clumped objects:Intensity
Method to draw dividing lines between clumped objects:Intensity
Size of smoothing filter:11
Suppress local maxima that are closer than this minimum allowed distance:9
Speed up by using lower-resolution image to find local maxima?:Yes
Name the outline image:CellOutlines
Fill holes in identified objects?:Yes
Automatically calculate size of smoothing filter?:Yes
Automatically calculate minimum allowed distance between local maxima?:Yes
Retain outlines of the identified objects?:No
Automatically calculate the threshold using the Otsu method?:Yes
Enter Laplacian of Gaussian threshold:0.2
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:Yes
Enter LoG filter diameter:3
Handling of objects if excessive number of objects identified:Continue
Maximum number of objects:499
Threshold setting version:1
Threshold strategy:Adaptive
Threshold method:Otsu
Smoothing for threshold:Automatic
Threshold smoothing scale:2.0
Threshold correction factor:.80
Lower and upper bounds on threshold:0.01,0.90
Approximate fraction of image covered by objects?:0.05
Manual threshold:0.03
Select the measurement to threshold with:Metadata_Threshold
Select binary image:Segmentation
Masking objects:Wells
Two-class or three-class thresholding?:Two classes
Minimize the weighted variance or the entropy?:Weighted variance
Assign pixels in the middle intensity class to the foreground or the background?:Foreground
Method to calculate adaptive window size:Image size
Size of adaptive window:12
IdentifyPrimaryObjects:[module_num:6|svn_version:\'Unknown\'|variable_revision_number:10|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
Select the input image:Channel1
Name the primary objects to be identified:Cells
Typical diameter of objects, in pixel units (Min,Max):15,45
Discard objects outside the diameter range?:No
Try to merge too small objects with nearby larger objects?:Yes
Discard objects touching the border of the image?:No
Method to distinguish clumped objects:Laplacian of Gaussian
Method to draw dividing lines between clumped objects:None
Size of smoothing filter:11
Suppress local maxima that are closer than this minimum allowed distance:9
Speed up by using lower-resolution image to find local maxima?:No
Name the outline image:CellOutlines
Fill holes in identified objects?:No
Automatically calculate size of smoothing filter?:No
Automatically calculate minimum allowed distance between local maxima?:No
Retain outlines of the identified objects?:Yes
Automatically calculate the threshold using the Otsu method?:No
Enter Laplacian of Gaussian threshold:0.2
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:No
Enter LoG filter diameter:3
Handling of objects if excessive number of objects identified:Erase
Maximum number of objects:499
Threshold setting version:1
Threshold strategy:Automatic
Threshold method:MCT
Smoothing for threshold:Manual
Threshold smoothing scale:2.0
Threshold correction factor:.80
Lower and upper bounds on threshold:0.01,0.09
Approximate fraction of image covered by objects?:0.05
Manual threshold:0.03
Select the measurement to threshold with:Metadata_Threshold
Select binary image:Segmentation
Masking objects:Wells
Two-class or three-class thresholding?:Three classes
Minimize the weighted variance or the entropy?:Entropy
Assign pixels in the middle intensity class to the foreground or the background?:Background
Method to calculate adaptive window size:Custom
Size of adaptive window:12
IdentifyPrimaryObjects:[module_num:7|svn_version:\'Unknown\'|variable_revision_number:10|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
Select the input image:Channel2
Name the primary objects to be identified:Cells
Typical diameter of objects, in pixel units (Min,Max):15,45
Discard objects outside the diameter range?:No
Try to merge too small objects with nearby larger objects?:Yes
Discard objects touching the border of the image?:No
Method to distinguish clumped objects:None
Method to draw dividing lines between clumped objects:Propagate
Size of smoothing filter:11
Suppress local maxima that are closer than this minimum allowed distance:9
Speed up by using lower-resolution image to find local maxima?:No
Name the outline image:CellOutlines
Fill holes in identified objects?:No
Automatically calculate size of smoothing filter?:No
Automatically calculate minimum allowed distance between local maxima?:No
Retain outlines of the identified objects?:Yes
Automatically calculate the threshold using the Otsu method?:No
Enter Laplacian of Gaussian threshold:0.2
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:No
Enter LoG filter diameter:3
Handling of objects if excessive number of objects identified:Truncate
Maximum number of objects:499
Threshold setting version:1
Threshold strategy:Binary image
Threshold method:MoG
Smoothing for threshold:No smoothing
Threshold smoothing scale:2.0
Threshold correction factor:.80
Lower and upper bounds on threshold:0.01,0.09
Approximate fraction of image covered by objects?:0.05
Manual threshold:0.03
Select the measurement to threshold with:Metadata_Threshold
Select binary image:Segmentation
Masking objects:Wells
Two-class or three-class thresholding?:Three classes
Minimize the weighted variance or the entropy?:Entropy
Assign pixels in the middle intensity class to the foreground or the background?:Background
Method to calculate adaptive window size:Custom
Size of adaptive window:12
IdentifyPrimaryObjects:[module_num:8|svn_version:\'Unknown\'|variable_revision_number:10|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
Select the input image:Channel3
Name the primary objects to be identified:Cells
Typical diameter of objects, in pixel units (Min,Max):15,45
Discard objects outside the diameter range?:No
Try to merge too small objects with nearby larger objects?:Yes
Discard objects touching the border of the image?:No
Method to distinguish clumped objects:Shape
Method to draw dividing lines between clumped objects:Shape
Size of smoothing filter:11
Suppress local maxima that are closer than this minimum allowed distance:9
Speed up by using lower-resolution image to find local maxima?:No
Name the outline image:CellOutlines
Fill holes in identified objects?:No
Automatically calculate size of smoothing filter?:No
Automatically calculate minimum allowed distance between local maxima?:No
Retain outlines of the identified objects?:Yes
Automatically calculate the threshold using the Otsu method?:No
Enter Laplacian of Gaussian threshold:0.2
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:No
Enter LoG filter diameter:3
Handling of objects if excessive number of objects identified:Truncate
Maximum number of objects:499
Threshold setting version:1
Threshold strategy:Global
Threshold method:Background
Smoothing for threshold:No smoothing
Threshold smoothing scale:2.0
Threshold correction factor:.80
Lower and upper bounds on threshold:0.01,0.09
Approximate fraction of image covered by objects?:0.05
Manual threshold:0.03
Select the measurement to threshold with:Metadata_Threshold
Select binary image:Segmentation
Masking objects:Wells
Two-class or three-class thresholding?:Three classes
Minimize the weighted variance or the entropy?:Entropy
Assign pixels in the middle intensity class to the foreground or the background?:Background
Method to calculate adaptive window size:Custom
Size of adaptive window:12
IdentifyPrimaryObjects:[module_num:9|svn_version:\'Unknown\'|variable_revision_number:10|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
Select the input image:Channel4
Name the primary objects to be identified:Cells
Typical diameter of objects, in pixel units (Min,Max):15,45
Discard objects outside the diameter range?:No
Try to merge too small objects with nearby larger objects?:Yes
Discard objects touching the border of the image?:No
Method to distinguish clumped objects:Shape
Method to draw dividing lines between clumped objects:Shape
Size of smoothing filter:11
Suppress local maxima that are closer than this minimum allowed distance:9
Speed up by using lower-resolution image to find local maxima?:No
Name the outline image:CellOutlines
Fill holes in identified objects?:No
Automatically calculate size of smoothing filter?:No
Automatically calculate minimum allowed distance between local maxima?:No
Retain outlines of the identified objects?:Yes
Automatically calculate the threshold using the Otsu method?:No
Enter Laplacian of Gaussian threshold:0.2
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:No
Enter LoG filter diameter:3
Handling of objects if excessive number of objects identified:Truncate
Maximum number of objects:499
Threshold setting version:1
Threshold strategy:Manual
Threshold method:Kapur
Smoothing for threshold:No smoothing
Threshold smoothing scale:2.0
Threshold correction factor:.80
Lower and upper bounds on threshold:0.01,0.09
Approximate fraction of image covered by objects?:0.05
Manual threshold:0.03
Select the measurement to threshold with:Metadata_Threshold
Select binary image:Segmentation
Masking objects:Wells
Two-class or three-class thresholding?:Three classes
Minimize the weighted variance or the entropy?:Entropy
Assign pixels in the middle intensity class to the foreground or the background?:Background
Method to calculate adaptive window size:Custom
Size of adaptive window:12
IdentifyPrimaryObjects:[module_num:10|svn_version:\'Unknown\'|variable_revision_number:10|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
Select the input image:Channel4
Name the primary objects to be identified:Cells
Typical diameter of objects, in pixel units (Min,Max):15,45
Discard objects outside the diameter range?:No
Try to merge too small objects with nearby larger objects?:Yes
Discard objects touching the border of the image?:No
Method to distinguish clumped objects:Shape
Method to draw dividing lines between clumped objects:Shape
Size of smoothing filter:11
Suppress local maxima that are closer than this minimum allowed distance:9
Speed up by using lower-resolution image to find local maxima?:No
Name the outline image:CellOutlines
Fill holes in identified objects?:No
Automatically calculate size of smoothing filter?:No
Automatically calculate minimum allowed distance between local maxima?:No
Retain outlines of the identified objects?:Yes
Automatically calculate the threshold using the Otsu method?:No
Enter Laplacian of Gaussian threshold:0.2
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:No
Enter LoG filter diameter:3
Handling of objects if excessive number of objects identified:Truncate
Maximum number of objects:499
Threshold setting version:1
Threshold strategy:Measurement
Threshold method:RidlerCalvard
Smoothing for threshold:No smoothing
Threshold smoothing scale:2.0
Threshold correction factor:.80
Lower and upper bounds on threshold:0.01,0.09
Approximate fraction of image covered by objects?:0.05
Manual threshold:0.03
Select the measurement to threshold with:Metadata_Threshold
Select binary image:Segmentation
Masking objects:Wells
Two-class or three-class thresholding?:Three classes
Minimize the weighted variance or the entropy?:Entropy
Assign pixels in the middle intensity class to the foreground or the background?:Background
Method to calculate adaptive window size:Custom
Size of adaptive window:12
IdentifyPrimaryObjects:[module_num:11|svn_version:\'Unknown\'|variable_revision_number:10|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
Select the input image:Channel4
Name the primary objects to be identified:Cells
Typical diameter of objects, in pixel units (Min,Max):15,45
Discard objects outside the diameter range?:No
Try to merge too small objects with nearby larger objects?:Yes
Discard objects touching the border of the image?:No
Method to distinguish clumped objects:Shape
Method to draw dividing lines between clumped objects:Shape
Size of smoothing filter:11
Suppress local maxima that are closer than this minimum allowed distance:9
Speed up by using lower-resolution image to find local maxima?:No
Name the outline image:CellOutlines
Fill holes in identified objects?:No
Automatically calculate size of smoothing filter?:No
Automatically calculate minimum allowed distance between local maxima?:No
Retain outlines of the identified objects?:Yes
Automatically calculate the threshold using the Otsu method?:No
Enter Laplacian of Gaussian threshold:0.2
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:No
Enter LoG filter diameter:3
Handling of objects if excessive number of objects identified:Truncate
Maximum number of objects:499
Threshold setting version:1
Threshold strategy:Per object
Threshold method:RobustBackground
Smoothing for threshold:No smoothing
Threshold smoothing scale:2.0
Threshold correction factor:.80
Lower and upper bounds on threshold:0.01,0.09
Approximate fraction of image covered by objects?:0.05
Manual threshold:0.03
Select the measurement to threshold with:Metadata_Threshold
Select binary image:Segmentation
Masking objects:Wells
Two-class or three-class thresholding?:Three classes
Minimize the weighted variance or the entropy?:Entropy
Assign pixels in the middle intensity class to the foreground or the background?:Background
Method to calculate adaptive window size:Custom
Size of adaptive window:12
"""
pipeline = cellprofiler.pipeline.Pipeline()
def callback(caller,event):
self.assertFalse(
isinstance(event, cellprofiler.pipeline.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(data))
module = pipeline.modules()[4]
self.assertTrue(isinstance(module, ID.IdentifyPrimaryObjects))
self.assertEqual(module.image_name, "Channel0")
self.assertEqual(module.object_name, "Cells")
self.assertEqual(module.size_range.min, 15)
self.assertEqual(module.size_range.max, 45)
self.assertTrue(module.exclude_size)
self.assertFalse(module.merge_objects)
self.assertTrue(module.exclude_border_objects)
self.assertEqual(module.unclump_method, ID.UN_INTENSITY)
self.assertEqual(module.watershed_method, ID.WA_INTENSITY)
self.assertTrue(module.automatic_smoothing)
self.assertEqual(module.smoothing_filter_size, 11)
self.assertTrue(module.automatic_suppression)
self.assertEqual(module.maxima_suppression_size, 9)
self.assertTrue(module.low_res_maxima)
self.assertFalse(module.should_save_outlines)
self.assertEqual(module.save_outlines, "CellOutlines")
self.assertEqual(module.fill_holes, ID.FH_THRESHOLDING)
self.assertTrue(module.wants_automatic_log_threshold)
self.assertEqual(module.manual_log_threshold, .2)
self.assertTrue(module.wants_automatic_log_diameter)
self.assertEqual(module.log_diameter, 3)
self.assertEqual(module.limit_choice, ID.LIMIT_NONE)
self.assertEqual(module.maximum_object_count, 499)
#
self.assertEqual(module.threshold_scope, I.TS_ADAPTIVE)
self.assertEqual(module.threshold_method, I.TM_OTSU)
self.assertEqual(module.threshold_smoothing_choice, I.TSM_AUTOMATIC)
self.assertEqual(module.threshold_smoothing_scale, 2.0)
self.assertAlmostEqual(module.threshold_correction_factor, .80)
self.assertAlmostEqual(module.threshold_range.min, 0.01)
self.assertAlmostEqual(module.threshold_range.max, 0.09)
self.assertAlmostEqual(module.object_fraction, 0.05)
self.assertAlmostEqual(module.manual_threshold, 0.03)
self.assertEqual(module.thresholding_measurement, "Metadata_Threshold")
self.assertEqual(module.binary_image, "Segmentation")
self.assertEqual(module.masking_objects, "Wells")
self.assertEqual(module.two_class_otsu, I.O_TWO_CLASS)
self.assertEqual(module.use_weighted_variance, I.O_WEIGHTED_VARIANCE)
self.assertEqual(module.assign_middle_to_foreground, I.O_FOREGROUND)
self.assertEqual(module.adaptive_window_method, I.FI_IMAGE_SIZE)
self.assertEqual(module.adaptive_window_size, 12)
#
# Test alternate settings using subsequent instances of IDPrimary
#
module = pipeline.modules()[5]
self.assertTrue(isinstance(module, ID.IdentifyPrimaryObjects))
self.assertFalse(module.exclude_size)
self.assertTrue(module.merge_objects)
self.assertFalse(module.exclude_border_objects)
self.assertEqual(module.unclump_method, ID.UN_LOG)
self.assertEqual(module.watershed_method, ID.WA_NONE)
self.assertFalse(module.automatic_smoothing)
self.assertFalse(module.automatic_suppression)
self.assertFalse(module.low_res_maxima)
self.assertTrue(module.should_save_outlines)
self.assertEqual(module.fill_holes, ID.FH_NEVER)
self.assertFalse(module.wants_automatic_log_threshold)
self.assertFalse(module.wants_automatic_log_diameter)
self.assertEqual(module.limit_choice, ID.LIMIT_ERASE)
self.assertEqual(module.threshold_scope, I.TS_AUTOMATIC)
self.assertEqual(module.threshold_method, I.TM_MCT)
self.assertEqual(module.threshold_smoothing_choice, I.TSM_MANUAL)
self.assertEqual(module.two_class_otsu, I.O_THREE_CLASS)
self.assertEqual(module.use_weighted_variance, I.O_ENTROPY)
self.assertEqual(module.assign_middle_to_foreground, I.O_BACKGROUND)
self.assertEqual(module.adaptive_window_method, I.FI_CUSTOM)
module = pipeline.modules()[6]
self.assertTrue(isinstance(module, ID.IdentifyPrimaryObjects))
self.assertEqual(module.unclump_method, ID.UN_NONE)
self.assertEqual(module.watershed_method, ID.WA_PROPAGATE)
self.assertEqual(module.limit_choice, ID.LIMIT_TRUNCATE)
self.assertEqual(module.threshold_scope, I.TS_BINARY_IMAGE)
self.assertEqual(module.threshold_method, I.TM_MOG)
self.assertEqual(module.threshold_smoothing_choice, I.TSM_NONE)
module = pipeline.modules()[7]
self.assertTrue(isinstance(module, ID.IdentifyPrimaryObjects))
self.assertEqual(module.unclump_method, ID.UN_SHAPE)
self.assertEqual(module.watershed_method, ID.WA_SHAPE)
self.assertEqual(module.threshold_scope, I.TS_GLOBAL)
self.assertEqual(module.threshold_method, T.TM_BACKGROUND)
module = pipeline.modules()[8]
self.assertTrue(isinstance(module, ID.IdentifyPrimaryObjects))
self.assertEqual(module.threshold_scope, I.TS_MANUAL)
self.assertEqual(module.threshold_method, T.TM_KAPUR)
module = pipeline.modules()[9]
self.assertTrue(isinstance(module, ID.IdentifyPrimaryObjects))
self.assertEqual(module.threshold_scope, I.TS_MEASUREMENT)
self.assertEqual(module.threshold_method, T.TM_RIDLER_CALVARD)
module = pipeline.modules()[10]
self.assertTrue(isinstance(module, ID.IdentifyPrimaryObjects))
self.assertEqual(module.threshold_scope, I.TS_PER_OBJECT)
self.assertEqual(module.threshold_method, T.TM_ROBUST_BACKGROUND)
self.assertEqual(module.rb_custom_choice, I.RB_DEFAULT)
self.assertEqual(module.lower_outlier_fraction, .05)
self.assertEqual(module.upper_outlier_fraction, .05)
self.assertEqual(module.averaging_method, I.RB_MEAN)
self.assertEqual(module.variance_method, I.RB_SD)
self.assertEqual(module.number_of_deviations, 2)
def test_04_10_01_load_new_robust_background(self):
#
# Test custom robust background parameters.
#
data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:3
DateRevision:20141114191709
GitHash:d186f20
ModuleCount:3
HasImagePlaneDetails:False
IdentifyPrimaryObjects:[module_num:1|svn_version:\'Unknown\'|variable_revision_number:10|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
Select the input image:DNA
Name the primary objects to be identified:Nuclei
Typical diameter of objects, in pixel units (Min,Max):10,40
Discard objects outside the diameter range?:Yes
Try to merge too small objects with nearby larger objects?:No
Discard objects touching the border of the image?:Yes
Method to distinguish clumped objects:Intensity
Method to draw dividing lines between clumped objects:Intensity
Size of smoothing filter:10
Suppress local maxima that are closer than this minimum allowed distance:7.0
Speed up by using lower-resolution image to find local maxima?:Yes
Name the outline image:PrimaryOutlines
Fill holes in identified objects?:After both thresholding and declumping
Automatically calculate size of smoothing filter for declumping?:Yes
Automatically calculate minimum allowed distance between local maxima?:Yes
Retain outlines of the identified objects?:No
Automatically calculate the threshold using the Otsu method?:Yes
Enter Laplacian of Gaussian threshold:0.5
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:Yes
Enter LoG filter diameter:5.0
Handling of objects if excessive number of objects identified:Continue
Maximum number of objects:500
Threshold setting version:2
Threshold strategy:Global
Thresholding method:RobustBackground
Select the smoothing method for thresholding:Automatic
Threshold smoothing scale:1.0
Threshold correction factor:1.0
Lower and upper bounds on threshold:0.0,1.0
Approximate fraction of image covered by objects?:0.01
Manual threshold:0.0
Select the measurement to threshold with:None
Select binary image:None
Masking objects:None
Two-class or three-class thresholding?:Two classes
Minimize the weighted variance or the entropy?:Weighted variance
Assign pixels in the middle intensity class to the foreground or the background?:Foreground
Method to calculate adaptive window size:Image size
Size of adaptive window:10
Use default parameters?:Custom
Lower outlier fraction:0.1
Upper outlier fraction:0.2
Averaging method:Mean
Variance method:Standard deviation
# of deviations:2.5
IdentifyPrimaryObjects:[module_num:2|svn_version:\'Unknown\'|variable_revision_number:10|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
Select the input image:DNA
Name the primary objects to be identified:Nuclei
Typical diameter of objects, in pixel units (Min,Max):10,40
Discard objects outside the diameter range?:Yes
Try to merge too small objects with nearby larger objects?:No
Discard objects touching the border of the image?:Yes
Method to distinguish clumped objects:Intensity
Method to draw dividing lines between clumped objects:Intensity
Size of smoothing filter:10
Suppress local maxima that are closer than this minimum allowed distance:7.0
Speed up by using lower-resolution image to find local maxima?:Yes
Name the outline image:PrimaryOutlines
Fill holes in identified objects?:After both thresholding and declumping
Automatically calculate size of smoothing filter for declumping?:Yes
Automatically calculate minimum allowed distance between local maxima?:Yes
Retain outlines of the identified objects?:No
Automatically calculate the threshold using the Otsu method?:Yes
Enter Laplacian of Gaussian threshold:0.5
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:Yes
Enter LoG filter diameter:5.0
Handling of objects if excessive number of objects identified:Continue
Maximum number of objects:500
Threshold setting version:2
Threshold strategy:Global
Thresholding method:RobustBackground
Select the smoothing method for thresholding:Automatic
Threshold smoothing scale:1.0
Threshold correction factor:1.0
Lower and upper bounds on threshold:0.0,1.0
Approximate fraction of image covered by objects?:0.01
Manual threshold:0.0
Select the measurement to threshold with:None
Select binary image:None
Masking objects:None
Two-class or three-class thresholding?:Two classes
Minimize the weighted variance or the entropy?:Weighted variance
Assign pixels in the middle intensity class to the foreground or the background?:Foreground
Method to calculate adaptive window size:Image size
Size of adaptive window:10
Use default parameters?:Custom
Lower outlier fraction:0.1
Upper outlier fraction:0.2
Averaging method:Median
Variance method:Median absolute deviation
# of deviations:2.5
IdentifyPrimaryObjects:[module_num:3|svn_version:\'Unknown\'|variable_revision_number:10|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True|wants_pause:False]
Select the input image:DNA
Name the primary objects to be identified:Nuclei
Typical diameter of objects, in pixel units (Min,Max):10,40
Discard objects outside the diameter range?:Yes
Try to merge too small objects with nearby larger objects?:No
Discard objects touching the border of the image?:Yes
Method to distinguish clumped objects:Intensity
Method to draw dividing lines between clumped objects:Intensity
Size of smoothing filter:10
Suppress local maxima that are closer than this minimum allowed distance:7.0
Speed up by using lower-resolution image to find local maxima?:Yes
Name the outline image:PrimaryOutlines
Fill holes in identified objects?:After both thresholding and declumping
Automatically calculate size of smoothing filter for declumping?:Yes
Automatically calculate minimum allowed distance between local maxima?:Yes
Retain outlines of the identified objects?:No
Automatically calculate the threshold using the Otsu method?:Yes
Enter Laplacian of Gaussian threshold:0.5
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:Yes
Enter LoG filter diameter:5.0
Handling of objects if excessive number of objects identified:Continue
Maximum number of objects:500
Threshold setting version:2
Threshold strategy:Global
Thresholding method:RobustBackground
Select the smoothing method for thresholding:Automatic
Threshold smoothing scale:1.0
Threshold correction factor:1.0
Lower and upper bounds on threshold:0.0,1.0
Approximate fraction of image covered by objects?:0.01
Manual threshold:0.0
Select the measurement to threshold with:None
Select binary image:None
Masking objects:None
Two-class or three-class thresholding?:Two classes
Minimize the weighted variance or the entropy?:Weighted variance
Assign pixels in the middle intensity class to the foreground or the background?:Foreground
Method to calculate adaptive window size:Image size
Size of adaptive window:10
Use default parameters?:Custom
Lower outlier fraction:0.1
Upper outlier fraction:0.2
Averaging method:Mode
Variance method:Median absolute deviation
# of deviations:2.5
"""
pipeline = cellprofiler.pipeline.Pipeline()
def callback(caller,event):
self.assertFalse(
isinstance(event, cellprofiler.pipeline.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(data))
for module, averaging_method, variance_method in zip(
pipeline.modules(),
(I.RB_MEAN, I.RB_MEDIAN, I.RB_MODE),
(I.RB_SD, I.RB_MAD, I.RB_MAD)):
assert isinstance(module, ID.IdentifyPrimaryObjects)
self.assertEqual(module.lower_outlier_fraction, .1)
self.assertEqual(module.upper_outlier_fraction, .2)
self.assertEqual(module.number_of_deviations, 2.5)
self.assertEqual(module.averaging_method, averaging_method)
self.assertEqual(module.variance_method, variance_method)
def test_05_01_discard_large(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = True
x.size_range.min = 10
x.size_range.max = 40
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_MANUAL
x.manual_threshold.value = .3
img = np.zeros((200,200))
draw_circle(img,(100,100),25,.5)
draw_circle(img,(25,25),10,.5)
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertEqual(objects.segmented[25,25],1,"The small object was not there")
self.assertEqual(objects.segmented[100,100],0,"The large object was not filtered out")
self.assertTrue(objects.small_removed_segmented[25,25]>0,"The small object was not in the small_removed label set")
self.assertTrue(objects.small_removed_segmented[100,100]>0,"The large object was not in the small-removed label set")
self.assertTrue(objects.unedited_segmented[25,25],"The small object was not in the unedited set")
self.assertTrue(objects.unedited_segmented[100,100],"The large object was not in the unedited set")
location_center_x = measurements.get_current_measurement("my_object","Location_Center_X")
self.assertTrue(isinstance(location_center_x,np.ndarray))
self.assertEqual(np.product(location_center_x.shape),1)
def test_05_02_keep_large(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = False
x.size_range.min = 10
x.size_range.max = 40
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_MANUAL
x.manual_threshold.value = .3
img = np.zeros((200,200))
draw_circle(img,(100,100),25,.5)
draw_circle(img,(25,25),10,.5)
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertTrue(objects.segmented[25,25],"The small object was not there")
self.assertTrue(objects.segmented[100,100],"The large object was filtered out")
self.assertTrue(objects.unedited_segmented[25,25],"The small object was not in the unedited set")
self.assertTrue(objects.unedited_segmented[100,100],"The large object was not in the unedited set")
location_center_x = measurements.get_current_measurement("my_object","Location_Center_X")
self.assertTrue(isinstance(location_center_x,np.ndarray))
self.assertEqual(np.product(location_center_x.shape),2)
def test_05_03_discard_small(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = True
x.size_range.min = 40
x.size_range.max = 60
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_MANUAL
x.manual_threshold.value = .3
img = np.zeros((200,200))
draw_circle(img,(100,100),25,.5)
draw_circle(img,(25,25),10,.5)
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
self.assertEqual(objects.segmented[25,25],0,"The small object was not filtered out")
self.assertEqual(objects.segmented[100,100],1,"The large object was not present")
self.assertTrue(objects.small_removed_segmented[25,25]==0,"The small object was in the small_removed label set")
self.assertTrue(objects.small_removed_segmented[100,100]>0,"The large object was not in the small-removed label set")
self.assertTrue(objects.unedited_segmented[25,25],"The small object was not in the unedited set")
self.assertTrue(objects.unedited_segmented[100,100],"The large object was not in the unedited set")
location_center_x = measurements.get_current_measurement("my_object","Location_Center_X")
self.assertTrue(isinstance(location_center_x,np.ndarray))
self.assertEqual(np.product(location_center_x.shape),1)
def test_05_02_discard_edge(self):
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = False
x.size_range.min = 10
x.size_range.max = 40
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_MANUAL
x.manual_threshold.value = .3
img = np.zeros((100,100))
centers = [(50,50),(10,50),(50,10),(90,50),(50,90)]
present = [ True, False, False, False, False]
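# Only the centered circle should survive; the other four touch the image border and are discarded.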
for center in centers:
draw_circle(img,center,15,.5)
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
for center, p in zip(centers,present):
if p:
self.assertTrue(objects.segmented[center[0],center[1]] > 0)
self.assertTrue(objects.small_removed_segmented[center[0],center[1]] > 0)
else:
self.assertTrue(objects.segmented[center[0],center[1]] == 0)
self.assertTrue(objects.small_removed_segmented[center[0],center[1]] == 0)
self.assertTrue(objects.unedited_segmented[center[0],center[1]] > 0)
def test_05_03_discard_with_mask(self):
"""Check discard of objects that are on the border of a mask"""
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = False
x.size_range.min = 10
x.size_range.max = 40
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_MANUAL
x.manual_threshold.value = .3
img = np.zeros((200,200))
centers = [(100,100),(30,100),(100,30),(170,100),(100,170)]
present = [ True, False, False, False, False]
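# Only the centered circle is kept; the others touch the border of the image mask (rows/cols 25:175) and are discarded.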
for center in centers:
draw_circle(img,center,15,.5)
mask = np.zeros((200,200))
mask[25:175,25:175]=1
image = cpi.Image(img,mask)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
objects = object_set.get_objects("my_object")
for center, p in zip(centers,present):
if p:
self.assertTrue(objects.segmented[center[0],center[1]] > 0)
self.assertTrue(objects.small_removed_segmented[center[0],center[1]] > 0)
else:
self.assertTrue(objects.segmented[center[0],center[1]] == 0)
self.assertTrue(objects.small_removed_segmented[center[0],center[1]] == 0)
self.assertTrue(objects.unedited_segmented[center[0],center[1]] > 0)
def test_06_01_regression_diagonal(self):
"""Regression test - was using one-connected instead of 3-connected structuring element"""
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = False
x.smoothing_filter_size.value = 0
x.automatic_smoothing.value = False
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_MANUAL
x.threshold_smoothing_choice.value = I.TSM_NONE
x.manual_threshold.value = .5
img = np.zeros((10,10))
img[4,4]=1
img[5,5]=1
image = cpi.Image(img)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
self.assertEqual(len(object_set.object_names),1)
self.assertTrue("my_object" in object_set.object_names)
objects = object_set.get_objects("my_object")
segmented = objects.segmented
self.assertTrue(np.all(segmented[img>0] == 1))
self.assertTrue(np.all(img[segmented==1] > 0))
def test_06_02_regression_adaptive_mask(self):
"""Regression test - mask all but one pixel / adaptive"""
for o_alg in (I.O_WEIGHTED_VARIANCE, I.O_ENTROPY):
x = ID.IdentifyPrimaryObjects()
x.use_weighted_variance.value = o_alg
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = False
x.threshold_scope.value = T.TM_ADAPTIVE
x.threshold_method.value = T.TM_OTSU
np.random.seed(62)
img = np.random.uniform(size=(100,100))
mask = np.zeros(img.shape, bool)
mask[-1,-1] = True
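# With only a single unmasked pixel, adaptive Otsu has nothing to threshold and no objects should be found.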
image = cpi.Image(img, mask)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.providers.append(cpi.VanillaImageProvider("my_image",image))
object_set = cpo.ObjectSet()
measurements = cpmeas.Measurements()
pipeline = cellprofiler.pipeline.Pipeline()
x.run(Workspace(pipeline,x,image_set,object_set,measurements,None))
self.assertEqual(len(object_set.object_names),1)
self.assertTrue("my_object" in object_set.object_names)
objects = object_set.get_objects("my_object")
segmented = objects.segmented
self.assertTrue(np.all(segmented == 0))
def test_07_01_adaptive_otsu_small(self):
"""Test the function, get_threshold, using Otsu adaptive / small
Use a small image (120 x 110) that is broken into four blocks
(split at row 60 and column 55) and check that the computed
threshold differs between the blocks
"""
np.random.seed(0)
image = np.zeros((120,110))
for i0,i1 in ((0,60),(60,120)):
for j0,j1 in ((0,55),(55,110)):
dmin = float(i0 * 2 + j0) / 500.0
dmult = 1.0-dmin
# use the sine here to get a bimodal distribution of values
r = np.random.uniform(0,np.pi*2,(60,55))
rsin = (np.sin(r) + 1) / 2
image[i0:i1,j0:j1] = dmin + rsin * dmult
workspace, x = self.make_workspace(image)
assert isinstance(x, ID.IdentifyPrimaryObjects)
x.threshold_scope.value = T.TM_ADAPTIVE
x.threshold_method.value = T.TM_OTSU
threshold, global_threshold = x.get_threshold(
cpi.Image(image), np.ones((120,110),bool), workspace)
self.assertTrue(threshold[0,0] != threshold[0,109])
self.assertTrue(threshold[0,0] != threshold[119,0])
self.assertTrue(threshold[0,0] != threshold[119,109])
def test_07_02_adaptive_otsu_big(self):
"""Test the function, get_threshold, using Otsu adaptive / big
Use a large image (525 x 525) to break the image into 100
pieces, check that the threshold is different in each block
and that boundaries occur where expected
"""
np.random.seed(0)
image = np.zeros((525,525))
blocks = []
for i in range(10):
for j in range(10):
# the following makes a pattern of thresholds where
# each square has a different threshold from its 8-connected
# neighbors
dmin = float((i % 2) * 2 + (j%2)) / 8.0
dmult = 1.0-dmin
def b(x):
return int(float(x)*52.5)
dim = ((b(i),b(i+1)),(b(j),b(j+1)))
blocks.append(dim)
((i0,i1),(j0,j1)) = dim
# use the sine here to get a bimodal distribution of values
r = np.random.uniform(0,np.pi*2,(i1-i0,j1-j0))
rsin = (np.sin(r) + 1) / 2
image[i0:i1,j0:j1] = dmin + rsin * dmult
workspace, x = self.make_workspace(image)
assert isinstance(x, ID.IdentifyPrimaryObjects)
x.threshold_scope.value = T.TM_ADAPTIVE
x.threshold_method.value = T.TM_OTSU
threshold, global_threshold = x.get_threshold(
cpi.Image(image), np.ones((525,525),bool), workspace)
def test_08_01_per_object_otsu(self):
"""Test get_threshold using Otsu per-object"""
image = np.ones((20,20)) * .08
draw_circle(image,(5,5),2,.1)
draw_circle(image,(15,15),3,.1)
draw_circle(image,(15,15),2,.2)
labels = np.zeros((20,20),int)
draw_circle(labels,(5,5),3,1)
draw_circle(labels,(15,15),3,2)
workspace, x = self.make_workspace(image, labels=labels)
x.threshold_scope.value = I.TS_PER_OBJECT
x.threshold_method.value = T.TM_OTSU
threshold, global_threshold = x.get_threshold(cpi.Image(image),
np.ones((20,20), bool),
workspace)
t1 = threshold[5,5]
t2 = threshold[15,15]
self.assertTrue(t1 < .1)
self.assertTrue(t2 > .1)
self.assertTrue(t2 < .2)
self.assertTrue(np.all(threshold[labels==1] == threshold[5,5]))
self.assertTrue(np.all(threshold[labels==2] == threshold[15,15]))
def test_08_02_per_object_otsu_run(self):
"""Test IdentifyPrimAutomatic per object through the Run function"""
image = np.ones((20,20))*0.06
draw_circle(image,(5,5),5,.05)
draw_circle(image,(5,5),2,.15)
draw_circle(image,(15,15),5,.05)
draw_circle(image,(15,15),2,.15)
image = add_noise(image, .01)
labels = np.zeros((20,20),int)
draw_circle(labels,(5,5),5,1)
draw_circle(labels,(15,15),5,2)
expected_labels = np.zeros((20,20),int)
draw_circle(expected_labels,(5,5),2,1)
draw_circle(expected_labels,(15,15),2,2)
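# Per-object Otsu within each labeled region should detect only the bright inner spots, matching expected_labels.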
workspace, x = self.make_workspace(image, labels = labels)
x.exclude_size.value = False
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_PER_OBJECT
x.threshold_method.value = T.TM_OTSU
x.threshold_correction_factor.value = 1.05
x.run(workspace)
labels = workspace.object_set.get_objects(OBJECTS_NAME).segmented
# Do a little indexing trick so we can ignore which object got
# which label
self.assertNotEqual(labels[5,5], labels[15,15])
indexes = np.array([0, labels[5,5], labels[15,15]])
self.assertTrue(np.all(indexes[labels] == expected_labels))
def test_08_03_per_objects_image_mask(self):
image = np.ones((20,20))*0.06
draw_circle(image,(5,5),5,.05)
draw_circle(image,(5,5),2,.15)
image = add_noise(image, .01)
mask = np.zeros((20,20), bool)
draw_circle(mask, (5,5), 5, 1)
expected_labels = np.zeros((20,20),int)
draw_circle(expected_labels,(5,5),2,1)
workspace, x = self.make_workspace(image, mask=mask)
x.masking_objects.value = I.O_FROM_IMAGE
x.exclude_size.value = False
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_PER_OBJECT
x.threshold_method.value = T.TM_OTSU
x.threshold_correction_factor.value = 1.05
x.run(workspace)
labels = workspace.object_set.get_objects(OBJECTS_NAME).segmented
self.assertTrue(np.all(labels == expected_labels))
def test_09_01_small_images(self):
"""Test mixture of gaussians thresholding with few pixels
Run MOG to see if it blows up, given 0-10 pixels"""
r = np.random.RandomState()
r.seed(91)
image = r.uniform(size=(9, 11))
ii, jj = np.mgrid[0:image.shape[0], 0:image.shape[1]]
ii, jj = ii.flatten(), jj.flatten()
for threshold_method in (T.TM_BACKGROUND, T.TM_KAPUR, T.TM_MCT,
T.TM_MOG, T.TM_OTSU, T.TM_RIDLER_CALVARD,
T.TM_ROBUST_BACKGROUND):
for i in range(11):
mask = np.zeros(image.shape, bool)
if i:
p = r.permutation(np.prod(image.shape))[:i]
mask[ii[p], jj[p]] = True
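# Unmask i randomly chosen pixels; the threshold should depend only on those unmasked values, which is verified below by re-running on a new image that shares them.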
workspace, x = self.make_workspace(image, mask)
x.threshold_method.value = threshold_method
x.threshold_scope.value = I.TS_GLOBAL
l, g = x.get_threshold(cpi.Image(image), mask, workspace)
v = image[mask]
image = r.uniform(size=(9, 11))
image[mask] = v
l1, g1 = x.get_threshold(cpi.Image(image), mask, workspace)
self.assertAlmostEqual(l1, l)
def test_09_02_mog_fly(self):
"""Test mixture of gaussians thresholding on the fly image"""
image = fly_image()
workspace, x = self.make_workspace(image)
x.threshold_method.value = T.TM_MOG
x.threshold_scope.value = I.TS_GLOBAL
x.object_fraction.value = '0.10'
local_threshold,threshold = x.get_threshold(
cpi.Image(image), np.ones(image.shape,bool), workspace)
self.assertTrue(threshold > 0.036)
self.assertTrue(threshold < 0.040)
x.object_fraction.value = '0.20'
local_threshold,threshold = x.get_threshold(
cpi.Image(image), np.ones(image.shape,bool), workspace)
self.assertTrue(threshold > 0.0084)
self.assertTrue(threshold < 0.0088)
x.object_fraction.value = '0.50'
local_threshold,threshold = x.get_threshold(
cpi.Image(image), np.ones(image.shape,bool), workspace)
self.assertTrue(threshold > 0.0082)
self.assertTrue(threshold < 0.0086)
def test_10_02_test_background_fly(self):
image = fly_image()
workspace, x = self.make_workspace(image)
x.threshold_method.value = T.TM_BACKGROUND
x.threshold_scope.value = I.TS_GLOBAL
local_threshold,threshold = x.get_threshold(
cpi.Image(image), np.ones(image.shape,bool), workspace)
self.assertTrue(threshold > 0.030)
self.assertTrue(threshold < 0.032)
def test_10_03_test_background_mog(self):
'''Test the background method with a mixture of gaussian distributions'''
np.random.seed(103)
image = np.random.normal(.2, .01, size=10000)
ind = np.random.permutation(int(image.shape[0]))[:image.shape[0] / 5]
image[ind] = np.random.normal(.5, .2, size=len(ind))
image[image < 0] = 0
image[image > 1] = 1
image[0] = 0
image[1] = 1
image.shape = (100,100)
workspace, x = self.make_workspace(image)
x.threshold_method.value = T.TM_BACKGROUND
x.threshold_scope.value = I.TS_GLOBAL
local_threshold,threshold = x.get_threshold(
cpi.Image(image), np.ones(image.shape,bool), workspace)
self.assertTrue(threshold > .18 * 2)
self.assertTrue(threshold < .22 * 2)
def test_11_01_test_robust_background_fly(self):
image = fly_image()
workspace, x = self.make_workspace(image)
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_ROBUST_BACKGROUND
local_threshold,threshold = x.get_threshold(
cpi.Image(image), np.ones(image.shape,bool), workspace)
self.assertTrue(threshold > 0.054)
self.assertTrue(threshold < 0.056)
def test_12_01_test_ridler_calvard_background_fly(self):
image = fly_image()
workspace, x = self.make_workspace(image)
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_RIDLER_CALVARD
local_threshold,threshold = x.get_threshold(
cpi.Image(image), np.ones(image.shape,bool), workspace)
self.assertTrue(threshold > 0.017)
self.assertTrue(threshold < 0.019)
def test_13_01_test_kapur_background_fly(self):
image = fly_image()
workspace, x = self.make_workspace(image)
x.threshold_scope.value = I.TS_GLOBAL
x.threshold_method.value = T.TM_KAPUR
local_threshold,threshold = x.get_threshold(
cpi.Image(image), np.ones(image.shape,bool), workspace)
self.assertTrue(threshold > 0.015)
self.assertTrue(threshold < 0.020)
def test_14_01_test_manual_background(self):
"""Test manual background"""
workspace, x = self.make_workspace(np.zeros((10, 10)))
x = ID.IdentifyPrimaryObjects()
x.threshold_scope.value = T.TM_MANUAL
x.manual_threshold.value = .5
local_threshold,threshold = x.get_threshold(cpi.Image(np.zeros((10,10))),
np.ones((10,10),bool),
workspace)
self.assertTrue(threshold == .5)
self.assertTrue(local_threshold == .5)
def test_15_01_test_binary_background(self):
img = np.zeros((200,200),np.float32)
thresh = np.zeros((200,200),bool)
draw_circle(thresh,(100,100),50,True)
draw_circle(thresh,(25,25),20,True)
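# With the Binary image strategy the thresholded image is supplied directly, so the two circles should yield exactly two objects.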
workspace, x = self.make_workspace(img, binary_image=thresh)
x.exclude_size.value = False
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = I.TS_BINARY_IMAGE
x.run(workspace)
count_ftr = I.C_COUNT + "_" + OBJECTS_NAME
m = workspace.measurements
self.assertTrue(m.has_feature(cpmeas.IMAGE, count_ftr))
count = m.get_current_measurement(cpmeas.IMAGE, count_ftr)
self.assertEqual(count,2)
def test_16_01_get_measurement_columns(self):
'''Test the get_measurement_columns method'''
x = ID.IdentifyPrimaryObjects()
oname = "my_object"
x.object_name.value = oname
x.image_name.value = "my_image"
columns = x.get_measurement_columns(None)
expected_columns = [
(cpmeas.IMAGE, format%oname, coltype )
for format,coltype in ((I.FF_COUNT, cpmeas.COLTYPE_INTEGER),
(I.FF_FINAL_THRESHOLD, cpmeas.COLTYPE_FLOAT),
(I.FF_ORIG_THRESHOLD, cpmeas.COLTYPE_FLOAT),
(I.FF_WEIGHTED_VARIANCE, cpmeas.COLTYPE_FLOAT),
(I.FF_SUM_OF_ENTROPIES, cpmeas.COLTYPE_FLOAT))]
expected_columns += [(oname, feature, cpmeas.COLTYPE_FLOAT)
for feature in (I.M_LOCATION_CENTER_X,
I.M_LOCATION_CENTER_Y)]
expected_columns += [(oname, I.M_NUMBER_OBJECT_NUMBER, cpmeas.COLTYPE_INTEGER)]
self.assertEqual(len(columns), len(expected_columns))
for column in columns:
self.assertTrue(any(all([colval==exval for colval, exval in zip(column, expected)])
for expected in expected_columns))
def test_17_01_regression_holes(self):
'''Regression test - fill holes caused by filtered object
This was created as a regression test for the bug, IMG-191, but
didn't exercise the bug. It's a good test of watershed and filling
labeled holes in an odd case, so I'm leaving it in.
'''
#
# This array has two intensity peaks separated by a border.
# You should get two objects, one within the other.
#
pixels = np.array([[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,1,1,1,1,1,1,2,0,0],
[0,0,2,1,2,2,2,2,1,2,0,0],
[0,0,2,1,2,9,2,2,1,2,0,0],
[0,0,2,1,2,2,2,2,1,2,0,0],
[0,0,2,1,1,1,1,1,1,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,1,2,2,2,2,2,0,0],
[0,0,2,2,1,2,2,2,2,2,0,0],
[0,0,2,2,1,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,9,9,0,0],
[0,0,2,2,2,2,2,2,9,9,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0]], float) / 10.0
expected = np.array([[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,1,1,1,1,1,1,2,0,0],
[0,0,2,1,1,1,1,1,1,2,0,0],
[0,0,2,1,1,1,1,1,1,2,0,0],
[0,0,2,1,1,1,1,1,1,2,0,0],
[0,0,2,1,1,1,1,1,1,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0]])
mask = np.array([[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,1,1,1,1,1,1,0,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,0,0,0,0,0,0,1,0,0],
[0,0,1,0,1,1,1,1,0,1,0,0],
[0,0,1,0,1,1,1,1,0,1,0,0],
[0,0,1,0,1,1,1,1,0,1,0,0],
[0,0,1,0,0,0,0,0,0,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,0,1,1,1,1,1,1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0]], bool)
workspace, x = self.make_workspace(pixels)
x.exclude_size.value = True
x.size_range.min = 6
x.size_range.max = 50
x.maxima_suppression_size.value = 3
x.automatic_suppression.value = False
x.watershed_method.value = ID.WA_INTENSITY
x.threshold_scope.value = T.TM_MANUAL
x.threshold_smoothing_choice.value = I.TSM_NONE
x.manual_threshold.value = .05
x.threshold_correction_factor.value = 1
x.should_save_outlines.value = True
x.save_outlines.value = "outlines"
measurements = workspace.measurements
x.run(workspace)
my_objects = workspace.object_set.get_objects(OBJECTS_NAME)
self.assertTrue(my_objects.segmented[3,3] != 0)
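# The two labels can come out in either order; remap them so the outer object is 2 and the inner one is 1 before comparing against the expected array.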
if my_objects.unedited_segmented[3,3] == 2:
unedited_segmented = my_objects.unedited_segmented
else:
unedited_segmented = np.array([0,2,1])[my_objects.unedited_segmented]
self.assertTrue(np.all(unedited_segmented[mask] == expected[mask]))
outlines = workspace.image_set.get_image("outlines",
must_be_binary=True)
self.assertTrue(np.all(my_objects.segmented[outlines.pixel_data] > 0))
def test_17_02_regression_holes(self):
'''Regression test - fill holes caused by filtered object
This is the real regression test for IMG-191. The smaller object
is surrounded by pixels below threshold. This prevents filling in
the unedited case.
'''
# An update to fill_labeled_holes will remove both the filtered object
# and the hole
#
if True:
return
pixels = np.array([[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,3,0,0,0,0,0,0,3,0,0],
[0,0,3,0,0,0,0,0,0,3,0,0],
[0,0,3,0,0,9,2,0,0,3,0,0],
[0,0,3,0,0,0,0,0,0,3,0,0],
[0,0,3,0,0,0,0,0,0,3,0,0],
[0,0,3,2,2,2,2,2,2,2,0,0],
[0,0,3,2,2,2,2,2,2,2,0,0],
[0,0,3,2,2,2,2,2,2,2,0,0],
[0,0,2,2,2,2,2,2,9,2,0,0],
[0,0,2,2,2,2,2,2,2,2,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0]], float) / 10.0
expected = np.array([[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0]])
mask = np.array([[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,1,1,1,1,1,1,0,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,0,0,0,0,0,0,1,0,0],
[0,0,1,0,0,0,0,0,0,1,0,0],
[0,0,1,0,0,1,1,0,0,1,0,0],
[0,0,1,0,0,0,0,0,0,1,0,0],
[0,0,1,0,0,0,0,0,0,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,1,1,1,1,1,0,0],
[0,0,0,1,1,1,1,1,1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0]], bool)
image = cpi.Image(pixels)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.add("my_image", image)
object_set = cpo.ObjectSet()
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = True
x.size_range.min = 4
x.size_range.max = 50
x.maxima_suppression_size.value = 3
x.automatic_suppression.value = False
x.watershed_method.value = ID.WA_NONE
x.threshold_method.value = T.TM_MANUAL
x.manual_threshold.value = .1
x.threshold_correction_factor.value = 1
x.module_num = 1
pipeline = cellprofiler.pipeline.Pipeline()
pipeline.add_module(x)
measurements = cpmeas.Measurements()
workspace = Workspace(pipeline, x, image_set, object_set, measurements,
image_set_list)
x.run(workspace)
my_objects = object_set.get_objects("my_object")
self.assertTrue(my_objects.segmented[3,3] != 0)
self.assertTrue(np.all(my_objects.segmented[mask] == expected[mask]))
def test_18_01_truncate_objects(self):
'''Set up a limit on the # of objects and exceed it'''
for maximum_object_count in range(2,5):
pixels = np.zeros((20,21))
pixels[2:8,2:8] = .5
pixels[12:18,2:8] = .5
pixels[2:8,12:18] = .5
pixels[12:18,12:18] = .5
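# Four identical squares; with the limit below four, only maximum_object_count of them may remain in the edited segmentation.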
image = cpi.Image(pixels)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.add("my_image", image)
object_set = cpo.ObjectSet()
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = False
x.unclump_method.value = ID.UN_NONE
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = T.TM_MANUAL
x.manual_threshold.value = .25
x.threshold_smoothing_choice.value = I.TSM_NONE
x.threshold_correction_factor.value = 1
x.limit_choice.value = ID.LIMIT_TRUNCATE
x.maximum_object_count.value = maximum_object_count
x.module_num = 1
pipeline = cellprofiler.pipeline.Pipeline()
pipeline.add_module(x)
measurements = cpmeas.Measurements()
workspace = Workspace(pipeline, x, image_set, object_set, measurements,
image_set_list)
x.run(workspace)
self.assertEqual(measurements.get_current_image_measurement(
"Count_my_object"), maximum_object_count)
my_objects = object_set.get_objects("my_object")
self.assertEqual(np.max(my_objects.segmented), maximum_object_count)
self.assertEqual(np.max(my_objects.unedited_segmented), 4)
def test_18_02_erase_objects(self):
'''Set up a limit on the # of objects and exceed it - erasing objects'''
maximum_object_count = 3
pixels = np.zeros((20,21))
pixels[2:8,2:8] = .5
pixels[12:18,2:8] = .5
pixels[2:8,12:18] = .5
pixels[12:18,12:18] = .5
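# Four squares but a limit of three: the Erase option should clear the segmentation entirely while the unedited labels keep all four.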
image = cpi.Image(pixels)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.add("my_image", image)
object_set = cpo.ObjectSet()
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = False
x.unclump_method.value = ID.UN_NONE
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = T.TM_MANUAL
x.threshold_smoothing_choice.value = I.TSM_NONE
x.manual_threshold.value = .25
x.threshold_correction_factor.value = 1
x.limit_choice.value = ID.LIMIT_ERASE
x.maximum_object_count.value = maximum_object_count
x.module_num = 1
pipeline = cellprofiler.pipeline.Pipeline()
pipeline.add_module(x)
measurements = cpmeas.Measurements()
workspace = Workspace(pipeline, x, image_set, object_set, measurements,
image_set_list)
x.run(workspace)
self.assertEqual(measurements.get_current_image_measurement(
"Count_my_object"), 0)
my_objects = object_set.get_objects("my_object")
self.assertTrue(np.all(my_objects.segmented == 0))
self.assertEqual(np.max(my_objects.unedited_segmented), 4)
def test_18_03_dont_erase_objects(self):
'''Ask to erase objects above a limit that is never exceeded, so nothing is erased'''
maximum_object_count = 5
pixels = np.zeros((20,21))
pixels[2:8,2:8] = .5
pixels[12:18,2:8] = .5
pixels[2:8,12:18] = .5
pixels[12:18,12:18] = .5
image = cpi.Image(pixels)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.add("my_image", image)
object_set = cpo.ObjectSet()
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "my_object"
x.image_name.value = "my_image"
x.exclude_size.value = False
x.unclump_method.value = ID.UN_NONE
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = T.TM_MANUAL
x.threshold_smoothing_choice.value = I.TSM_NONE
x.manual_threshold.value = .25
x.threshold_correction_factor.value = 1
x.limit_choice.value = ID.LIMIT_ERASE
x.maximum_object_count.value = maximum_object_count
x.module_num = 1
pipeline = cellprofiler.pipeline.Pipeline()
pipeline.add_module(x)
measurements = cpmeas.Measurements()
workspace = Workspace(pipeline, x, image_set, object_set, measurements,
image_set_list)
x.run(workspace)
self.assertEqual(measurements.get_current_image_measurement(
"Count_my_object"), 4)
my_objects = object_set.get_objects("my_object")
self.assertEqual(np.max(my_objects.segmented), 4)
def test_19_01_threshold_by_measurement(self):
'''Set threshold based on mean image intensity'''
pixels = np.zeros((10,10))
pixels[2:6,2:6] = .5
image = cpi.Image(pixels)
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
image_set.add("MyImage", image)
object_set = cpo.ObjectSet()
pipeline = cellprofiler.pipeline.Pipeline()
measurements = cpmeas.Measurements()
measurements.add_image_measurement("MeanIntensity_MyImage", np.mean(pixels))
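# The module reads this precomputed image measurement and uses it directly as the threshold value.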
x = ID.IdentifyPrimaryObjects()
x.object_name.value = "MyObject"
x.image_name.value = "MyImage"
x.exclude_size.value = False
x.unclump_method.value = ID.UN_NONE
x.watershed_method.value = ID.WA_NONE
x.threshold_scope.value = T.TM_MEASUREMENT
x.threshold_smoothing_choice.value = I.TSM_NONE
x.thresholding_measurement.value = "MeanIntensity_MyImage"
x.threshold_correction_factor.value = 1
x.module_num = 1
pipeline.add_module(x)
workspace = Workspace(pipeline, x, image_set, object_set, measurements,
image_set_list)
x.run(workspace)
self.assertEqual(measurements.get_current_image_measurement("Count_MyObject"),1)
self.assertEqual(measurements.get_current_image_measurement("Threshold_FinalThreshold_MyObject"),np.mean(pixels))
def test_20_01_threshold_smoothing_automatic(self):
image = np.array([[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, .4, .4, .4, 0, 0],
[ 0, 0, .4, .5, .4, 0, 0],
[ 0, 0, .4, .4, .4, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0]])
expected = np.array([[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0],
[ 0, 0, 1, 1, 1, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0]])
workspace, module = self.make_workspace(image)
assert isinstance(module, ID.IdentifyPrimaryObjects)
module.exclude_size.value = False
module.unclump_method.value = ID.UN_NONE
module.watershed_method.value = ID.WA_NONE
# MCT on this image is zero, so set the threshold at .225
# with the threshold minimum (manual = no smoothing)
module.threshold_scope.value = I.TS_GLOBAL
module.threshold_method.value = T.TM_MCT
module.threshold_range.min= .225
module.threshold_smoothing_choice.value = I.TSM_AUTOMATIC
module.run(workspace)
labels = workspace.object_set.get_objects(OBJECTS_NAME).segmented
np.testing.assert_array_equal(expected, labels)
def test_20_02_threshold_smoothing_manual(self):
image = np.array([[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, .4, .4, .4, 0, 0],
[ 0, 0, .4, .5, .4, 0, 0],
[ 0, 0, .4, .4, .4, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0]])
expected = np.array([[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0],
[ 0, 0, 1, 1, 1, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0]])
workspace, module = self.make_workspace(image)
assert isinstance(module, ID.IdentifyPrimaryObjects)
module.exclude_size.value = False
module.unclump_method.value = ID.UN_NONE
module.watershed_method.value = ID.WA_NONE
module.threshold_scope.value = I.TS_GLOBAL
module.threshold_method.value = T.TM_MCT
module.threshold_range.min= .125
module.threshold_smoothing_choice.value = I.TSM_MANUAL
module.threshold_smoothing_scale.value = 3
module.run(workspace)
labels = workspace.object_set.get_objects(OBJECTS_NAME).segmented
np.testing.assert_array_equal(expected, labels)
def test_20_03_threshold_no_smoothing(self):
image = np.array([[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, .4, .4, .4, 0, 0],
[ 0, 0, .4, .5, .4, 0, 0],
[ 0, 0, .4, .4, .4, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0]])
expected = np.array([[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0]])
for ts in I.TS_MANUAL, I.TS_MEASUREMENT:
workspace, module = self.make_workspace(image)
assert isinstance(module, ID.IdentifyPrimaryObjects)
module.exclude_size.value = False
module.unclump_method.value = ID.UN_NONE
module.watershed_method.value = ID.WA_NONE
module.threshold_scope.value = ts
module.manual_threshold.value = .125
module.thresholding_measurement.value = MEASUREMENT_NAME
workspace.measurements[cpmeas.IMAGE, MEASUREMENT_NAME] = .125
module.threshold_smoothing_choice.value = I.TSM_MANUAL
module.threshold_smoothing_scale.value = 3
module.run(workspace)
labels = workspace.object_set.get_objects(OBJECTS_NAME).segmented
np.testing.assert_array_equal(expected, labels)
def add_noise(img, fraction):
'''Add a fractional amount of noise to an image to make it look real'''
np.random.seed(0)
noise = np.random.uniform(low=1-fraction/2, high=1+fraction/2,
size=img.shape)
return img * noise
def one_cell_image():
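# A single dim circle on a dark 25x25 background, with 1% multiplicative noise.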
img = np.zeros((25,25))
draw_circle(img,(10,15),5, .5)
return add_noise(img,.01)
def two_cell_image():
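# Two circles of different brightness on a dark 50x50 background, with 1% multiplicative noise.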
img = np.zeros((50,50))
draw_circle(img,(10,35),5, .8)
draw_circle(img,(30,15),5, .6)
return add_noise(img,.01)
def fly_image():
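# Load the example fly image that ships with CellProfiler's example image set.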
return read_example_image('ExampleFlyImages','01_POS002_D.TIF')
def draw_circle(img,center,radius,value):
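# Set every pixel within the given radius of center to value, modifying img in place.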
x,y=np.mgrid[0:img.shape[0],0:img.shape[1]]
distance = np.sqrt((x-center[0])*(x-center[0])+(y-center[1])*(y-center[1]))
img[distance<=radius]=value
class TestWeightedVariance(unittest.TestCase):
def test_01_masked_wv(self):
output = T.weighted_variance(np.zeros((3,3)),
np.zeros((3,3),bool), 1)
self.assertEqual(output, 0)
def test_02_zero_wv(self):
output = T.weighted_variance(np.zeros((3,3)),
np.ones((3,3),bool),
np.ones((3,3),bool))
self.assertEqual(output, 0)
def test_03_fg_0_bg_0(self):
"""Test all foreground pixels same, all background same, wv = 0"""
img = np.zeros((4,4))
img[:,2:4]=1
binary_image = img > .5
output = T.weighted_variance(img, np.ones(img.shape,bool), binary_image)
self.assertEqual(output,0)
def test_04_values(self):
"""Test with two foreground and two background values"""
#
# The log of this array is [-4,-3],[-2,-1] and
# the variance should be (.25 *2 + .25 *2)/4 = .25
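        # Illustrative worked check (base-2 logs; values follow from the array
        # below): background logs are {-4, -3} and foreground logs are {-2, -1}.
        # Each group has variance ((0.5)**2 + (0.5)**2) / 2 = 0.25, so the
        # count-weighted mean over the four pixels is (0.25*2 + 0.25*2)/4 = 0.25.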
img = np.array([[1.0/16.,1.0/8.0],[1.0/4.0,1.0/2.0]])
binary_image = np.array([[False, False], [True, True]])
output = T.weighted_variance(img, np.ones((2,2),bool), binary_image)
self.assertAlmostEqual(output,.25)
def test_05_mask(self):
"""Test, masking out one of the background values"""
#
# The log of this array is [-4,-3],[-2,-1] and
# the variance should be (.25*2 + .25 *2)/4 = .25
img = np.array([[1.0/16.,1.0/16.0,1.0/8.0],[1.0/4.0,1.0/4.0,1.0/2.0]])
mask = np.array([[False,True,True],[False,True,True]])
binary_image = np.array([[False, False, False], [True, True, True]])
output = T.weighted_variance(img, mask, binary_image)
        self.assertAlmostEqual(output, .25)
class TestSumOfEntropies(unittest.TestCase):
def test_01_all_masked(self):
output = T.sum_of_entropies(np.zeros((3,3)),
np.zeros((3,3),bool), 1)
self.assertEqual(output,0)
def test_020_all_zero(self):
"""Can't take the log of zero, so all zero matrix = 0"""
output = T.sum_of_entropies(np.zeros((4,2)),
np.ones((4,2),bool),
np.ones((4,2), bool))
self.assertAlmostEqual(output,0)
def test_03_fg_bg_equal(self):
img = np.ones((128,128))
img[0:64,:] *= .15
img[64:128,:] *= .85
img[0, 0] = img[-1, 0] = 0
img[0, -1] = img[-1, -1] = 1
binary_mask = np.zeros(img.shape, bool)
binary_mask[64:, :] = True
#
# You need one foreground and one background pixel to defeat a
# divide-by-zero (that's appropriately handled)
#
one_of_each = np.zeros(img.shape, bool)
one_of_each[0,0] = one_of_each[-1, -1] = True
output = T.sum_of_entropies(img, np.ones((128,128),bool), binary_mask)
ob = T.sum_of_entropies(img, one_of_each | ~binary_mask, binary_mask)
of = T.sum_of_entropies(img, one_of_each | binary_mask, binary_mask)
self.assertAlmostEqual(output, ob + of)
def test_04_fg_bg_different(self):
img = np.ones((128,128))
img[0:64,0:64] *= .15
img[0:64,64:128] *= .3
img[64:128,0:64] *= .7
img[64:128,64:128] *= .85
binary_mask = np.zeros(img.shape, bool)
binary_mask[64:, :] = True
one_of_each = np.zeros(img.shape, bool)
one_of_each[0,0] = one_of_each[-1, -1] = True
output = T.sum_of_entropies(img, np.ones((128,128),bool), binary_mask)
ob = T.sum_of_entropies(img, one_of_each | ~binary_mask, binary_mask)
of = T.sum_of_entropies(img, one_of_each | binary_mask, binary_mask)
self.assertAlmostEqual(output, ob + of)
| gpl-2.0 |
catapult-project/catapult-csm | third_party/gsutil/third_party/httplib2/python2/httplib2/__init__.py | 29 | 69586 | from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer ([email protected])",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.7.7"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import urllib
import base64
import os
import copy
import calendar
import time
import random
import errno
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
try:
import socks
except (ImportError, AttributeError):
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
ssl_SSLError = ssl.SSLError
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if disable_validation:
cert_reqs = ssl.CERT_NONE
else:
cert_reqs = ssl.CERT_REQUIRED
# We should be specifying SSL version 3 or TLS v1, but the ssl module
# doesn't expose the necessary knobs. So we need to go with the default
# of SSLv23.
return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
ssl_SSLError = None
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if not disable_validation:
raise CertificateValidationUnsupported(
"SSL certificate validation is not supported without "
"the ssl module installed. To avoid this error, install "
"the ssl module, or explicity disable validation.")
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = [
'Http', 'Response', 'ProxyInfo', 'HttpLib2Error', 'RedirectMissingLocation',
'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError',
'UnimplementedHmacDigestAuthOptionError',
'debuglevel', 'ProxiesUnavailableError']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass
class CertificateHostnameMismatch(SSLHandshakeError):
def __init__(self, desc, host, cert):
HttpLib2Error.__init__(self, desc)
self.host = host
self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
try:
# Users can optionally provide a module that tells us where the CA_CERTS
# are located.
import ca_certs_locater
CA_CERTS = ca_certs_locater.get()
except ImportError:
# Default CA certificates file bundled with httplib2.
CA_CERTS = os.path.join(
os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
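# Usage sketch (illustrative; the sample response dict is an assumption):
# hop-by-hop headers and anything named in the Connection header are dropped,
# end-to-end headers are kept.
#
#   >>> _get_end2end_headers({'connection': 'close', 'etag': '"abc"',
#   ...                       'transfer-encoding': 'chunked'})
#   ['etag']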
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
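# Usage sketch (illustrative): the tuple layout is
# (scheme, authority, path, query, fragment).
#
#   >>> parse_uri("http://example.com/a/b?x=1#frag")
#   ('http', 'example.com', '/a/b', 'x=1', 'frag')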
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
scheme = scheme.lower()
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
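# Usage sketch (illustrative): scheme and authority are lower-cased, the
# fragment is dropped, and the defragmented URI is what gets used as the
# cache key.
#
#   >>> urlnorm("HTTP://Example.COM/foo?x=1#frag")
#   ('http', 'example.com', '/foo?x=1', 'http://example.com/foo?x=1')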
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    return dict([(key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])
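# Usage sketch (illustrative): header names are lower-cased and surrounding
# whitespace in values is stripped before the headers are used elsewhere.
#
#   >>> _normalize_headers({'Content-Type': ' text/html '})
#   {'content-type': 'text/html'}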
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
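# Usage sketch (illustrative): directives with a value map to that value,
# bare directives map to 1.
#
#   >>> _parse_cache_control({'cache-control': 'max-age=3600, no-cache'})
#   {'max-age': '3600', 'no-cache': 1}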
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn it on; useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
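# Usage sketch (illustrative; the header value is an assumption):
#
#   >>> _parse_www_authenticate({'www-authenticate': 'Basic realm="restricted"'})
#   {'basic': {'realm': 'restricted'}}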
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
We will never return a stale document as
fresh as a design decision, and thus the non-implementation
of 'max-stale'. This also lets us safely ignore 'must-revalidate'
since we operate as if every server has sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
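# Usage sketch (illustrative; header values are assumptions). A response dated
# "now" with max-age=300 is still fresh, while max-age=0 makes it stale:
#
#   >>> now = email.Utils.formatdate(time.time())
#   >>> _entry_disposition({'date': now, 'cache-control': 'max-age=300'}, {})
#   'FRESH'
#   >>> _entry_disposition({'date': now, 'cache-control': 'max-age=0'}, {})
#   'STALE'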
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
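# Usage sketch (illustrative): a gzip-encoded body is transparently inflated
# and the encoding header is renamed so it won't be applied twice.
#
#   >>> buf = StringIO.StringIO()
#   >>> gz = gzip.GzipFile(fileobj=buf, mode='wb'); gz.write('hello'); gz.close()
#   >>> resp = {'content-encoding': 'gzip'}
#   >>> _decompressContent(resp, buf.getvalue())
#   'hello'
#   >>> resp['-content-encoding'], resp['content-length']
#   ('gzip', '5')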
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header. Over-ride this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
        Over-ride this in sub-classes if necessary.
        Return TRUE if the request is to be retried, for
example Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (
self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)))
headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'])
if self.challenge.get('opaque'):
headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer ([email protected])"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
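# Usage sketch (illustrative; the directory name is an assumption):
#
#   >>> c = FileCache('.cache')
#   >>> c.set('http://example.org/', 'cached response bytes')
#   >>> c.get('http://example.org/')
#   'cached response bytes'
#   >>> c.delete('http://example.org/')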
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class AllHosts(object):
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
bypass_hosts = ()
def __init__(self, proxy_type, proxy_host, proxy_port,
proxy_rdns=None, proxy_user=None, proxy_pass=None):
"""The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
proxy_host='localhost', proxy_port=8000)
"""
self.proxy_type = proxy_type
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_rdns = proxy_rdns
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port,
self.proxy_rdns, self.proxy_user, self.proxy_pass)
def isgood(self):
return (self.proxy_host != None) and (self.proxy_port != None)
def applies_to(self, hostname):
return not self.bypass_host(hostname)
def bypass_host(self, hostname):
"""Has this host been excluded from the proxy config"""
if self.bypass_hosts is AllHosts:
return True
bypass = False
for domain in self.bypass_hosts:
if hostname.endswith(domain):
bypass = True
return bypass
def proxy_info_from_environment(method='http'):
"""
Read proxy info from the environment variables.
"""
if method not in ['http', 'https']:
return
env_var = method + '_proxy'
url = os.environ.get(env_var, os.environ.get(env_var.upper()))
if not url:
return
pi = proxy_info_from_url(url, method)
no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', ''))
bypass_hosts = []
if no_proxy:
bypass_hosts = no_proxy.split(',')
# special case, no_proxy=* means all hosts bypassed
if no_proxy == '*':
bypass_hosts = AllHosts
pi.bypass_hosts = bypass_hosts
return pi
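# Usage sketch (illustrative; environment values are assumptions):
#
#   >>> os.environ['http_proxy'] = 'http://localhost:8080'
#   >>> os.environ['no_proxy'] = 'localhost,.internal.example.com'
#   >>> pi = proxy_info_from_environment('http')
#   >>> (pi.proxy_host, pi.proxy_port)
#   ('localhost', 8080)
#   >>> pi.bypass_host('db.internal.example.com')
#   True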
def proxy_info_from_url(url, method='http'):
"""
Construct a ProxyInfo from a URL (such as http_proxy env var)
"""
url = urlparse.urlparse(url)
username = None
password = None
port = None
if '@' in url[1]:
ident, host_port = url[1].split('@', 1)
if ':' in ident:
username, password = ident.split(':', 1)
else:
password = ident
else:
host_port = url[1]
if ':' in host_port:
host, port = host_port.split(':', 1)
else:
host = host_port
if port:
port = int(port)
else:
port = dict(https=443, http=80)[method]
proxy_type = 3 # socks.PROXY_TYPE_HTTP
return ProxyInfo(
proxy_type = proxy_type,
proxy_host = host,
proxy_port = port,
proxy_user = username or None,
proxy_pass = password or None,
)
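# Usage sketch (illustrative; credentials and host are assumptions):
#
#   >>> pi = proxy_info_from_url('http://user:secret@proxy.example.com:3128')
#   >>> (pi.proxy_host, pi.proxy_port, pi.proxy_user, pi.proxy_pass)
#   ('proxy.example.com', 3128, 'user', 'secret')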
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""
HTTPConnection subclass that supports timeouts
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
if self.proxy_info and socks is None:
raise ProxiesUnavailableError(
'Proxy support missing but proxy use was requested!')
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
else:
use_proxy = False
if use_proxy and proxy_rdns:
host = proxy_host
port = proxy_port
else:
host = self.host
port = self.port
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if use_proxy:
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
self.sock = socket.socket(af, socktype, proto)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s) ************" % (self.host, self.port)
if use_proxy:
print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
self.sock.connect((self.host, self.port) + sa[2:])
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None,
ca_certs=None, disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port,
key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
if ca_certs is None:
ca_certs = CA_CERTS
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# The following two methods were adapted from https_wrapper.py, released
# with the Google Appengine SDK at
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
# under the following license:
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def _GetValidHostsForCert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName']
if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _ValidateCertificateHostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._GetValidHostsForCert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
else:
use_proxy = False
if use_proxy and proxy_rdns:
host = proxy_host
port = proxy_port
else:
host = self.host
port = self.port
address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
for family, socktype, proto, canonname, sockaddr in address_info:
try:
if use_proxy:
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock =_ssl_wrap_socket(
sock, self.key_file, self.cert_file,
self.disable_ssl_certificate_validation, self.ca_certs)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if not self.disable_ssl_certificate_validation:
cert = self.sock.getpeercert()
hostname = self.host.split(':', 0)[0]
if not self._ValidateCertificateHostname(cert, hostname):
raise CertificateHostnameMismatch(
'Server presented certificate that does not match '
'host %s: %s' % (hostname, cert), hostname, cert)
except ssl_SSLError, e:
if sock:
sock.close()
if self.sock:
self.sock.close()
self.sock = None
# Unfortunately the ssl module doesn't seem to provide any way
# to get at more detailed error information, in particular
# whether the error is due to certificate validation or
# something else (such as SSL protocol mismatch).
if e.errno == ssl.SSL_ERROR_SSL:
raise SSLHandshakeError(e)
else:
raise
except (socket.timeout, socket.gaierror):
raise
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
SCHEME_TO_CONNECTION = {
'http': HTTPConnectionWithTimeout,
'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
try:
from google.appengine.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google.appengine.api.urlfetch import fetch
from google.appengine.api.urlfetch import InvalidURLError
except ImportError:
from google3.apphosting.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google3.apphosting.api.urlfetch import fetch
from google3.apphosting.api.urlfetch import InvalidURLError
def _new_fixed_fetch(validate_certificate):
def fixed_fetch(url, payload=None, method="GET", headers={},
allow_truncated=False, follow_redirects=True,
deadline=5):
            return fetch(url, payload=payload, method=method, headers=headers,
allow_truncated=allow_truncated,
follow_redirects=follow_redirects, deadline=deadline,
validate_certificate=validate_certificate)
return fixed_fetch
class AppEngineHttpConnection(httplib.HTTPConnection):
"""Use httplib on App Engine, but compensate for its weirdness.
The parameters key_file, cert_file, proxy_info, ca_certs, and
disable_ssl_certificate_validation are all dropped on the ground.
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
httplib.HTTPConnection.__init__(self, host, port=port,
strict=strict, timeout=timeout)
class AppEngineHttpsConnection(httplib.HTTPSConnection):
"""Same as AppEngineHttpConnection, but for HTTPS URIs."""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port,
key_file=key_file,
cert_file=cert_file, strict=strict,
timeout=timeout)
self._fetch = _new_fixed_fetch(
not disable_ssl_certificate_validation)
    # Update the connection classes to use the Google App Engine specific ones.
SCHEME_TO_CONNECTION = {
'http': AppEngineHttpConnection,
'https': AppEngineHttpsConnection
}
except ImportError:
pass
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None,
proxy_info=proxy_info_from_environment,
ca_certs=None, disable_ssl_certificate_validation=False):
"""If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
All timeouts are in seconds. If None is passed for timeout
then Python's default timeout for sockets will be used. See
for example the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
`proxy_info` may be:
- a callable that takes the http scheme ('http' or 'https') and
returns a ProxyInfo instance per request. By default, uses
            proxy_info_from_environment.
- a ProxyInfo instance (static proxy config).
- None (proxy disabled).
ca_certs is the path of a file containing root CA certificates for SSL
server certificate validation. By default, a CA cert file bundled with
httplib2 is used.
If disable_ssl_certificate_validation is true, SSL cert validation will
not be performed.
"""
self.proxy_info = proxy_info
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, basestring):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT", "PATCH"]
# If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
# Keep Authorization: headers on a redirect.
self.forward_authorization_headers = False
def __getstate__(self):
state_dict = copy.copy(self.__dict__)
# In case request is augmented by some foreign object such as
# credentials which handle auth
if 'request' in state_dict:
del state_dict['request']
if 'connections' in state_dict:
del state_dict['connections']
return state_dict
def __setstate__(self, state):
self.__dict__.update(state)
self.connections = {}
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
for i in range(RETRIES):
try:
if hasattr(conn, 'sock') and conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except ssl_SSLError:
conn.close()
raise
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err == errno.ECONNREFUSED: # Connection refused
raise
except httplib.HTTPException:
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
if hasattr(conn, 'sock') and conn.sock is None:
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
if i < RETRIES-1:
conn.close()
conn.connect()
continue
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
else:
content = ""
if method == "HEAD":
conn.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if 'authorization' in headers and not self.forward_authorization_headers:
del headers['authorization']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = method
if response.status in [302, 303]:
redirect_method = "GET"
body = None
(response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
response.previous = old_response
else:
raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content)
elif response.status in [200, 203] and method in ["GET", "HEAD"]:
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin with either
'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
etc. There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a
string object.
Any extra headers that are to be sent with the request should be
provided in the 'headers' dictionary.
        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.
The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
proxy_info = self._get_proxy_info(scheme, authority)
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = SCHEME_TO_CONNECTION[scheme]
certs = list(self.certificates.iter(authority))
if scheme == 'https':
if certs:
conn = self.connections[conn_key] = connection_type(
authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info)
conn.set_debuglevel(debuglevel)
if 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except (IndexError, ValueError):
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, None) != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
if redirections <= 0:
raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "")
(response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
                    # There seem to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response({
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response({
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
def _get_proxy_info(self, scheme, authority):
"""Return a ProxyInfo instance (or None) based on the scheme
and authority.
"""
hostname, port = urllib.splitport(authority)
proxy_info = self.proxy_info
if callable(proxy_info):
proxy_info = proxy_info(scheme)
if (hasattr(proxy_info, 'applies_to')
and not proxy_info.applies_to(hostname)):
proxy_info = None
return proxy_info
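# Usage sketch (illustrative; the URL and cache directory are assumptions, and
# the status shown assumes the server answers normally):
#
#   >>> h = Http('.cache')
#   >>> h.add_credentials('name', 'password')
#   >>> resp, content = h.request('http://example.org/', 'GET',
#   ...                           headers={'cache-control': 'no-cache'})
#   >>> resp.status
#   200
#   >>> resp.fromcache
#   False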
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key.lower()] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key.lower()] = value
self.status = int(self.get('status', self.status))
self.reason = self.get('reason', self.reason)
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name
| bsd-3-clause |
surgebiswas/poker | PokerBots_2017/Johnny/scipy/weave/ext_tools.py | 92 | 17820 | from __future__ import absolute_import, print_function
import os
import sys
import re
from . import catalog
from . import build_tools
from . import converters
from . import base_spec
class ext_function_from_specs(object):
def __init__(self,name,code_block,arg_specs):
self.name = name
self.arg_specs = base_spec.arg_spec_list(arg_specs)
self.code_block = code_block
self.compiler = ''
self.customize = base_info.custom_info()
def header_code(self):
pass
def function_declaration_code(self):
code = 'static PyObject* %s(PyObject*self, PyObject* args,' \
' PyObject* kywds)\n{\n'
return code % self.name
def template_declaration_code(self):
code = 'template<class T>\n' \
'static PyObject* %s(PyObject*self, PyObject* args,' \
' PyObject* kywds)\n{\n'
return code % self.name
#def cpp_function_declaration_code(self):
# pass
#def cpp_function_call_code(self):
#s pass
def parse_tuple_code(self):
""" Create code block for PyArg_ParseTuple. Variable declarations
for all PyObjects are done also.
This code got a lot uglier when I added local_dict...
"""
declare_return = 'py::object return_val;\n' \
'int exception_occurred = 0;\n' \
'PyObject *py_local_dict = NULL;\n'
arg_string_list = self.arg_specs.variable_as_strings() + ['"local_dict"']
arg_strings = ','.join(arg_string_list)
if arg_strings:
arg_strings += ','
declare_kwlist = 'static const char *kwlist[] = {%s NULL};\n' % \
arg_strings
py_objects = ', '.join(self.arg_specs.py_pointers())
init_flags = ', '.join(self.arg_specs.init_flags())
init_flags_init = '= '.join(self.arg_specs.init_flags())
py_vars = ' = '.join(self.arg_specs.py_variables())
if py_objects:
declare_py_objects = 'PyObject ' + py_objects + ';\n'
declare_py_objects += 'int ' + init_flags + ';\n'
init_values = py_vars + ' = NULL;\n'
init_values += init_flags_init + ' = 0;\n\n'
else:
declare_py_objects = ''
init_values = ''
#Each variable is in charge of its own cleanup now.
#cnt = len(arg_list)
#declare_cleanup = "blitz::TinyVector<PyObject*,%d> clean_up(0);\n" % cnt
ref_string = ', '.join(self.arg_specs.py_references())
if ref_string:
ref_string += ', &py_local_dict'
else:
ref_string = '&py_local_dict'
format = "O" * len(self.arg_specs) + "|O" + ':' + self.name
parse_tuple = 'if(!PyArg_ParseTupleAndKeywords(args,' \
'kywds,"%s",const_cast<char**>(kwlist),%s))\n' % \
(format,ref_string)
parse_tuple += ' return NULL;\n'
return declare_return + declare_kwlist + declare_py_objects \
+ init_values + parse_tuple
def arg_declaration_code(self):
arg_strings = []
for arg in self.arg_specs:
arg_strings.append(arg.declaration_code())
arg_strings.append(arg.init_flag() + " = 1;\n")
code = "".join(arg_strings)
return code
def arg_cleanup_code(self):
arg_strings = []
have_cleanup = filter(lambda x:x.cleanup_code(),self.arg_specs)
for arg in have_cleanup:
code = "if(%s)\n" % arg.init_flag()
code += "{\n"
code += indent(arg.cleanup_code(),4)
code += "}\n"
arg_strings.append(code)
code = "".join(arg_strings)
return code
def arg_local_dict_code(self):
arg_strings = []
for arg in self.arg_specs:
arg_strings.append(arg.local_dict_code())
code = "".join(arg_strings)
return code
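    # function_code() stitches the pieces above into one extension function:
    # keyword/tuple parsing, a try block containing the converted arguments and
    # the user's code block, a catch-all that flags the exception, and the
    # cleanup code that runs before return_val is handed back to Python.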
def function_code(self):
decl_code = indent(self.arg_declaration_code(),4)
cleanup_code = indent(self.arg_cleanup_code(),4)
function_code = indent(self.code_block,4)
local_dict_code = indent(self.arg_local_dict_code(),4)
dict_code = "if(py_local_dict) \n" \
"{ \n" \
" py::dict local_dict = py::dict(py_local_dict); \n" + \
local_dict_code + \
"} \n"
try_code = "try \n" \
"{ \n" + \
decl_code + \
" /*<function call here>*/ \n" + \
function_code + \
indent(dict_code,4) + \
"\n} \n"
catch_code = "catch(...) \n" \
"{ \n" + \
" return_val = py::object(); \n" \
" exception_occurred = 1; \n" \
"} \n"
return_code = " /*cleanup code*/ \n" + \
cleanup_code + \
' if(!(PyObject*)return_val && !exception_occurred)\n' \
' {\n \n' \
' return_val = Py_None; \n' \
' }\n \n' \
' return return_val.disown(); \n' \
'} \n'
all_code = self.function_declaration_code() + \
indent(self.parse_tuple_code(),4) + \
indent(try_code,4) + \
indent(catch_code,4) + \
return_code
return all_code
def python_function_definition_code(self):
args = (self.name, self.name)
function_decls = '{"%s",(PyCFunction)%s , METH_VARARGS|' \
'METH_KEYWORDS},\n' % args
return function_decls
def set_compiler(self,compiler):
self.compiler = compiler
for arg in self.arg_specs:
arg.set_compiler(compiler)
class ext_function(ext_function_from_specs):
def __init__(self,name,code_block, args, local_dict=None, global_dict=None,
auto_downcast=1, type_converters=None):
call_frame = sys._getframe().f_back
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
if type_converters is None:
type_converters = converters.default
arg_specs = assign_variable_types(args,local_dict, global_dict,
auto_downcast, type_converters)
ext_function_from_specs.__init__(self,name,code_block,arg_specs)
from . import base_info
class ext_module(object):
def __init__(self,name,compiler=''):
standard_info = converters.standard_info
self.name = name
self.functions = []
self.compiler = compiler
self.customize = base_info.custom_info()
self._build_information = base_info.info_list(standard_info)
def add_function(self,func):
self.functions.append(func)
def module_code(self):
code = '\n'.join([
"""\
#ifdef __CPLUSPLUS__
extern "C" {
#endif
""",
self.warning_code(),
self.header_code(),
self.support_code(),
self.function_code(),
self.python_function_definition_code(),
self.module_init_code(),
"""\
#ifdef __CPLUSCPLUS__
}
#endif
"""
])
return code
def arg_specs(self):
all_arg_specs = base_spec.arg_spec_list()
for func in self.functions:
all_arg_specs += func.arg_specs
return all_arg_specs
def build_information(self):
info = self._build_information + [self.customize] + \
self.arg_specs().build_information()
for func in self.functions:
info.append(func.customize)
#redundant, but easiest place to make sure compiler is set
for i in info:
i.set_compiler(self.compiler)
return info
def get_headers(self):
all_headers = self.build_information().headers()
# blitz/array.h always needs to go before most other headers, so we
# hack that here, but we need to ensure that Python.h is the very
# first header included. As indicated in
# http://docs.python.org/api/includes.html
# "Warning: Since Python may define some pre-processor definitions which
# affect the standard headers on some systems, you must include Python.h
# before any standard headers are included. "
# Since blitz/array.h pulls in system headers, we must massage this
# list a bit so that the order is Python.h, blitz/array.h, ...
if '"blitz/array.h"' in all_headers:
all_headers.remove('"blitz/array.h"')
# Insert blitz AFTER Python.h, which must remain the first header
all_headers.insert(1,'"blitz/array.h"')
return all_headers
def warning_code(self):
all_warnings = self.build_information().warnings()
w = map(lambda x: "#pragma warning(%s)\n" % x,all_warnings)
return '#ifndef __GNUC__\n' + ''.join(w) + '\n#endif'
def header_code(self):
h = self.get_headers()
h = map(lambda x: '#include ' + x + '\n',h)
return ''.join(h) + '\n'
def support_code(self):
code = self.build_information().support_code()
return ''.join(code) + '\n'
def function_code(self):
all_function_code = ""
for func in self.functions:
all_function_code += func.function_code()
return ''.join(all_function_code) + '\n'
def python_function_definition_code(self):
all_definition_code = ""
for func in self.functions:
all_definition_code += func.python_function_definition_code()
all_definition_code = indent(''.join(all_definition_code),4)
code = 'static PyMethodDef compiled_methods[] = \n' \
'{\n' \
'%s' \
' {NULL, NULL} /* Sentinel */\n' \
'};\n'
return code % (all_definition_code)
def module_init_code(self):
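        # Emits the Python 2 style entry point: a PyMODINIT_FUNC named
        # init<module_name> that runs converter init code and then calls
        # Py_InitModule with the compiled_methods table.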
init_code_list = self.build_information().module_init_code()
init_code = indent(''.join(init_code_list),4)
code = 'PyMODINIT_FUNC init%s(void)\n' \
'{\n' \
'%s' \
' (void) Py_InitModule("%s", compiled_methods);\n' \
'}\n' % (self.name,init_code,self.name)
return code
def generate_file(self,file_name="",location='.'):
code = self.module_code()
if not file_name:
file_name = self.name + '.cpp'
name = generate_file_name(file_name,location)
#return name
return generate_module(code,name)
def set_compiler(self,compiler):
# This is not used anymore -- I think we should ditch it.
#for i in self.arg_specs()
# i.set_compiler(compiler)
for i in self.build_information():
i.set_compiler(compiler)
for i in self.functions:
i.set_compiler(compiler)
self.compiler = compiler
def build_kw_and_file(self,location,kw):
arg_specs = self.arg_specs()
info = self.build_information()
_source_files = info.sources()
# remove duplicates
source_files = {}
for i in _source_files:
source_files[i] = None
source_files = source_files.keys()
# add internally specified macros, includes, etc. to the key words
# values of the same names so that distutils will use them.
kw['define_macros'] = kw.get('define_macros',[]) + \
info.define_macros()
kw['include_dirs'] = kw.get('include_dirs',[]) + info.include_dirs()
kw['libraries'] = kw.get('libraries',[]) + info.libraries()
kw['library_dirs'] = kw.get('library_dirs',[]) + info.library_dirs()
kw['extra_compile_args'] = kw.get('extra_compile_args',[]) + \
info.extra_compile_args()
kw['extra_link_args'] = kw.get('extra_link_args',[]) + \
info.extra_link_args()
kw['sources'] = kw.get('sources',[]) + source_files
file = self.generate_file(location=location)
return kw,file
def setup_extension(self,location='.',**kw):
kw,file = self.build_kw_and_file(location,kw)
return build_tools.create_extension(file, **kw)
def compile(self,location='.',compiler=None, verbose=0, **kw):
if compiler is not None:
self.compiler = compiler
# !! removed -- we don't have any compiler dependent code
# currently in spec or info classes
# hmm. Is there a cleaner way to do this? Seems like
        # choosing the compiler spaghettis around a little.
#compiler = build_tools.choose_compiler(self.compiler)
#self.set_compiler(compiler)
kw,file = self.build_kw_and_file(location,kw)
# This is needed so that files build correctly even when different
# versions of Python are running around.
# Imported at beginning of file now to help with test paths.
# import catalog
#temp = catalog.default_temp_dir()
# for speed, build in the machines temp directory
temp = catalog.intermediate_dir()
success = build_tools.build_extension(file, temp_dir=temp,
compiler_name=compiler,
verbose=verbose, **kw)
if not success:
raise SystemError('Compilation failed')
def generate_file_name(module_name,module_location):
module_file = os.path.join(module_location,module_name)
return os.path.abspath(module_file)
def generate_module(module_string, module_file):
""" generate the source code file. Only overwrite
the existing file if the actual source has changed.
"""
file_changed = 1
if os.path.exists(module_file):
f = open(module_file,'r')
old_string = f.read()
f.close()
if old_string == module_string:
file_changed = 0
if file_changed:
f = open(module_file,'w')
f.write(module_string)
f.close()
return module_file
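# Map each requested variable name to a C++ type spec: look the name up in the
# caller's local/global namespaces and ask the registered type converters which
# factory matches the value found there.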
def assign_variable_types(variables,local_dict={}, global_dict={},
auto_downcast=1,
type_converters=converters.default):
incoming_vars = {}
incoming_vars.update(global_dict)
incoming_vars.update(local_dict)
variable_specs = []
errors = {}
for var in variables:
try:
example_type = incoming_vars[var]
# look through possible type specs to find which one
            # should be used for example_type
spec = None
for factory in type_converters:
if factory.type_match(example_type):
spec = factory.type_spec(var,example_type)
break
if not spec:
# should really define our own type.
raise IndexError
else:
variable_specs.append(spec)
except KeyError:
errors[var] = ("The type and dimensionality specifications" +
"for variable '" + var + "' are missing.")
except IndexError:
errors[var] = ("Unable to convert variable '" + var +
"' to a C++ type.")
if errors:
raise TypeError(format_error_msg(errors))
if auto_downcast:
variable_specs = downcast(variable_specs)
return variable_specs
def downcast(var_specs):
""" Cast python scalars down to most common type of
arrays used.
Right now, focus on complex and float types. Ignore int types.
Require all arrays to have same type before forcing downcasts.
Note: var_specs are currently altered in place (horrors...!)
"""
numeric_types = []
    # grab all the numeric types associated with the variables.
for var in var_specs:
if hasattr(var,'numeric_type'):
numeric_types.append(var.numeric_type)
# if arrays are present, but none of them are double precision,
# make all numeric types float or complex(float)
if (('f' in numeric_types or 'F' in numeric_types) and not (
'd' in numeric_types or 'D' in numeric_types)):
for var in var_specs:
if hasattr(var,'numeric_type'):
if issubclass(var.numeric_type, complex):
var.numeric_type = 'F'
elif issubclass(var.numeric_type, float):
var.numeric_type = 'f'
return var_specs
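# Small formatting helper, e.g. indent("a\nb", 4) -> "    a\n    b"; trailing
# spaces at the very end of the result are trimmed.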
def indent(st,spaces):
indention = ' '*spaces
indented = indention + st.replace('\n','\n'+indention)
# trim off any trailing spaces
indented = re.sub(r' +$',r'',indented)
return indented
def format_error_msg(errors):
#minimum effort right now...
import pprint
import cStringIO
msg = cStringIO.StringIO()
pprint.pprint(errors,msg)
return msg.getvalue()
| mit |
nitin-cherian/Webapps | SimpleIsBetterThanComplex.com/myproject/.env/lib/python3.5/site-packages/django/contrib/messages/api.py | 48 | 3147 | from django.contrib.messages import constants
from django.contrib.messages.storage import default_storage
__all__ = (
'add_message', 'get_messages',
'get_level', 'set_level',
'debug', 'info', 'success', 'warning', 'error',
'MessageFailure',
)
class MessageFailure(Exception):
pass
def add_message(request, level, message, extra_tags='', fail_silently=False):
"""
Attempts to add a message to the request using the 'messages' app.
"""
try:
messages = request._messages
except AttributeError:
if not hasattr(request, 'META'):
raise TypeError(
"add_message() argument must be an HttpRequest object, not "
"'%s'." % request.__class__.__name__
)
if not fail_silently:
raise MessageFailure(
'You cannot add messages without installing '
'django.contrib.messages.middleware.MessageMiddleware'
)
else:
return messages.add(level, message, extra_tags)
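# Typical call from view code (illustrative only); the level-specific helpers
# defined below forward to this with the matching constants.* level:
#
#     add_message(request, constants.INFO, 'Profile updated.')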
def get_messages(request):
"""
Returns the message storage on the request if it exists, otherwise returns
an empty list.
"""
return getattr(request, '_messages', [])
def get_level(request):
"""
Returns the minimum level of messages to be recorded.
The default level is the ``MESSAGE_LEVEL`` setting. If this is not found,
the ``INFO`` level is used.
"""
storage = getattr(request, '_messages', default_storage(request))
return storage.level
def set_level(request, level):
"""
Sets the minimum level of messages to be recorded, returning ``True`` if
the level was recorded successfully.
If set to ``None``, the default level will be used (see the ``get_level``
method).
"""
if not hasattr(request, '_messages'):
return False
request._messages.level = level
return True
def debug(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``DEBUG`` level.
"""
add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``INFO`` level.
"""
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``SUCCESS`` level.
"""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``WARNING`` level.
"""
add_message(request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``ERROR`` level.
"""
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
| mit |
averagehat/scikit-bio | skbio/io/format/fasta.py | 3 | 38087 | """
FASTA/QUAL format (:mod:`skbio.io.format.fasta`)
================================================
.. currentmodule:: skbio.io.format.fasta
The FASTA file format (``fasta``) stores biological (i.e., nucleotide or
protein) sequences in a simple plain text format that is both human-readable
and easy to parse. The file format was first introduced and used in the FASTA
software package [1]_. Additional descriptions of the file format can be found
in [2]_ and [3]_.
An example of a FASTA-formatted file containing two DNA sequences::
>seq1 db-accession-149855
CGATGTCGATCGATCGATCGATCAG
>seq2 db-accession-34989
CATCGATCGATCGATGCATGCATGCATG
The QUAL file format is an additional format related to FASTA. A FASTA file is
sometimes accompanied by a QUAL file, particularly when the FASTA file contains
sequences generated on a high-throughput sequencing instrument. QUAL files
store a Phred quality score (nonnegative integer) for each base in a sequence
stored in FASTA format (see [4]_ for more details). scikit-bio supports reading
and writing FASTA (and optionally QUAL) file formats.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |generator of :mod:`skbio.sequence.Sequence` objects |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.alignment.SequenceCollection` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.alignment.Alignment` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.Sequence` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.DNA` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.RNA` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.Protein` |
+------+------+---------------------------------------------------------------+
.. note:: All readers and writers support an optional QUAL file via the
``qual`` parameter. If one is provided, quality scores will be read/written
in addition to FASTA sequence data.
Format Specification
--------------------
The following sections define the FASTA and QUAL file formats in detail.
FASTA Format
^^^^^^^^^^^^
A FASTA file contains one or more biological sequences. The sequences are
stored sequentially, with a *record* for each sequence (also referred to as a
*FASTA record*). Each *record* consists of a single-line *header* (sometimes
referred to as a *defline*, *label*, *description*, or *comment*) followed by
the sequence data, optionally split over multiple lines.
.. note:: Blank or whitespace-only lines are only allowed at the beginning of
the file, between FASTA records, or at the end of the file. A blank or
whitespace-only line after the header line, within the sequence (for FASTA
files), or within quality scores (for QUAL files) will raise an error.
scikit-bio will ignore leading and trailing whitespace characters on each
line while reading.
.. note:: scikit-bio does not currently support legacy FASTA format (i.e.,
headers/comments denoted with a semicolon). The format supported by
scikit-bio (described below in detail) most closely resembles the
description given in NCBI's BLAST documentation [3]_. See [2]_ for more
details on legacy FASTA format. If you would like legacy FASTA format
support added to scikit-bio, please consider submitting a feature request on
the
`scikit-bio issue tracker <https://github.com/biocore/scikit-bio/issues>`_
(pull requests are also welcome!).
Sequence Header
~~~~~~~~~~~~~~~
Each sequence header consists of a single line beginning with a greater-than
(``>``) symbol. Immediately following this is a sequence identifier (ID) and
description separated by one or more whitespace characters. The sequence ID and
description are stored in the sequence `metadata` attribute, under the `'id'`
and `'description'` keys, respectively. Both are optional. Each will be
represented as the empty string (``''``) in `metadata` if it is not present
in the header.
A sequence ID consists of a single *word*: all characters after the greater-
than symbol and before the first whitespace character (if any) are taken as the
sequence ID. Unique sequence IDs are not strictly enforced by the FASTA format
itself. A single standardized ID format is similarly not enforced by the FASTA
format, though it is often common to use a unique library accession number for
a sequence ID (e.g., NCBI's FASTA defline format [5]_).
.. note:: scikit-bio will enforce sequence ID uniqueness depending on the type
of object that the FASTA file is read into. For example, reading a FASTA
file as a generator of ``Sequence`` objects will not enforce
unique IDs since it simply yields each sequence it finds in the FASTA file.
However, if the FASTA file is read into a ``SequenceCollection`` object, ID
uniqueness will be enforced because that is a requirement of a
``SequenceCollection``.
If a description is present, it is taken as the remaining characters that
follow the sequence ID and initial whitespace(s). The description is considered
additional information about the sequence (e.g., comments about the source of
the sequence or the molecule that it encodes).
For example, consider the following header::
>seq1 db-accession-149855
``seq1`` is the sequence ID and ``db-accession-149855`` is the sequence
description.
.. note:: scikit-bio's readers will remove all leading and trailing whitespace
from the description. If a header line begins with whitespace following the
``>``, the ID is assumed to be missing and the remainder of the line is
taken as the description.
Sequence Data
~~~~~~~~~~~~~
Biological sequence data follows the header, and can be split over multiple
lines. The sequence data (i.e., nucleotides or amino acids) are stored using
the standard IUPAC lexicon (single-letter codes).
.. note:: scikit-bio supports both upper and lower case characters.
This functionality depends on the type of object the data is
being read into. For ``Sequence``
   objects, scikit-bio doesn't care about the case. However, for other object
types, such as :class:`skbio.sequence.DNA`, :class:`skbio.sequence.RNA`,
and :class:`skbio.sequence.Protein`, the `lowercase` parameter
must be used to control case functionality. Refer to the documentation for
the constructors for details.
.. note:: Both ``-`` and ``.`` are supported as gap characters. See
:mod:`skbio.sequence` for more details on how scikit-bio interprets
sequence data in its in-memory objects.
Validation is performed for all scikit-bio objects which support it. This
consists of all objects which enforce usage of IUPAC characters. If any
invalid IUPAC characters are found in the sequence while reading from the
FASTA file, an exception is raised.
QUAL Format
^^^^^^^^^^^
A QUAL file contains quality scores for one or more biological sequences stored
in a corresponding FASTA file. QUAL format is very similar to FASTA format: it
stores records sequentially, with each record beginning with a header line
containing a sequence ID and description. The same rules apply to QUAL headers
as FASTA headers (see the above sections for details). scikit-bio processes
FASTA and QUAL headers in exactly the same way.
Quality scores are automatically stored in the object's `positional_metadata`
attribute, under the `'quality'` column.
Instead of storing biological sequence data in each record, a QUAL file stores
a Phred quality score for each base in the corresponding sequence. Quality
scores are represented as nonnegative integers separated by whitespace
(typically a single space or newline), and can span multiple lines.
.. note:: When reading FASTA and QUAL files, scikit-bio requires records to be
in the same order in both files (i.e., each FASTA and QUAL record must have
the same ID and description after being parsed). In addition to having the
same order, the number of FASTA records must match the number of QUAL
   records (i.e., missing or additional records are not allowed). scikit-bio
also requires that the number of quality scores match the number of bases in
the corresponding sequence.
When writing FASTA and QUAL files, scikit-bio will maintain the same
ordering of records in both files (i.e., using the same ID and description
in both records) to support future reading.
Format Parameters
-----------------
The following parameters are available to change how FASTA/QUAL files are read
or written in scikit-bio.
QUAL File Parameter (Readers and Writers)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``qual`` parameter is available to all FASTA format readers and writers. It
can be any file-like type supported by scikit-bio's I/O registry (e.g., file
handle, file path, etc.). If ``qual`` is provided when reading, quality scores
will be included in each in-memory ``Sequence`` object, in addition
to sequence data stored in the FASTA file. When writing, quality scores will be
written in QUAL format in addition to the sequence data being written in FASTA
format.
Reader-specific Parameters
^^^^^^^^^^^^^^^^^^^^^^^^^^
The available reader parameters differ depending on which reader is used.
Generator, SequenceCollection, and Alignment Reader Parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``constructor`` parameter can be used with the ``Sequence``
generator, ``SequenceCollection``, and ``Alignment`` FASTA readers.
``constructor`` specifies the in-memory type of each sequence that is parsed,
and defaults to ``Sequence``. ``constructor`` should be a subclass of
``Sequence``. For example, if you know that the FASTA file you're
reading contains protein sequences, you would pass
``constructor=Protein`` to the reader call.
.. note:: The FASTA sniffer will not attempt to guess the ``constructor``
parameter, so it will always default to ``Sequence`` if another
type is not provided to the reader.
Sequence Reader Parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``seq_num`` parameter can be used with the ``Sequence``,
``DNA``, ``RNA``, and ``Protein`` FASTA readers. ``seq_num`` specifies which
sequence to read from the FASTA file (and optional QUAL file), and defaults to
1 (i.e., such that the first sequence is read). For example, to read the 50th
sequence from a FASTA file, you would pass ``seq_num=50`` to the reader call.
Writer-specific Parameters
^^^^^^^^^^^^^^^^^^^^^^^^^^
The following parameters are available to all FASTA format writers:
- ``id_whitespace_replacement``: string to replace **each** whitespace
character in a sequence ID. This parameter is useful for cases where an
in-memory sequence ID contains whitespace, which would result in an on-disk
representation that would not be read back into memory as the same ID (since
IDs in FASTA format cannot contain whitespace). Defaults to ``_``. If
``None``, no whitespace replacement is performed and IDs are written as they
are stored in memory (this has the potential to create an invalid
FASTA-formatted file; see note below). This parameter also applies to a QUAL
file if one is provided.
- ``description_newline_replacement``: string to replace **each** newline
character in a sequence description. Since a FASTA header must be a single
line, newlines are not allowed in sequence descriptions and must be replaced
in order to write a valid FASTA file. Defaults to a single space. If
``None``, no newline replacement is performed and descriptions are written as
they are stored in memory (this has the potential to create an invalid
FASTA-formatted file; see note below). This parameter also applies to a QUAL
file if one is provided.
- ``max_width``: integer specifying the maximum line width (i.e., number of
characters) for sequence data and/or quality scores. If a sequence or its
quality scores are longer than ``max_width``, it will be split across
multiple lines, each with a maximum width of ``max_width``. Note that there
are some caveats when splitting quality scores. A single quality score will
*never* be split across multiple lines, otherwise it would become two
different quality scores when read again. Thus, splitting only occurs
*between* quality scores. This makes it possible to have a single long
quality score written on its own line that exceeds ``max_width``. For
example, the quality score ``12345`` would not be split across multiple lines
even if ``max_width=3``. Thus, a 5-character line would be written. Default
behavior is to not split sequence data or quality scores across multiple
lines.
- ``lowercase``: String or boolean array. If a string, it is treated as a key
into the positional metadata of the object. If a boolean array, it
indicates characters to write in lowercase. Characters in the sequence
corresponding to `True` values will be written in lowercase. The boolean
array must be the same length as the sequence.
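  For example, if a sequence carries a boolean positional metadata column named
  ``'exons'`` (name chosen here purely for illustration), passing
  ``lowercase='exons'`` to ``write`` would emit the positions flagged ``True``
  in lowercase.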
.. note:: The FASTA format writers will have noticeably better runtime
performance if ``id_whitespace_replacement`` and/or
``description_newline_replacement`` are set to ``None`` so that whitespace
replacement is not performed during writing. However, this can potentially
create invalid FASTA files, especially if there are newline characters in
the IDs or descriptions. For IDs with whitespace, this can also affect how
the IDs are read into memory in a subsequent read operation. For example, if
an in-memory sequence ID is ``'seq 1'`` and
``id_whitespace_replacement=None``, reading the FASTA file back into memory
would result in an ID of ``'seq'``, and ``'1'`` would be part of the
sequence description.
Examples
--------
Reading and Writing FASTA Files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Suppose we have the following FASTA file with five equal-length sequences
(example modified from [6]_)::
>seq1 Turkey
AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT
>seq2 Salmo gair
AAGCCTTGGCAGTGCAGGGTGAGCCGTGG
CCGGGCACGGTAT
>seq3 H. Sapiens
ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA
>seq4 Chimp
AAACCCTTGCCG
TTACGCTTAAAC
CGAGGCCGGGAC
ACTCAT
>seq5 Gorilla
AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA
.. note:: Original copyright notice for the above example file:
*(c) Copyright 1986-2008 by The University of Washington. Written by Joseph
Felsenstein. Permission is granted to copy this document provided that no
fee is charged for it and that this copyright notice is not removed.*
Note that the sequences are not required to be of equal length in order for the
file to be a valid FASTA file (this depends on the object that you're reading
the file into). Also note that some of the sequences occur on a single line,
while others are split across multiple lines.
Let's define this file in-memory as a ``StringIO``, though this could be a real
file path, file handle, or anything that's supported by scikit-bio's I/O
registry in practice:
>>> fl = [u">seq1 Turkey\\n",
... u"AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT\\n",
... u">seq2 Salmo gair\\n",
... u"AAGCCTTGGCAGTGCAGGGTGAGCCGTGG\\n",
... u"CCGGGCACGGTAT\\n",
... u">seq3 H. Sapiens\\n",
... u"ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA\\n",
... u">seq4 Chimp\\n",
... u"AAACCCTTGCCG\\n",
... u"TTACGCTTAAAC\\n",
... u"CGAGGCCGGGAC\\n",
... u"ACTCAT\\n",
... u">seq5 Gorilla\\n",
... u"AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA\\n"]
Let's read the FASTA file into a ``SequenceCollection``:
>>> from skbio import SequenceCollection
>>> sc = SequenceCollection.read(fl)
>>> sc.sequence_lengths()
[42, 42, 42, 42, 42]
>>> sc.ids()
[u'seq1', u'seq2', u'seq3', u'seq4', u'seq5']
We see that all 5 sequences have 42 characters, and that each of the sequence
IDs were successfully read into memory.
Since these sequences are of equal length (presumably because they've been
aligned), let's load the FASTA file into an ``Alignment`` object, which is a
more appropriate data structure:
>>> from skbio import Alignment
>>> aln = Alignment.read(fl)
>>> aln.sequence_length()
42
Note that we were able to read the FASTA file into two different data
structures (``SequenceCollection`` and ``Alignment``) using the exact same
``read`` method call (and underlying reading/parsing logic). Also note that we
didn't specify a file format in the ``read`` call. The FASTA sniffer detected
the correct file format for us!
Let's inspect the type of sequences stored in the ``Alignment``:
>>> aln[0]
Sequence
------------------------------------------------
Metadata:
u'description': u'Turkey'
u'id': u'seq1'
Stats:
length: 42
------------------------------------------------
0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
By default, sequences are loaded as ``Sequence`` objects. We can
change the type of sequence via the ``constructor`` parameter:
>>> from skbio import DNA
>>> aln = Alignment.read(fl, constructor=DNA)
>>> aln[0] # doctest: +NORMALIZE_WHITESPACE
DNA
------------------------------------------------
Metadata:
u'description': u'Turkey'
u'id': u'seq1'
Stats:
length: 42
has gaps: False
has degenerates: True
has non-degenerates: True
GC-content: 54.76%
------------------------------------------------
0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
We now have an ``Alignment`` of ``DNA`` objects instead of
``Sequence`` objects.
To write the alignment in FASTA format:
>>> from io import StringIO
>>> with StringIO() as fh:
... print(aln.write(fh).getvalue())
>seq1 Turkey
AAGCTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGGTAT
>seq2 Salmo gair
AAGCCTTGGCAGTGCAGGGTGAGCCGTGGCCGGGCACGGTAT
>seq3 H. Sapiens
ACCGGTTGGCCGTTCAGGGTACAGGTTGGCCGTTCAGGGTAA
>seq4 Chimp
AAACCCTTGCCGTTACGCTTAAACCGAGGCCGGGACACTCAT
>seq5 Gorilla
AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA
<BLANKLINE>
Both ``SequenceCollection`` and ``Alignment`` load all of the sequences from
the FASTA file into memory at once. If the FASTA file is large (which is often
the case), this may be infeasible if you don't have enough memory. To work
around this issue, you can stream the sequences using scikit-bio's
generator-based FASTA reader and writer. The generator-based reader yields
``Sequence`` objects (or subclasses if ``constructor`` is supplied)
one at a time, instead of loading all sequences into memory. For example, let's
use the generator-based reader to process a single sequence at a time in a
``for`` loop:
>>> import skbio.io
>>> for seq in skbio.io.read(fl, format='fasta'):
... seq
... print('')
Sequence
------------------------------------------------
Metadata:
u'description': u'Turkey'
u'id': u'seq1'
Stats:
length: 42
------------------------------------------------
0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
<BLANKLINE>
Sequence
------------------------------------------------
Metadata:
u'description': u'Salmo gair'
u'id': u'seq2'
Stats:
length: 42
------------------------------------------------
0 AAGCCTTGGC AGTGCAGGGT GAGCCGTGGC CGGGCACGGT AT
<BLANKLINE>
Sequence
------------------------------------------------
Metadata:
u'description': u'H. Sapiens'
u'id': u'seq3'
Stats:
length: 42
------------------------------------------------
0 ACCGGTTGGC CGTTCAGGGT ACAGGTTGGC CGTTCAGGGT AA
<BLANKLINE>
Sequence
------------------------------------------------
Metadata:
u'description': u'Chimp'
u'id': u'seq4'
Stats:
length: 42
------------------------------------------------
0 AAACCCTTGC CGTTACGCTT AAACCGAGGC CGGGACACTC AT
<BLANKLINE>
Sequence
------------------------------------------------
Metadata:
u'description': u'Gorilla'
u'id': u'seq5'
Stats:
length: 42
------------------------------------------------
0 AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
<BLANKLINE>
A single sequence can also be read into a ``Sequence`` (or subclass):
>>> from skbio import Sequence
>>> seq = Sequence.read(fl)
>>> seq
Sequence
------------------------------------------------
Metadata:
u'description': u'Turkey'
u'id': u'seq1'
Stats:
length: 42
------------------------------------------------
0 AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
By default, the first sequence in the FASTA file is read. This can be
controlled with ``seq_num``. For example, to read the fifth sequence:
>>> seq = Sequence.read(fl, seq_num=5)
>>> seq
Sequence
------------------------------------------------
Metadata:
u'description': u'Gorilla'
u'id': u'seq5'
Stats:
length: 42
------------------------------------------------
0 AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
We can use the same API to read the fifth sequence into a ``DNA``:
>>> dna_seq = DNA.read(fl, seq_num=5)
>>> dna_seq
DNA
------------------------------------------------
Metadata:
u'description': u'Gorilla'
u'id': u'seq5'
Stats:
length: 42
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 50.00%
------------------------------------------------
0 AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA
Individual sequence objects can also be written in FASTA format:
>>> with StringIO() as fh:
... print(dna_seq.write(fh).getvalue())
>seq5 Gorilla
AAACCCTTGCCGGTACGCTTAAACCATTGCCGGTACGCTTAA
<BLANKLINE>
Reading and Writing FASTA/QUAL Files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In addition to reading and writing standalone FASTA files, scikit-bio also
supports reading and writing FASTA and QUAL files together. Suppose we have the
following FASTA file::
>seq1 db-accession-149855
CGATGTC
>seq2 db-accession-34989
CATCG
Also suppose we have the following QUAL file::
>seq1 db-accession-149855
40 39 39 4
50 1 100
>seq2 db-accession-34989
3 3 10 42 80
>>> fasta_fl = [
... u">seq1 db-accession-149855\\n",
... u"CGATGTC\\n",
... u">seq2 db-accession-34989\\n",
... u"CATCG\\n"]
>>> qual_fl = [
... u">seq1 db-accession-149855\\n",
... u"40 39 39 4\\n",
... u"50 1 100\\n",
... u">seq2 db-accession-34989\\n",
... u"3 3 10 42 80\\n"]
To read in a single ``Sequence`` at a time, we can use the
generator-based reader as we did above, providing both FASTA and QUAL files:
>>> for seq in skbio.io.read(fasta_fl, qual=qual_fl, format='fasta'):
... seq
... print('')
Sequence
------------------------------------------
Metadata:
u'description': u'db-accession-149855'
u'id': u'seq1'
Positional metadata:
u'quality': <dtype: uint8>
Stats:
length: 7
------------------------------------------
0 CGATGTC
<BLANKLINE>
Sequence
-----------------------------------------
Metadata:
u'description': u'db-accession-34989'
u'id': u'seq2'
Positional metadata:
u'quality': <dtype: uint8>
Stats:
length: 5
-----------------------------------------
0 CATCG
<BLANKLINE>
Note that the sequence objects have quality scores stored as positional
metadata since we provided a QUAL file. The other FASTA readers operate in a
similar manner.
Now let's load the sequences and their quality scores into a
``SequenceCollection``:
>>> sc = SequenceCollection.read(fasta_fl, qual=qual_fl)
>>> sc
<SequenceCollection: n=2; mean +/- std length=6.00 +/- 1.00>
To write the sequence data and quality scores in the ``SequenceCollection`` to
FASTA and QUAL files, respectively, we run:
>>> new_fasta_fh = StringIO()
>>> new_qual_fh = StringIO()
>>> _ = sc.write(new_fasta_fh, qual=new_qual_fh)
>>> print(new_fasta_fh.getvalue())
>seq1 db-accession-149855
CGATGTC
>seq2 db-accession-34989
CATCG
<BLANKLINE>
>>> print(new_qual_fh.getvalue())
>seq1 db-accession-149855
40 39 39 4 50 1 100
>seq2 db-accession-34989
3 3 10 42 80
<BLANKLINE>
>>> new_fasta_fh.close()
>>> new_qual_fh.close()
References
----------
.. [1] Lipman, DJ; Pearson, WR (1985). "Rapid and sensitive protein similarity
searches". Science 227 (4693): 1435-41.
.. [2] http://en.wikipedia.org/wiki/FASTA_format
.. [3] http://blast.ncbi.nlm.nih.gov/blastcgihelp.shtml
.. [4] https://www.broadinstitute.org/crd/wiki/index.php/Qual
.. [5] Madden T. The BLAST Sequence Analysis Tool. 2002 Oct 9
[Updated 2003 Aug 13]. In: McEntyre J, Ostell J, editors. The NCBI Handbook
[Internet]. Bethesda (MD): National Center for Biotechnology Information
(US); 2002-. Chapter 16. Available from:
http://www.ncbi.nlm.nih.gov/books/NBK21097/
.. [6] http://evolution.genetics.washington.edu/phylip/doc/sequence.html
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import range, zip
from six.moves import zip_longest
import textwrap
import numpy as np
from skbio.io import create_format, FASTAFormatError, QUALFormatError
from skbio.io.registry import FileSentinel
from skbio.io.format._base import (_get_nth_sequence,
_parse_fasta_like_header,
_format_fasta_like_records, _line_generator,
_too_many_blanks)
from skbio.util._misc import chunk_str
from skbio.alignment import SequenceCollection, Alignment
from skbio.sequence import Sequence, DNA, RNA, Protein
fasta = create_format('fasta')
@fasta.sniffer()
def _fasta_sniffer(fh):
# Strategy:
# Ignore up to 5 blank/whitespace-only lines at the beginning of the
# file. Read up to 10 records. If at least one record is read (i.e.
# the file isn't empty) and no errors are thrown during reading, assume
# the file is in FASTA format. If a record appears to be QUAL, do *not*
# identify the file as FASTA since we don't want to sniff QUAL files as
# FASTA (technically they can be read as FASTA since the sequences may
# not be validated but it probably isn't what the user wanted). Also, if
# we add QUAL as its own file format in the future, we wouldn't want the
# FASTA and QUAL sniffers to both positively identify a QUAL file.
if _too_many_blanks(fh, 5):
return False, {}
num_records = 10
empty = True
try:
parser = _parse_fasta_raw(fh, _sniffer_data_parser, FASTAFormatError)
for _ in zip(range(num_records), parser):
empty = False
except FASTAFormatError:
return False, {}
if empty:
return False, {}
else:
return True, {}
def _sniffer_data_parser(chunks):
data = _parse_sequence_data(chunks)
try:
_parse_quality_scores(chunks)
except QUALFormatError:
return data
else:
# used for flow control within sniffer, user should never see this
# message
raise FASTAFormatError('Data appear to be quality scores.')
@fasta.reader(None)
def _fasta_to_generator(fh, qual=FileSentinel, constructor=Sequence, **kwargs):
if qual is None:
for seq, id_, desc in _parse_fasta_raw(fh, _parse_sequence_data,
FASTAFormatError):
yield constructor(seq, metadata={'id': id_, 'description': desc},
**kwargs)
else:
fasta_gen = _parse_fasta_raw(fh, _parse_sequence_data,
FASTAFormatError)
qual_gen = _parse_fasta_raw(qual, _parse_quality_scores,
QUALFormatError)
for fasta_rec, qual_rec in zip_longest(fasta_gen, qual_gen,
fillvalue=None):
if fasta_rec is None:
raise FASTAFormatError(
"QUAL file has more records than FASTA file.")
if qual_rec is None:
raise FASTAFormatError(
"FASTA file has more records than QUAL file.")
fasta_seq, fasta_id, fasta_desc = fasta_rec
qual_scores, qual_id, qual_desc = qual_rec
if fasta_id != qual_id:
raise FASTAFormatError(
"IDs do not match between FASTA and QUAL records: %r != %r"
% (str(fasta_id), str(qual_id)))
if fasta_desc != qual_desc:
raise FASTAFormatError(
"Descriptions do not match between FASTA and QUAL "
"records: %r != %r" % (str(fasta_desc), str(qual_desc)))
# sequence and quality scores lengths are checked in constructor
yield constructor(
fasta_seq,
metadata={'id': fasta_id, 'description': fasta_desc},
positional_metadata={'quality': qual_scores}, **kwargs)
@fasta.reader(Sequence)
def _fasta_to_biological_sequence(fh, qual=FileSentinel, seq_num=1):
return _get_nth_sequence(
_fasta_to_generator(fh, qual=qual, constructor=Sequence),
seq_num)
@fasta.reader(DNA)
def _fasta_to_dna_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
return _get_nth_sequence(
_fasta_to_generator(fh, qual=qual,
constructor=DNA, **kwargs),
seq_num)
@fasta.reader(RNA)
def _fasta_to_rna_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
return _get_nth_sequence(
_fasta_to_generator(fh, qual=qual,
constructor=RNA, **kwargs),
seq_num)
@fasta.reader(Protein)
def _fasta_to_protein_sequence(fh, qual=FileSentinel, seq_num=1, **kwargs):
return _get_nth_sequence(
_fasta_to_generator(fh, qual=qual,
constructor=Protein, **kwargs),
seq_num)
@fasta.reader(SequenceCollection)
def _fasta_to_sequence_collection(fh, qual=FileSentinel,
constructor=Sequence, **kwargs):
return SequenceCollection(
list(_fasta_to_generator(fh, qual=qual, constructor=constructor,
**kwargs)))
@fasta.reader(Alignment)
def _fasta_to_alignment(fh, qual=FileSentinel, constructor=Sequence, **kwargs):
return Alignment(
list(_fasta_to_generator(fh, qual=qual, constructor=constructor,
**kwargs)))
@fasta.writer(None)
def _generator_to_fasta(obj, fh, qual=FileSentinel,
id_whitespace_replacement='_',
description_newline_replacement=' ', max_width=None,
lowercase=None):
if max_width is not None:
if max_width < 1:
raise ValueError(
"Maximum line width must be greater than zero (max_width=%d)."
% max_width)
if qual is not None:
# define text wrapper for splitting quality scores here for
# efficiency. textwrap docs recommend reusing a TextWrapper
# instance when it is used many times. configure text wrapper to
# never break "words" (i.e., integer quality scores) across lines
qual_wrapper = textwrap.TextWrapper(
width=max_width, break_long_words=False,
break_on_hyphens=False)
formatted_records = _format_fasta_like_records(
obj, id_whitespace_replacement, description_newline_replacement,
qual is not None, lowercase)
for header, seq_str, qual_scores in formatted_records:
if max_width is not None:
seq_str = chunk_str(seq_str, max_width, '\n')
fh.write('>%s\n%s\n' % (header, seq_str))
if qual is not None:
qual_str = ' '.join(np.asarray(qual_scores, dtype=np.str))
if max_width is not None:
qual_str = qual_wrapper.fill(qual_str)
qual.write('>%s\n%s\n' % (header, qual_str))
@fasta.writer(Sequence)
def _biological_sequence_to_fasta(obj, fh, qual=FileSentinel,
id_whitespace_replacement='_',
description_newline_replacement=' ',
max_width=None):
_sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
description_newline_replacement, max_width)
@fasta.writer(DNA)
def _dna_sequence_to_fasta(obj, fh, qual=FileSentinel,
id_whitespace_replacement='_',
description_newline_replacement=' ',
max_width=None, lowercase=None):
_sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
description_newline_replacement, max_width, lowercase)
@fasta.writer(RNA)
def _rna_sequence_to_fasta(obj, fh, qual=FileSentinel,
id_whitespace_replacement='_',
description_newline_replacement=' ',
max_width=None, lowercase=None):
_sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
description_newline_replacement, max_width, lowercase)
@fasta.writer(Protein)
def _protein_sequence_to_fasta(obj, fh, qual=FileSentinel,
id_whitespace_replacement='_',
description_newline_replacement=' ',
max_width=None, lowercase=None):
_sequences_to_fasta([obj], fh, qual, id_whitespace_replacement,
description_newline_replacement, max_width, lowercase)
@fasta.writer(SequenceCollection)
def _sequence_collection_to_fasta(obj, fh, qual=FileSentinel,
id_whitespace_replacement='_',
description_newline_replacement=' ',
max_width=None, lowercase=None):
_sequences_to_fasta(obj, fh, qual, id_whitespace_replacement,
description_newline_replacement, max_width, lowercase)
@fasta.writer(Alignment)
def _alignment_to_fasta(obj, fh, qual=FileSentinel,
id_whitespace_replacement='_',
description_newline_replacement=' ', max_width=None,
lowercase=None):
_sequences_to_fasta(obj, fh, qual, id_whitespace_replacement,
description_newline_replacement, max_width, lowercase)
def _parse_fasta_raw(fh, data_parser, error_type):
"""Raw parser for FASTA or QUAL files.
Returns raw values (seq/qual, id, description). It is the responsibility of
the caller to construct the correct in-memory object to hold the data.
"""
# Skip any blank or whitespace-only lines at beginning of file
seq_header = next(_line_generator(fh, skip_blanks=True))
# header check inlined here and below for performance
if seq_header.startswith('>'):
id_, desc = _parse_fasta_like_header(seq_header)
else:
raise error_type(
"Found non-header line when attempting to read the 1st record:"
"\n%s" % seq_header)
data_chunks = []
prev = seq_header
for line in _line_generator(fh, skip_blanks=False):
if line.startswith('>'):
# new header, so yield current record and reset state
yield data_parser(data_chunks), id_, desc
data_chunks = []
id_, desc = _parse_fasta_like_header(line)
else:
if line:
# ensure no blank lines within a single record
if not prev:
raise error_type(
"Found blank or whitespace-only line within record.")
data_chunks.append(line)
prev = line
# yield last record in file
yield data_parser(data_chunks), id_, desc
def _parse_sequence_data(chunks):
if not chunks:
raise FASTAFormatError("Found header without sequence data.")
return ''.join(chunks)
def _parse_quality_scores(chunks):
if not chunks:
raise QUALFormatError("Found header without quality scores.")
qual_str = ' '.join(chunks)
try:
quality = np.asarray(qual_str.split(), dtype=int)
except ValueError:
raise QUALFormatError(
"Could not convert quality scores to integers:\n%s"
% str(qual_str))
if (quality < 0).any():
raise QUALFormatError(
"Encountered negative quality score(s). Quality scores must be "
"greater than or equal to zero.")
if (quality > 255).any():
raise QUALFormatError(
"Encountered quality score(s) greater than 255. scikit-bio only "
"supports quality scores in the range 0-255 (inclusive) when "
"reading QUAL files.")
return quality.astype(np.uint8, casting='unsafe', copy=False)
def _sequences_to_fasta(obj, fh, qual, id_whitespace_replacement,
description_newline_replacement, max_width,
lowercase=None):
def seq_gen():
for seq in obj:
yield seq
_generator_to_fasta(
seq_gen(), fh, qual=qual,
id_whitespace_replacement=id_whitespace_replacement,
description_newline_replacement=description_newline_replacement,
max_width=max_width, lowercase=lowercase)
| bsd-3-clause |
gitlabhq/pygments.rb | vendor/pygments-main/pygments/filter.py | 365 | 2071 | # -*- coding: utf-8 -*-
"""
pygments.filter
~~~~~~~~~~~~~~~
Module that implements the default filter.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
def apply_filters(stream, filters, lexer=None):
"""
Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
"""
def _apply(filter_, stream):
for token in filter_.filter(lexer, stream):
yield token
for filter_ in filters:
stream = _apply(filter_, stream)
return stream
def simplefilter(f):
"""
Decorator that converts a function into a filter::
@simplefilter
def lowercase(lexer, stream, options):
for ttype, value in stream:
yield ttype, value.lower()
"""
return type(f.__name__, (FunctionFilter,), {
'function': f,
'__module__': getattr(f, '__module__'),
'__doc__': f.__doc__
})
class Filter(object):
"""
Default filter. Subclass this class or use the `simplefilter`
decorator to create own filters.
"""
def __init__(self, **options):
self.options = options
def filter(self, lexer, stream):
raise NotImplementedError()
class FunctionFilter(Filter):
"""
Abstract class used by `simplefilter` to create simple
function filters on the fly. The `simplefilter` decorator
automatically creates subclasses of this class for
functions passed to it.
"""
function = None
def __init__(self, **options):
if not hasattr(self, 'function'):
raise TypeError('%r used without bound function' %
self.__class__.__name__)
Filter.__init__(self, **options)
def filter(self, lexer, stream):
# pylint: disable-msg=E1102
for ttype, value in self.function(lexer, stream, self.options):
yield ttype, value
| mit |
SPACEDAC7/TrabajoFinalGrado | StaticAnalyzer/tools/enjarify/enjarify/dalvikformats.py | 30 | 4154 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import util
# Code for parsing the various Dalvik opcode formats
INSTRUCTION_FORMAT = util.keysToRanges({
0x00: '10x',
0x01: '12x',
0x02: '22x',
0x03: '32x',
0x04: '12x',
0x05: '22x',
0x06: '32x',
0x07: '12x',
0x08: '22x',
0x09: '32x',
0x0a: '11x',
0x0b: '11x',
0x0c: '11x',
0x0d: '11x',
0x0e: '10x',
0x0f: '11x',
0x10: '11x',
0x11: '11x',
0x12: '11n',
0x13: '21s',
0x14: '31i',
0x15: '21h',
0x16: '21s',
0x17: '31i',
0x18: '51l',
0x19: '21h',
0x1a: '21c',
0x1b: '31c',
0x1c: '21c',
0x1d: '11x',
0x1e: '11x',
0x1f: '21c',
0x20: '22c',
0x21: '12x',
0x22: '21c',
0x23: '22c',
0x24: '35c',
0x25: '3rc',
0x26: '31t',
0x27: '11x',
0x28: '10t',
0x29: '20t',
0x2a: '30t',
0x2b: '31t',
0x2c: '31t',
0x2d: '23x',
0x32: '22t',
0x38: '21t',
0x3e: '10x',
0x44: '23x',
0x52: '22c',
0x60: '21c',
0x6e: '35c',
0x73: '10x',
0x74: '3rc',
0x79: '10x',
0x7b: '12x',
0x90: '23x',
0xb0: '12x',
0xd0: '22s',
0xd8: '22b',
0xe3: '10x',
}, 256)
# parsing funcs
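# Each parser's name spells out the nibble/word layout it consumes: e.g.
# pBAopCCCC reads one 16-bit word written as B|A|op (operand B in the high
# nibble, A in the next nibble, opcode in the low byte) followed by a second
# word holding CCCC.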
def p00op(w): return []
def pBAop(w): return [(w >> 8) & 0xF, w >> 12]
def pAAop(w): return [w >> 8]
def p00opAAAA(w, w2): return [w2]
def pAAopBBBB(w, w2): return [w >> 8, w2]
def pAAopCCBB(w, w2): return [w >> 8, w2 & 0xFF, w2 >> 8]
def pBAopCCCC(w, w2): return [(w >> 8) & 0xF, w >> 12, w2]
def p00opAAAAAAAA(w, w2, w3): return [w2 ^ (w3 << 16)]
def p00opAAAABBBB(w, w2, w3): return [w2, w3]
def pAAopBBBBBBBB(w, w2, w3): return [w >> 8, w2 ^ (w3 << 16)]
def pAGopBBBBFEDC(w, w2, w3):
a = w >> 12
c, d, e, f = (w3) & 0xF, (w3 >> 4) & 0xF, (w3 >> 8) & 0xF, (w3 >> 12) & 0xF
g = (w >> 8) & 0xF
return [w2, [c, d, e, f, g][:a]]
def pAAopBBBBCCCC(w, w2, w3):
a = w >> 8
return [w2, range(w3, w3+a)]
def pAAopBBBBBBBBBBBBBBBB(w, w2, w3, w4, w5):
b = w2 ^ (w3 << 16) ^ (w4 << 32) ^ (w5 << 48)
return [w >> 8, b]
_FUNC = {
'10x': p00op,
'12x': pBAop,
'11n': pBAop,
'11x': pAAop,
'10t': pAAop,
'20t': p00opAAAA,
'22x': pAAopBBBB,
'21t': pAAopBBBB,
'21s': pAAopBBBB,
'21h': pAAopBBBB,
'21c': pAAopBBBB,
'23x': pAAopCCBB,
'22b': pAAopCCBB,
'22t': pBAopCCCC,
'22s': pBAopCCCC,
'22c': pBAopCCCC,
'30t': p00opAAAAAAAA,
'32x': p00opAAAABBBB,
'31i': pAAopBBBBBBBB,
'31t': pAAopBBBBBBBB,
'31c': pAAopBBBBBBBB,
'35c': pAGopBBBBFEDC,
'3rc': pAAopBBBBCCCC,
'51l': pAAopBBBBBBBBBBBBBBBB,
}
def sign(x, bits):
if x >= (1 << (bits-1)):
x -= 1 << bits
return x
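# sign() reinterprets an unsigned field as two's complement,
# e.g. sign(0xFF, 8) == -1 and sign(0x7F, 8) == 127.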
def decode(shorts, pos, opcode):
fmt = INSTRUCTION_FORMAT[opcode]
size = int(fmt[0])
results = _FUNC[fmt](*shorts[pos:pos+size])
# Check if we need to sign extend
if fmt[2] == 'n':
results[-1] = sign(results[-1], 4)
elif fmt[2] == 'b' or (fmt[2] == 't' and size == 1):
results[-1] = sign(results[-1], 8)
elif fmt[2] == 's' or (fmt[2] == 't' and size == 2):
results[-1] = sign(results[-1], 16)
elif fmt[2] == 't' and size == 3:
results[-1] = sign(results[-1], 32)
# Hats depend on actual size expected, so we rely on opcode as a hack
if fmt[2] == 'h':
assert(opcode == 0x15 or opcode == 0x19)
results[-1] = results[-1] << (16 if opcode == 0x15 else 48)
# Convert code offsets to actual code position
if fmt[2] == 't':
results[-1] += pos
return pos + size, results
| gpl-3.0 |
Himon-SYNCRAFT/taskplus | tests/core/actions/test_get_task_status_details.py | 1 | 3408 | from unittest import mock
from taskplus.core.actions import (GetTaskStatusDetailsAction,
GetTaskStatusDetailsRequest)
from taskplus.core.domain import TaskStatus
from taskplus.core.shared.response import ResponseFailure
def test_get_status_details_action():
status = mock.Mock()
status = TaskStatus(name='new', id=1)
statuses_repo = mock.Mock()
statuses_repo.one.return_value = status
request = GetTaskStatusDetailsRequest(status.id)
action = GetTaskStatusDetailsAction(statuses_repo)
response = action.execute(request)
assert bool(response) is True
statuses_repo.one.assert_called_once_with(status.id)
assert response.value == status
def test_get_status_details_action_with_hooks():
status = mock.Mock()
status = TaskStatus(name='new', id=1)
statuses_repo = mock.Mock()
statuses_repo.one.return_value = status
request = GetTaskStatusDetailsRequest(status.id)
action = GetTaskStatusDetailsAction(statuses_repo)
before = mock.MagicMock()
after = mock.MagicMock()
action.add_before_execution_hook(before)
action.add_after_execution_hook(after)
response = action.execute(request)
assert before.called
assert after.called
assert bool(response) is True
statuses_repo.one.assert_called_once_with(status.id)
assert response.value == status
def test_get_status_details_action_handles_bad_request():
status = mock.Mock()
status = TaskStatus(name='new', id=1)
statuses_repo = mock.Mock()
statuses_repo.one.return_value = status
request = GetTaskStatusDetailsRequest(status_id=None)
action = GetTaskStatusDetailsAction(statuses_repo)
response = action.execute(request)
assert bool(response) is False
assert not statuses_repo.one.called
assert response.value == {
'type': ResponseFailure.PARAMETER_ERROR,
'message': 'status_id: is required'
}
def test_get_status_details_action_handles_generic_error():
error_message = 'Error!!!'
statuses_repo = mock.Mock()
statuses_repo.one.side_effect = Exception(error_message)
request = GetTaskStatusDetailsRequest(status_id=1)
action = GetTaskStatusDetailsAction(statuses_repo)
response = action.execute(request)
assert bool(response) is False
statuses_repo.one.assert_called_once_with(1)
assert response.value == {
'type': ResponseFailure.SYSTEM_ERROR,
'message': 'Exception: {}'.format(error_message)
}
def test_get_status_details_request():
status_id = 1
request = GetTaskStatusDetailsRequest(status_id)
assert request.is_valid()
assert request.status_id == status_id
def test_get_status_details_request_without_id():
status_id = None
request = GetTaskStatusDetailsRequest(status_id)
assert not request.is_valid()
assert request.status_id == status_id
assert len(request.errors) == 1
error = request.errors[0]
assert error.parameter == 'status_id'
assert error.message == 'is required'
def test_get_status_details_bad_request():
status_id = 'asd'
request = GetTaskStatusDetailsRequest(status_id)
assert not request.is_valid()
assert request.status_id == status_id
assert len(request.errors) == 1
error = request.errors[0]
assert error.parameter == 'status_id'
assert error.message == 'expected int, got str(asd)'
| bsd-3-clause |
TOCyna/tabelinha | flask/lib/python2.7/site-packages/werkzeug/test.py | 32 | 34230 | # -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from io import BytesIO
try:
from urllib2 import Request as U2Request
except ImportError:
from urllib.request import Request as U2Request
try:
from http.cookiejar import CookieJar
except ImportError: # Py2
from cookielib import CookieJar
from werkzeug._compat import iterlists, iteritems, itervalues, to_bytes, \
string_types, text_type, reraise, wsgi_encoding_dance, \
make_literal_wrapper
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \
url_unparse, url_parse
from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
from werkzeug.utils import dump_cookie
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
boundary=None, charset='utf-8'):
"""Encode a dict of values (either strings or file descriptors or
:class:`FileStorage` objects) into a multipart encoded string stored
in a file descriptor.
"""
if boundary is None:
boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
_closure = [BytesIO(), 0, False]
if use_tempfile:
def write_binary(string):
stream, total_length, on_disk = _closure
if on_disk:
stream.write(string)
else:
length = len(string)
if length + _closure[1] <= threshold:
stream.write(string)
else:
new_stream = TemporaryFile('wb+')
new_stream.write(stream.getvalue())
new_stream.write(string)
_closure[0] = new_stream
_closure[2] = True
_closure[1] = total_length + length
else:
write_binary = _closure[0].write
def write(string):
write_binary(string.encode(charset))
if not isinstance(values, MultiDict):
values = MultiDict(values)
for key, values in iterlists(values):
for value in values:
write('--%s\r\nContent-Disposition: form-data; name="%s"' %
(boundary, key))
reader = getattr(value, 'read', None)
if reader is not None:
filename = getattr(value, 'filename',
getattr(value, 'name', None))
content_type = getattr(value, 'content_type', None)
if content_type is None:
content_type = filename and \
mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
if filename is not None:
write('; filename="%s"\r\n' % filename)
else:
write('\r\n')
write('Content-Type: %s\r\n\r\n' % content_type)
while 1:
chunk = reader(16384)
if not chunk:
break
write_binary(chunk)
else:
if not isinstance(value, string_types):
value = str(value)
else:
value = to_bytes(value, charset)
write('\r\n\r\n')
write_binary(value)
write('\r\n')
write('--%s--\r\n' % boundary)
length = int(_closure[0].tell())
_closure[0].seek(0)
return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
"""Like `stream_encode_multipart` but returns a tuple in the form
(``boundary``, ``data``) where data is a bytestring.
"""
stream, length, boundary = stream_encode_multipart(
values, use_tempfile=False, boundary=boundary, charset=charset)
return boundary, stream.read()
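# Illustrative sketch (not part of the original module): encoding a single
# hypothetical form field into a multipart body.
_example_boundary, _example_data = encode_multipart({'field': u'value'})
assert b'form-data; name="field"' in _example_data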
def File(fd, filename=None, mimetype=None):
"""Backwards compat."""
from warnings import warn
warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
'EnvironBuilder or FileStorage instead'))
return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
def get_all(self, name, default=None):
rv = []
for k, v in self.headers:
if k.lower() == name.lower():
rv.append(v)
return rv or default or []
class _TestCookieResponse(object):
"""Something that looks like a httplib.HTTPResponse, but is actually just an
adapter for our test responses to make them available for cookielib.
"""
def __init__(self, headers):
self.headers = _TestCookieHeaders(headers)
def info(self):
return self.headers
class _TestCookieJar(CookieJar):
"""A cookielib.CookieJar modified to inject and read cookie headers from
and to wsgi environments, and wsgi application responses.
"""
def inject_wsgi(self, environ):
"""Inject the cookies as client headers into the server's wsgi
environment.
"""
cvals = []
for cookie in self:
cvals.append('%s=%s' % (cookie.name, cookie.value))
if cvals:
environ['HTTP_COOKIE'] = '; '.join(cvals)
def extract_wsgi(self, environ, headers):
"""Extract the server's set-cookie headers as cookies into the
cookie jar.
"""
self.extract_cookies(
_TestCookieResponse(headers),
U2Request(get_current_url(environ)),
)
def _iter_data(data):
"""Iterates over a dict or multidict yielding all keys and values.
This is used to iterate over the data passed to the
:class:`EnvironBuilder`.
"""
if isinstance(data, MultiDict):
for key, values in iterlists(data):
for value in values:
yield key, value
else:
for key, values in iteritems(data):
if isinstance(values, list):
for value in values:
yield key, value
else:
yield key, values
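# Illustrative sketch (not part of the original module): _iter_data() flattens
# plain dicts (with scalar or list values) and MultiDicts into key/value pairs.
assert sorted(_iter_data({'a': ['1', '2'], 'b': '3'})) == \
    [('a', '1'), ('a', '2'), ('b', '3')]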
class EnvironBuilder(object):
"""This class can be used to conveniently create a WSGI environment
for testing purposes. It can be used to quickly create WSGI environments
or request objects from arbitrary data.
The signature of this class is also used in some other places as of
Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
:meth:`Client.open`). Because of this most of the functionality is
available through the constructor alone.
Files and regular form data can be manipulated independently of each
other with the :attr:`form` and :attr:`files` attributes, but are
passed with the same argument to the constructor: `data`.
`data` can be any of these values:
- a `str`: If it's a string it is converted into a :attr:`input_stream`,
the :attr:`content_length` is set and you have to provide a
:attr:`content_type`.
- a `dict`: If it's a dict the keys have to be strings and the values
any of the following objects:
- a :class:`file`-like object. These are converted into
:class:`FileStorage` objects automatically.
- a tuple. The :meth:`~FileMultiDict.add_file` method is called
with the tuple items as positional arguments.
.. versionadded:: 0.6
`path` and `base_url` can now be unicode strings that are encoded using
the :func:`iri_to_uri` function.
:param path: the path of the request. In the WSGI environment this will
end up as `PATH_INFO`. If the `query_string` is not defined
and there is a question mark in the `path` everything after
it is used as query string.
:param base_url: the base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).
:param query_string: an optional string or dict with URL parameters.
:param method: the HTTP method to use, defaults to `GET`.
:param input_stream: an optional input stream. Do not specify this and
`data`. As soon as an input stream is set you can't
modify :attr:`args` and :attr:`files` unless you
set the :attr:`input_stream` to `None` again.
:param content_type: The content type for the request. As of 0.5 you
don't have to provide this when specifying files
and form data via `data`.
:param content_length: The content length for the request. You don't
have to specify this when providing data via
`data`.
:param errors_stream: an optional error stream that is used for
`wsgi.errors`. Defaults to :data:`stderr`.
:param multithread: controls `wsgi.multithread`. Defaults to `False`.
:param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
:param run_once: controls `wsgi.run_once`. Defaults to `False`.
:param headers: an optional list or :class:`Headers` object of headers.
:param data: a string or dict of form data. See explanation above.
:param environ_base: an optional dict of environment defaults.
:param environ_overrides: an optional dict of environment overrides.
:param charset: the charset used to encode unicode data.
"""
#: the server protocol to use. defaults to HTTP/1.1
server_protocol = 'HTTP/1.1'
#: the wsgi version to use. defaults to (1, 0)
wsgi_version = (1, 0)
#: the default request class for :meth:`get_request`
request_class = BaseRequest
def __init__(self, path='/', base_url=None, query_string=None,
method='GET', input_stream=None, content_type=None,
content_length=None, errors_stream=None, multithread=False,
multiprocess=False, run_once=False, headers=None, data=None,
environ_base=None, environ_overrides=None, charset='utf-8'):
path_s = make_literal_wrapper(path)
if query_string is None and path_s('?') in path:
path, query_string = path.split(path_s('?'), 1)
self.charset = charset
self.path = iri_to_uri(path)
if base_url is not None:
base_url = url_fix(iri_to_uri(base_url, charset), charset)
self.base_url = base_url
if isinstance(query_string, (bytes, text_type)):
self.query_string = query_string
else:
if query_string is None:
query_string = MultiDict()
elif not isinstance(query_string, MultiDict):
query_string = MultiDict(query_string)
self.args = query_string
self.method = method
if headers is None:
headers = Headers()
elif not isinstance(headers, Headers):
headers = Headers(headers)
self.headers = headers
if content_type is not None:
self.content_type = content_type
if errors_stream is None:
errors_stream = sys.stderr
self.errors_stream = errors_stream
self.multithread = multithread
self.multiprocess = multiprocess
self.run_once = run_once
self.environ_base = environ_base
self.environ_overrides = environ_overrides
self.input_stream = input_stream
self.content_length = content_length
self.closed = False
if data:
if input_stream is not None:
raise TypeError('can\'t provide input stream and data')
if isinstance(data, text_type):
data = data.encode(self.charset)
if isinstance(data, bytes):
self.input_stream = BytesIO(data)
if self.content_length is None:
self.content_length = len(data)
else:
for key, value in _iter_data(data):
if isinstance(value, (tuple, dict)) or \
hasattr(value, 'read'):
self._add_file_from_data(key, value)
else:
self.form.setlistdefault(key).append(value)
def _add_file_from_data(self, key, value):
"""Called in the EnvironBuilder to add files from the data dict."""
if isinstance(value, tuple):
self.files.add_file(key, *value)
elif isinstance(value, dict):
from warnings import warn
warn(DeprecationWarning('it\'s no longer possible to pass dicts '
'as `data`. Use tuples or FileStorage '
'objects instead'), stacklevel=2)
value = dict(value)
mimetype = value.pop('mimetype', None)
if mimetype is not None:
value['content_type'] = mimetype
self.files.add_file(key, **value)
else:
self.files.add_file(key, value)
def _get_base_url(self):
return url_unparse((self.url_scheme, self.host,
self.script_root, '', '')).rstrip('/') + '/'
def _set_base_url(self, value):
if value is None:
scheme = 'http'
netloc = 'localhost'
script_root = ''
else:
scheme, netloc, script_root, qs, anchor = url_parse(value)
if qs or anchor:
raise ValueError('base url must not contain a query string '
'or fragment')
self.script_root = script_root.rstrip('/')
self.host = netloc
self.url_scheme = scheme
base_url = property(_get_base_url, _set_base_url, doc='''
The base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).''')
del _get_base_url, _set_base_url
def _get_content_type(self):
ct = self.headers.get('Content-Type')
if ct is None and not self._input_stream:
if self._files:
return 'multipart/form-data'
elif self._form:
return 'application/x-www-form-urlencoded'
return None
return ct
def _set_content_type(self, value):
if value is None:
self.headers.pop('Content-Type', None)
else:
self.headers['Content-Type'] = value
content_type = property(_get_content_type, _set_content_type, doc='''
The content type for the request. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_type, _set_content_type
def _get_content_length(self):
return self.headers.get('Content-Length', type=int)
def _set_content_length(self, value):
if value is None:
self.headers.pop('Content-Length', None)
else:
self.headers['Content-Length'] = str(value)
content_length = property(_get_content_length, _set_content_length, doc='''
The content length as integer. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_length, _set_content_length
def form_property(name, storage, doc):
key = '_' + name
def getter(self):
if self._input_stream is not None:
raise AttributeError('an input stream is defined')
rv = getattr(self, key)
if rv is None:
rv = storage()
setattr(self, key, rv)
return rv
def setter(self, value):
self._input_stream = None
setattr(self, key, value)
return property(getter, setter, doc)
form = form_property('form', MultiDict, doc='''
A :class:`MultiDict` of form values.''')
files = form_property('files', FileMultiDict, doc='''
A :class:`FileMultiDict` of uploaded files. You can use the
:meth:`~FileMultiDict.add_file` method to add new files to the
dict.''')
del form_property
def _get_input_stream(self):
return self._input_stream
def _set_input_stream(self, value):
self._input_stream = value
self._form = self._files = None
input_stream = property(_get_input_stream, _set_input_stream, doc='''
An optional input stream. If you set this it will clear
:attr:`form` and :attr:`files`.''')
del _get_input_stream, _set_input_stream
def _get_query_string(self):
if self._query_string is None:
if self._args is not None:
return url_encode(self._args, charset=self.charset)
return ''
return self._query_string
def _set_query_string(self, value):
self._query_string = value
self._args = None
query_string = property(_get_query_string, _set_query_string, doc='''
The query string. If you set this to a string :attr:`args` will
no longer be available.''')
del _get_query_string, _set_query_string
def _get_args(self):
if self._query_string is not None:
raise AttributeError('a query string is defined')
if self._args is None:
self._args = MultiDict()
return self._args
def _set_args(self, value):
self._query_string = None
self._args = value
args = property(_get_args, _set_args, doc='''
The URL arguments as :class:`MultiDict`.''')
del _get_args, _set_args
@property
def server_name(self):
"""The server name (read-only, use :attr:`host` to set)"""
return self.host.split(':', 1)[0]
@property
def server_port(self):
"""The server port as integer (read-only, use :attr:`host` to set)"""
pieces = self.host.split(':', 1)
if len(pieces) == 2 and pieces[1].isdigit():
return int(pieces[1])
elif self.url_scheme == 'https':
return 443
return 80
def __del__(self):
try:
self.close()
except Exception:
pass
def close(self):
"""Closes all files. If you put real :class:`file` objects into the
:attr:`files` dict you can call this method to automatically close
them all in one go.
"""
if self.closed:
return
try:
files = itervalues(self.files)
except AttributeError:
files = ()
for f in files:
try:
f.close()
except Exception:
pass
self.closed = True
def get_environ(self):
"""Return the built environ."""
input_stream = self.input_stream
content_length = self.content_length
content_type = self.content_type
if input_stream is not None:
start_pos = input_stream.tell()
input_stream.seek(0, 2)
end_pos = input_stream.tell()
input_stream.seek(start_pos)
content_length = end_pos - start_pos
elif content_type == 'multipart/form-data':
values = CombinedMultiDict([self.form, self.files])
input_stream, content_length, boundary = \
stream_encode_multipart(values, charset=self.charset)
content_type += '; boundary="%s"' % boundary
elif content_type == 'application/x-www-form-urlencoded':
#py2v3 review
values = url_encode(self.form, charset=self.charset)
values = values.encode('ascii')
content_length = len(values)
input_stream = BytesIO(values)
else:
input_stream = _empty_stream
result = {}
if self.environ_base:
result.update(self.environ_base)
def _path_encode(x):
return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
qs = wsgi_encoding_dance(self.query_string)
result.update({
'REQUEST_METHOD': self.method,
'SCRIPT_NAME': _path_encode(self.script_root),
'PATH_INFO': _path_encode(self.path),
'QUERY_STRING': qs,
'SERVER_NAME': self.server_name,
'SERVER_PORT': str(self.server_port),
'HTTP_HOST': self.host,
'SERVER_PROTOCOL': self.server_protocol,
'CONTENT_TYPE': content_type or '',
'CONTENT_LENGTH': str(content_length or '0'),
'wsgi.version': self.wsgi_version,
'wsgi.url_scheme': self.url_scheme,
'wsgi.input': input_stream,
'wsgi.errors': self.errors_stream,
'wsgi.multithread': self.multithread,
'wsgi.multiprocess': self.multiprocess,
'wsgi.run_once': self.run_once
})
for key, value in self.headers.to_wsgi_list():
result['HTTP_%s' % key.upper().replace('-', '_')] = value
if self.environ_overrides:
result.update(self.environ_overrides)
return result
def get_request(self, cls=None):
"""Returns a request with the data. If the request class is not
specified :attr:`request_class` is used.
:param cls: The request wrapper to use.
"""
if cls is None:
cls = self.request_class
return cls(self.get_environ())
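# Illustrative sketch (not part of the original module): building a WSGI
# environ for a hypothetical form POST.  The path and field name are made up.
_example_builder = EnvironBuilder(path='/login', method='POST',
                                  data={'username': u'admin'})
_example_environ = _example_builder.get_environ()
assert _example_environ['REQUEST_METHOD'] == 'POST'
assert _example_environ['PATH_INFO'] == '/login'
assert _example_environ['CONTENT_TYPE'] == 'application/x-www-form-urlencoded'
_example_builder.close()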
class ClientRedirectError(Exception):
"""
If a redirect loop is detected when using follow_redirects=True with
the :cls:`Client`, then this exception is raised.
"""
class Client(object):
"""This class allows to send requests to a wrapped application.
The response wrapper can be a class or factory function that takes
three arguments: app_iter, status and headers. The default response
wrapper just returns a tuple.
Example::
class ClientResponse(BaseResponse):
...
client = Client(MyApplication(), response_wrapper=ClientResponse)
The use_cookies parameter indicates whether cookies should be stored and
sent for subsequent requests. This is True by default, but passing False
will disable this behaviour.
If you want to request some subdomain of your application you may set
`allow_subdomain_redirects` to `True`; otherwise no external redirects
are allowed.
.. versionadded:: 0.5
`use_cookies` is new in this version. Older versions did not provide
builtin cookie support.
"""
def __init__(self, application, response_wrapper=None, use_cookies=True,
allow_subdomain_redirects=False):
self.application = application
self.response_wrapper = response_wrapper
if use_cookies:
self.cookie_jar = _TestCookieJar()
else:
self.cookie_jar = None
self.allow_subdomain_redirects = allow_subdomain_redirects
def set_cookie(self, server_name, key, value='', max_age=None,
expires=None, path='/', domain=None, secure=None,
httponly=False, charset='utf-8'):
"""Sets a cookie in the client's cookie jar. The server name
is required and has to match the one that is also passed to
the open call.
"""
assert self.cookie_jar is not None, 'cookies disabled'
header = dump_cookie(key, value, max_age, expires, path, domain,
secure, httponly, charset)
environ = create_environ(path, base_url='http://' + server_name)
headers = [('Set-Cookie', header)]
self.cookie_jar.extract_wsgi(environ, headers)
def delete_cookie(self, server_name, key, path='/', domain=None):
"""Deletes a cookie in the test client."""
self.set_cookie(server_name, key, expires=0, max_age=0,
path=path, domain=domain)
def run_wsgi_app(self, environ, buffered=False):
"""Runs the wrapped WSGI app with the given environment."""
if self.cookie_jar is not None:
self.cookie_jar.inject_wsgi(environ)
rv = run_wsgi_app(self.application, environ, buffered=buffered)
if self.cookie_jar is not None:
self.cookie_jar.extract_wsgi(environ, rv[2])
return rv
def resolve_redirect(self, response, new_location, environ, buffered=False):
"""Resolves a single redirect and triggers the request again
directly on this redirect client.
"""
scheme, netloc, script_root, qs, anchor = url_parse(new_location)
base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/'
cur_server_name = netloc.split(':', 1)[0].split('.')
real_server_name = get_host(environ).rsplit(':', 1)[0].split('.')
if self.allow_subdomain_redirects:
allowed = cur_server_name[-len(real_server_name):] == real_server_name
else:
allowed = cur_server_name == real_server_name
if not allowed:
raise RuntimeError('%r does not support redirect to '
'external targets' % self.__class__)
status_code = int(response[1].split(None, 1)[0])
if status_code == 307:
method = environ['REQUEST_METHOD']
else:
method = 'GET'
# For redirect handling we temporarily disable the response
# wrapper. This is not threadsafe but not a real concern
# since the test client must not be shared anyways.
old_response_wrapper = self.response_wrapper
self.response_wrapper = None
try:
return self.open(path=script_root, base_url=base_url,
query_string=qs, as_tuple=True,
buffered=buffered, method=method)
finally:
self.response_wrapper = old_response_wrapper
def open(self, *args, **kwargs):
"""Takes the same arguments as the :class:`EnvironBuilder` class with
some additions: You can provide a :class:`EnvironBuilder` or a WSGI
environment as only argument instead of the :class:`EnvironBuilder`
arguments and two optional keyword arguments (`as_tuple`, `buffered`)
that change the type of the return value or the way the application is
executed.
.. versionchanged:: 0.5
If a dict is provided as file in the dict for the `data` parameter
the content type has to be called `content_type` now instead of
`mimetype`. This change was made for consistency with
:class:`werkzeug.FileWrapper`.
The `follow_redirects` parameter was added to :func:`open`.
Additional parameters:
:param as_tuple: Returns a tuple in the form ``(environ, result)``
:param buffered: Set this to True to buffer the application run.
This will automatically close the application for
you as well.
:param follow_redirects: Set this to True if the `Client` should
follow HTTP redirects.
"""
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
environ = None
if not kwargs and len(args) == 1:
if isinstance(args[0], EnvironBuilder):
environ = args[0].get_environ()
elif isinstance(args[0], dict):
environ = args[0]
if environ is None:
builder = EnvironBuilder(*args, **kwargs)
try:
environ = builder.get_environ()
finally:
builder.close()
response = self.run_wsgi_app(environ, buffered=buffered)
# handle redirects
redirect_chain = []
while 1:
status_code = int(response[1].split(None, 1)[0])
if status_code not in (301, 302, 303, 305, 307) \
or not follow_redirects:
break
new_location = response[2]['location']
method = 'GET'
if status_code == 307:
method = environ['REQUEST_METHOD']
new_redirect_entry = (new_location, status_code)
if new_redirect_entry in redirect_chain:
raise ClientRedirectError('loop detected')
redirect_chain.append(new_redirect_entry)
environ, response = self.resolve_redirect(response, new_location,
environ,
buffered=buffered)
if self.response_wrapper is not None:
response = self.response_wrapper(*response)
if as_tuple:
return environ, response
return response
def get(self, *args, **kw):
"""Like open but method is enforced to GET."""
kw['method'] = 'GET'
return self.open(*args, **kw)
def patch(self, *args, **kw):
"""Like open but method is enforced to PATCH."""
kw['method'] = 'PATCH'
return self.open(*args, **kw)
def post(self, *args, **kw):
"""Like open but method is enforced to POST."""
kw['method'] = 'POST'
return self.open(*args, **kw)
def head(self, *args, **kw):
"""Like open but method is enforced to HEAD."""
kw['method'] = 'HEAD'
return self.open(*args, **kw)
def put(self, *args, **kw):
"""Like open but method is enforced to PUT."""
kw['method'] = 'PUT'
return self.open(*args, **kw)
def delete(self, *args, **kw):
"""Like open but method is enforced to DELETE."""
kw['method'] = 'DELETE'
return self.open(*args, **kw)
def options(self, *args, **kw):
"""Like open but method is enforced to OPTIONS."""
kw['method'] = 'OPTIONS'
return self.open(*args, **kw)
def trace(self, *args, **kw):
"""Like open but method is enforced to TRACE."""
kw['method'] = 'TRACE'
return self.open(*args, **kw)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.application
)
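# Illustrative sketch (not part of the original module): driving a minimal,
# made-up WSGI application through the test client.
def _example_app(environ, start_response):
    # Minimal application used only to illustrate the Client API.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

_example_client = Client(_example_app)
_example_iter, _example_status, _example_headers = _example_client.get('/')
assert _example_status == '200 OK'
assert b''.join(_example_iter) == b'hello'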
def create_environ(*args, **kwargs):
"""Create a new WSGI environ dict based on the values passed. The first
parameter should be the path of the request which defaults to '/'. The
second one can either be an absolute path (in that case the host is
localhost:80) or a full path to the request with scheme, netloc port and
the path to the script.
This accepts the same arguments as the :class:`EnvironBuilder`
constructor.
.. versionchanged:: 0.5
This function is now a thin wrapper over :class:`EnvironBuilder` which
was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
and `charset` parameters were added.
"""
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_environ()
finally:
builder.close()
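# Illustrative sketch (not part of the original module): create_environ() is a
# thin functional wrapper around EnvironBuilder.
_example_env = create_environ('/foo?bar=baz', 'http://example.org/')
assert _example_env['PATH_INFO'] == '/foo'
assert _example_env['QUERY_STRING'] == 'bar=baz'
assert _example_env['HTTP_HOST'] == 'example.org'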
def run_wsgi_app(app, environ, buffered=False):
"""Return a tuple in the form (app_iter, status, headers) of the
application output. This works best if you pass it an application that
returns an iterator all the time.
Sometimes applications may use the `write()` callable returned
by the `start_response` function. This tries to resolve such edge
cases automatically. But if you don't get the expected output you
should set `buffered` to `True` which enforces buffering.
If passed an invalid WSGI application the behavior of this function is
undefined. Never pass non-conforming WSGI applications to this function.
:param app: the application to execute.
:param buffered: set to `True` to enforce buffering.
:return: tuple in the form ``(app_iter, status, headers)``
"""
environ = _get_environ(environ)
response = []
buffer = []
def start_response(status, headers, exc_info=None):
if exc_info is not None:
reraise(*exc_info)
response[:] = [status, headers]
return buffer.append
app_iter = app(environ, start_response)
# when buffering we emit the close call early and convert the
# application iterator into a regular list
if buffered:
close_func = getattr(app_iter, 'close', None)
try:
app_iter = list(app_iter)
finally:
if close_func is not None:
close_func()
# otherwise we iterate the application iter until we have
# a response, chain the already received data with the already
# collected data and wrap it in a new `ClosingIterator` if
# we have a close callable.
else:
while not response:
buffer.append(next(app_iter))
if buffer:
close_func = getattr(app_iter, 'close', None)
app_iter = chain(buffer, app_iter)
if close_func is not None:
app_iter = ClosingIterator(app_iter, close_func)
return app_iter, response[0], Headers(response[1])
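# Illustrative sketch (not part of the original module): running a trivial,
# made-up WSGI application directly and buffering its output.
def _example_wsgi_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']

_example_app_iter, _example_status_line, _example_header_obj = run_wsgi_app(
    _example_wsgi_app, create_environ('/'), buffered=True)
assert _example_status_line == '200 OK'
assert _example_app_iter == [b'ok']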
| gpl-2.0 |
ppmt/Crust | flask/lib/python2.7/site-packages/setuptools/depends.py | 462 | 6370 | import sys
import imp
import marshal
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from distutils.version import StrictVersion
from setuptools import compat
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name,self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f,p,i = find_module(self.module,paths)
if f: f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
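# Illustrative sketch (not part of the original module): declaring a dependency
# on a hypothetical 'DocUtils' distribution whose module exposes __version__.
_example_req = Require('DocUtils', '0.3', 'docutils')
assert _example_req.full_name() == 'DocUtils-0.3'
# _example_req.is_present() / .is_current() report on the locally installed copy.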
def _iter_code(code):
"""Yield '(op,arg)' pair for each operation in code object 'code'"""
from array import array
from dis import HAVE_ARGUMENT, EXTENDED_ARG
bytes = array('b',code.co_code)
eof = len(code.co_code)
ptr = 0
extended_arg = 0
while ptr<eof:
op = bytes[ptr]
if op>=HAVE_ARGUMENT:
arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg
ptr += 3
if op==EXTENDED_ARG:
extended_arg = arg * compat.long_type(65536)
continue
else:
arg = None
ptr += 1
yield op,arg
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix,mode,kind) = info = imp.find_module(part, paths)
if kind==PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts,module))
return info
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
try:
if kind==PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind==PY_FROZEN:
code = imp.get_frozen_object(module)
elif kind==PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
if module not in sys.modules:
imp.load_module(module, f, path, (suffix, mode, kind))
return getattr(sys.modules[module], symbol, None)
finally:
if f:
f.close()
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for op, arg in _iter_code(code):
if op==LOAD_CONST:
const = code.co_consts[arg]
elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL):
return const
else:
const = default
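# Illustrative sketch (not part of the original module, and it assumes the
# CPython 2-era bytecode layout this module targets):
#
#     code = compile("__version__ = '1.0'\n", '<example>', 'exec')
#     extract_constant(code, '__version__')   ->  '1.0'
#     extract_constant(code, '__author__')    ->  None (symbol never assigned)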
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
| gpl-2.0 |
kennydude/django-rest-framework | tests/test_htmlrenderer.py | 79 | 4406 | from __future__ import unicode_literals
import django.template.loader
from django.conf.urls import url
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.template import Template, TemplateDoesNotExist
from django.test import TestCase
from django.utils import six
from rest_framework import status
from rest_framework.decorators import api_view, renderer_classes
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
@api_view(('GET',))
@renderer_classes((TemplateHTMLRenderer,))
def example(request):
"""
A view that returns an HTML representation.
"""
data = {'object': 'foobar'}
return Response(data, template_name='example.html')
@api_view(('GET',))
@renderer_classes((TemplateHTMLRenderer,))
def permission_denied(request):
raise PermissionDenied()
@api_view(('GET',))
@renderer_classes((TemplateHTMLRenderer,))
def not_found(request):
raise Http404()
urlpatterns = [
url(r'^$', example),
url(r'^permission_denied$', permission_denied),
url(r'^not_found$', not_found),
]
class TemplateHTMLRendererTests(TestCase):
urls = 'tests.test_htmlrenderer'
def setUp(self):
"""
Monkeypatch get_template
"""
self.get_template = django.template.loader.get_template
def get_template(template_name, dirs=None):
if template_name == 'example.html':
return Template("example: {{ object }}")
raise TemplateDoesNotExist(template_name)
def select_template(template_name_list, dirs=None, using=None):
if template_name_list == ['example.html']:
return Template("example: {{ object }}")
raise TemplateDoesNotExist(template_name_list[0])
django.template.loader.get_template = get_template
django.template.loader.select_template = select_template
def tearDown(self):
"""
Revert monkeypatching
"""
django.template.loader.get_template = self.get_template
def test_simple_html_view(self):
response = self.client.get('/')
self.assertContains(response, "example: foobar")
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
def test_not_found_html_view(self):
response = self.client.get('/not_found')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.content, six.b("404 Not Found"))
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
def test_permission_denied_html_view(self):
response = self.client.get('/permission_denied')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.content, six.b("403 Forbidden"))
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
class TemplateHTMLRendererExceptionTests(TestCase):
urls = 'tests.test_htmlrenderer'
def setUp(self):
"""
Monkeypatch get_template
"""
self.get_template = django.template.loader.get_template
def get_template(template_name):
if template_name == '404.html':
return Template("404: {{ detail }}")
if template_name == '403.html':
return Template("403: {{ detail }}")
raise TemplateDoesNotExist(template_name)
django.template.loader.get_template = get_template
def tearDown(self):
"""
Revert monkeypatching
"""
django.template.loader.get_template = self.get_template
def test_not_found_html_view_with_template(self):
response = self.client.get('/not_found')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertTrue(response.content in (
six.b("404: Not found"), six.b("404 Not Found")))
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
def test_permission_denied_html_view_with_template(self):
response = self.client.get('/permission_denied')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertTrue(response.content in (
six.b("403: Permission denied"), six.b("403 Forbidden")))
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
| bsd-2-clause |
codeforamerica/comport | migrations/versions/0d78d545906f_.py | 1 | 1135 | """Add 'is_public' flags for datasets
Revision ID: 0d78d545906f
Revises: 6d30846080b2
Create Date: 2016-06-27 15:30:14.415519
"""
# revision identifiers, used by Alembic.
revision = '0d78d545906f'
down_revision = '6d30846080b2'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('departments', sa.Column('is_public_assaults_on_officers', sa.Boolean(), server_default=sa.true(), nullable=False))
op.add_column('departments', sa.Column('is_public_citizen_complaints', sa.Boolean(), server_default=sa.true(), nullable=False))
op.add_column('departments', sa.Column('is_public_officer_involved_shootings', sa.Boolean(), server_default=sa.true(), nullable=False))
op.add_column('departments', sa.Column('is_public_use_of_force_incidents', sa.Boolean(), server_default=sa.true(), nullable=False))
def downgrade():
op.drop_column('departments', 'is_public_use_of_force_incidents')
op.drop_column('departments', 'is_public_officer_involved_shootings')
op.drop_column('departments', 'is_public_citizen_complaints')
op.drop_column('departments', 'is_public_assaults_on_officers')
| bsd-3-clause |
alihalabyah/flexx | exp/wgui.py | 22 | 5614 | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Almar Klein
"""
Little experiment for the purpose for creating a GUI toolkit based on
web technologies like HTML/CSS/JS.
Applications build with such a GUI can be easily deployed on all
platforms and also run in a web browser...
Usefull links:
* http://www.aclevername.com/articles/python-webgui/
"""
import time
#from zoof.qt import QtCore, QtGui, QtWebKit
from PyQt4 import QtCore, QtGui, QtWebKit
HTML = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html
PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink">
<head>
<title></title>
<link href="demo.css" rel="stylesheet" type="text/css"></link>
<!-- <script src="jquery-1.11.1.min.js"></script> -->
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script type="text/javascript">
// <![CDATA[
function send(msg) {
// we communicate to Python by modifying the title
document.title = "null";
document.title = msg;
}
function got_a_click(e) {
send('got-a-click:' + e.target.id);
}
function got_a_move(e) {
if (e.clientX && e.clientY) {
//send('got-a-move:' + e.target.id);
send('got-a-move:' + e.target.id + '-' + e.clientX + ',' + e.clientY);
}
}
$(document).ready(function() {
$('#messages').click(got_a_click);
//send($.toJSON('document.ready'));
send('document.ready');
})
// ]]>
</script>
</head>
<body>
<h1>Python + Web GUI Demo</h1>
<h2>Uptime</h2>
<p class="uptime">
Python uptime:
<span id="uptime-value">?</span> seconds.
</p>
<h2>Messages</h2>
<p id="messages">
Click here (yes, anywhere here)...<br/>
</p>
</body>
</html>
"""
class Page(QtWebKit.QWebPage):
""" Subclass Pagse to catch JS errors and prompts.
"""
def javaScriptConsoleMessage(self, msg, linenr, sourceID):
print('ERROR: on line %i in %r: %s' % (linenr, sourceID, msg))
def javaScriptAlert(self, frame, msg):
print('ALERT:', msg)
def javaScriptConfirm(self, frame, msg):
while True:
a = input('Need confirm from JS: msg [Y/n] ')
if not a or a.lower() == 'y':
return True
elif a.lower() == 'n':
return False
def javaScriptPrompt(self, frame, *args):
pass # todo
class Main(QtWebKit.QWebView):
""" Our main application window.
"""
def __init__(self):
super().__init__(None)
self.setPage(Page(self))
self.page().mainFrame().setHtml(HTML)
self.titleChanged.connect(self.on_title_changed)
self._timer = QtCore.QTimer()
self._timer.setSingleShot(False)
self._timer.timeout.connect(self.on_timer)
self._timer.start(207)
self._t0 = time.time()
def on_error(self, msg):
print('ERROR:', msg)
def on_timer(self):
t = time.time() - self._t0
msg = 'document.getElementById("uptime-value").innerHTML = %1.01f' % t
self.web_send(msg)
def web_send(self, msg):
f = self.page().mainFrame()
f.evaluateJavaScript(msg)
def on_title_changed(self, title):
if title == 'null':
return
print('MSG:', title)
if title.startswith("got-a-move:test-widget"):
xy = title.split('-')[-1]
x, y = [int(i)-20 for i in xy.split(',')]
msg = 'document.getElementById("test-widget").style.left = "%ipx";' % x
msg += 'document.getElementById("test-widget").style.top = "%ipx";' % y
self.web_send(msg)
print(title)
if title == "got-a-click:messages":
#self.web_send("confirm('Please confitm');")
#self.web_send("alert('wooot');")
self.web_send("""
$(document.body).append("<div id='test-widget' class='draggable'>This is a paragraph</div>");
$("#test-widget").css({ "width": "100px",
"height": "35px",
"position":"absolute",
"top":"100px",
"left":"100px",
"background": "red",
"overflow":"hidden",
"user-select": "none",
"handle": "",
"cursor": "move",
});
// Implement some dragging (sort of)
$("#test-widget")._down = false;
$("#test-widget").mousedown(function(e){this._down=true});
$("#test-widget").mouseup(function(e){this._down=false});
$("#test-widget").mouseleave(function(e){this._down=false});
$("#test-widget").mousemove(function(e){if (this._down) {got_a_move(e);}});
""")
if __name__ == '__main__':
app = QtGui.QApplication([])
m = Main()
m.show()
app.exec_()
| bsd-2-clause |
thomaskeck/root | interpreter/llvm/src/examples/Kaleidoscope/MCJIT/cached/genk-timing.py | 214 | 10499 | #!/usr/bin/env python
import sys
import random
class TimingScriptGenerator:
"""Used to generate a bash script which will invoke the toy and time it"""
def __init__(self, scriptname, outputname):
self.timeFile = outputname
self.shfile = open(scriptname, 'w')
self.shfile.write("echo \"\" > %s\n" % self.timeFile)
def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
"""Echo some comments and invoke both versions of toy"""
rootname = filename
if '.' in filename:
rootname = filename[:filename.rfind('.')]
self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy-jit < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
class KScriptGenerator:
"""Used to generate random Kaleidoscope code"""
def __init__(self, filename):
self.kfile = open(filename, 'w')
self.nextFuncNum = 1
self.lastFuncNum = None
self.callWeighting = 0.1
# A mapping of calls within functions with no duplicates
self.calledFunctionTable = {}
# A list of function calls which will actually be executed
self.calledFunctions = []
# A comprehensive mapping of calls within functions
# used for computing the total number of calls
self.comprehensiveCalledFunctionTable = {}
self.totalCallsExecuted = 0
def updateTotalCallCount(self, callee):
# Count this call
self.totalCallsExecuted += 1
# Then count all the functions it calls
if callee in self.comprehensiveCalledFunctionTable:
for child in self.comprehensiveCalledFunctionTable[callee]:
self.updateTotalCallCount(child)
def updateFunctionCallMap(self, caller, callee):
"""Maintains a map of functions that are called from other functions"""
if not caller in self.calledFunctionTable:
self.calledFunctionTable[caller] = []
if not callee in self.calledFunctionTable[caller]:
self.calledFunctionTable[caller].append(callee)
if not caller in self.comprehensiveCalledFunctionTable:
self.comprehensiveCalledFunctionTable[caller] = []
self.comprehensiveCalledFunctionTable[caller].append(callee)
def updateCalledFunctionList(self, callee):
"""Maintains a list of functions that will actually be called"""
# Update the total call count
self.updateTotalCallCount(callee)
# If this function is already in the list, don't do anything else
if callee in self.calledFunctions:
return
# Add this function to the list of those that will be called.
self.calledFunctions.append(callee)
# If this function calls other functions, add them too
if callee in self.calledFunctionTable:
for subCallee in self.calledFunctionTable[callee]:
self.updateCalledFunctionList(subCallee)
def setCallWeighting(self, weight):
""" Sets the probably of generating a function call"""
self.callWeighting = weight
def writeln(self, line):
self.kfile.write(line + '\n')
def writeComment(self, comment):
self.writeln('# ' + comment)
def writeEmptyLine(self):
self.writeln("")
def writePredefinedFunctions(self):
self.writeComment("Define ':' for sequencing: as a low-precedence operator that ignores operands")
self.writeComment("and just returns the RHS.")
self.writeln("def binary : 1 (x y) y;")
self.writeEmptyLine()
self.writeComment("Helper functions defined within toy")
self.writeln("extern putchard(x);")
self.writeln("extern printd(d);")
self.writeln("extern printlf();")
self.writeEmptyLine()
self.writeComment("Print the result of a function call")
self.writeln("def printresult(N Result)")
self.writeln(" # 'result('")
self.writeln(" putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :")
self.writeln(" printd(N) :");
self.writeln(" # ') = '")
self.writeln(" putchard(41) : putchard(32) : putchard(61) : putchard(32) :")
self.writeln(" printd(Result) :");
self.writeln(" printlf();")
self.writeEmptyLine()
def writeRandomOperation(self, LValue, LHS, RHS):
shouldCallFunc = (self.lastFuncNum > 2 and random.random() < self.callWeighting)
if shouldCallFunc:
funcToCall = random.randrange(1, self.lastFuncNum - 1)
self.updateFunctionCallMap(self.lastFuncNum, funcToCall)
self.writeln(" %s = func%d(%s, %s) :" % (LValue, funcToCall, LHS, RHS))
else:
possibleOperations = ["+", "-", "*", "/"]
operation = random.choice(possibleOperations)
if operation == "-":
# Don't let our intermediate value become zero
# This is complicated by the fact that '<' is our only comparison operator
self.writeln(" if %s < %s then" % (LHS, RHS))
self.writeln(" %s = %s %s %s" % (LValue, LHS, operation, RHS))
self.writeln(" else if %s < %s then" % (RHS, LHS))
self.writeln(" %s = %s %s %s" % (LValue, LHS, operation, RHS))
self.writeln(" else")
self.writeln(" %s = %s %s %f :" % (LValue, LHS, operation, random.uniform(1, 100)))
else:
self.writeln(" %s = %s %s %s :" % (LValue, LHS, operation, RHS))
def getNextFuncNum(self):
result = self.nextFuncNum
self.nextFuncNum += 1
self.lastFuncNum = result
return result
def writeFunction(self, elements):
funcNum = self.getNextFuncNum()
self.writeComment("Auto-generated function number %d" % funcNum)
self.writeln("def func%d(X Y)" % funcNum)
self.writeln(" var temp1 = X,")
self.writeln(" temp2 = Y,")
self.writeln(" temp3 in")
# Initialize the variable names to be rotated
first = "temp3"
second = "temp1"
third = "temp2"
# Write some random operations
for i in range(elements):
self.writeRandomOperation(first, second, third)
# Rotate the variables
temp = first
first = second
second = third
third = temp
self.writeln(" " + third + ";")
self.writeEmptyLine()
def writeFunctionCall(self):
self.writeComment("Call the last function")
arg1 = random.uniform(1, 100)
arg2 = random.uniform(1, 100)
self.writeln("printresult(%d, func%d(%f, %f) )" % (self.lastFuncNum, self.lastFuncNum, arg1, arg2))
self.writeEmptyLine()
self.updateCalledFunctionList(self.lastFuncNum)
def writeFinalFunctionCounts(self):
self.writeComment("Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum))
def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
""" Generate a random Kaleidoscope script based on the given parameters """
print "Generating " + filename
print(" %d functions, %d elements per function, %d functions between execution" %
(numFuncs, elementsPerFunc, funcsBetweenExec))
print(" Call weighting = %f" % callWeighting)
script = KScriptGenerator(filename)
script.setCallWeighting(callWeighting)
script.writeComment("===========================================================================")
script.writeComment("Auto-generated script")
script.writeComment(" %d functions, %d elements per function, %d functions between execution"
% (numFuncs, elementsPerFunc, funcsBetweenExec))
script.writeComment(" call weighting = %f" % callWeighting)
script.writeComment("===========================================================================")
script.writeEmptyLine()
script.writePredefinedFunctions()
funcsSinceLastExec = 0
for i in range(numFuncs):
script.writeFunction(elementsPerFunc)
funcsSinceLastExec += 1
if funcsSinceLastExec == funcsBetweenExec:
script.writeFunctionCall()
funcsSinceLastExec = 0
# Always end with a function call
if funcsSinceLastExec > 0:
script.writeFunctionCall()
script.writeEmptyLine()
script.writeFinalFunctionCounts()
funcsCalled = len(script.calledFunctions)
print " Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted)
timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
# Execution begins here
random.seed()
timingScript = TimingScriptGenerator("time-toy.sh", "timing-data.txt")
dataSets = [(5000, 3, 50, 0.50), (5000, 10, 100, 0.10), (5000, 10, 5, 0.10), (5000, 10, 1, 0.0),
(1000, 3, 10, 0.50), (1000, 10, 100, 0.10), (1000, 10, 5, 0.10), (1000, 10, 1, 0.0),
( 200, 3, 2, 0.50), ( 200, 10, 40, 0.10), ( 200, 10, 2, 0.10), ( 200, 10, 1, 0.0)]
# Generate the code
for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
print "All done!"
| lgpl-2.1 |
forseti-security/forseti-security | google/cloud/forseti/common/gcp_api/admin_directory.py | 1 | 10459 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for Admin Directory API client."""
from builtins import object
from googleapiclient import errors
from httplib2 import HttpLib2Error
from google.auth.exceptions import RefreshError
from google.cloud.forseti.common.gcp_api import _base_repository
from google.cloud.forseti.common.gcp_api import api_helpers
from google.cloud.forseti.common.gcp_api import errors as api_errors
from google.cloud.forseti.common.gcp_api import repository_mixins
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
API_NAME = 'admin'
REQUIRED_SCOPES = frozenset([
'https://www.googleapis.com/auth/admin.directory.group.readonly',
'https://www.googleapis.com/auth/admin.directory.user.readonly'
])
GSUITE_AUTH_FAILURE_MESSAGE = (
'Failed to retrieve G Suite data due to authentication '
'failure. Please make sure your forseti_server_config.yaml '
'file contains the most updated information and enable G '
'Suite Groups Collection if you haven\'t done so. Instructions'
' on how to enable: https://forsetisecurity.org/docs/latest/'
'configure/inventory/gsuite.html')
class AdminDirectoryRepositoryClient(_base_repository.BaseRepositoryClient):
"""Admin Directory API Respository Client."""
def __init__(self,
credentials,
quota_max_calls=None,
quota_period=1.0,
use_rate_limiter=True,
cache_discovery=False,
cache=None):
"""Constructor.
Args:
credentials (object): An google.auth credentials object. The admin
directory API needs a service account credential with delegated
super admin role.
quota_max_calls (int): Allowed requests per <quota_period> for the
API.
quota_period (float): The time period to track requests over.
use_rate_limiter (bool): Set to false to disable the use of a rate
limiter for this service.
cache_discovery (bool): When set to true, googleapiclient will cache
HTTP requests to API discovery endpoints.
cache (googleapiclient.discovery_cache.base.Cache): instance of a
class that can cache API discovery documents. If None,
googleapiclient will attempt to choose a default.
"""
if not quota_max_calls:
use_rate_limiter = False
self._groups = None
self._members = None
self._users = None
super(AdminDirectoryRepositoryClient, self).__init__(
API_NAME, versions=['directory_v1'],
credentials=credentials,
quota_max_calls=quota_max_calls,
quota_period=quota_period,
use_rate_limiter=use_rate_limiter,
cache_discovery=cache_discovery,
cache=cache)
# Turn off docstrings for properties.
# pylint: disable=missing-return-doc, missing-return-type-doc
@property
def groups(self):
"""Returns an _AdminDirectoryGroupsRepository instance."""
if not self._groups:
self._groups = self._init_repository(
_AdminDirectoryGroupsRepository)
return self._groups
@property
def members(self):
"""Returns an _AdminDirectoryMembersRepository instance."""
if not self._members:
self._members = self._init_repository(
_AdminDirectoryMembersRepository)
return self._members
@property
def users(self):
"""Returns an _AdminDirectoryUsersRepository instance."""
if not self._users:
self._users = self._init_repository(
_AdminDirectoryUsersRepository)
return self._users
# pylint: enable=missing-return-doc, missing-return-type-doc
class _AdminDirectoryGroupsRepository(
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Admin Directory Groups repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_AdminDirectoryGroupsRepository, self).__init__(
key_field='', component='groups', **kwargs)
class _AdminDirectoryMembersRepository(
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Admin Directory Members repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_AdminDirectoryMembersRepository, self).__init__(
key_field='groupKey', component='members', **kwargs)
class _AdminDirectoryUsersRepository(
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Admin Directory Users repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_AdminDirectoryUsersRepository, self).__init__(
key_field='', component='users', **kwargs)
class AdminDirectoryClient(object):
"""GSuite Admin Directory API Client."""
def __init__(self, global_configs, **kwargs):
"""Initialize.
Args:
global_configs (dict): Global configurations.
**kwargs (dict): The kwargs.
"""
credentials = api_helpers.get_delegated_credential(
global_configs.get('domain_super_admin_email'),
REQUIRED_SCOPES)
max_calls, quota_period = api_helpers.get_ratelimiter_config(
global_configs, API_NAME)
cache_discovery = global_configs[
'cache_discovery'] if 'cache_discovery' in global_configs else False
self.repository = AdminDirectoryRepositoryClient(
credentials=credentials,
quota_max_calls=max_calls,
quota_period=quota_period,
use_rate_limiter=kwargs.get('use_rate_limiter', True),
cache_discovery=cache_discovery,
cache=global_configs.get('cache'))
def get_group_members(self, group_key):
"""Get all the members for specified groups.
Args:
group_key (str): The group's unique id assigned by the Admin API.
Returns:
list: A list of member objects from the API.
Raises:
api_errors.ApiExecutionError: If group member retrieval fails.
"""
try:
paged_results = self.repository.members.list(group_key)
result = api_helpers.flatten_list_results(paged_results, 'members')
LOGGER.debug('Getting all the members for group_key = %s,'
' result = %s', group_key, result)
return result
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(group_key, e)
def get_groups(self, customer_id='my_customer'):
"""Get all the groups for a given customer_id.
        A note on customer_id='my_customer': this is a magic string the API
        accepts in place of the real customer id. See:
https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups#get_all_domain_groups
Args:
customer_id (str): The customer id to scope the request to.
Returns:
list: A list of group objects returned from the API.
Raises:
api_errors.ApiExecutionError: If groups retrieval fails.
RefreshError: If the authentication fails.
"""
try:
paged_results = self.repository.groups.list(customer=customer_id)
flattened_results = api_helpers.flatten_list_results(
paged_results, 'groups')
LOGGER.debug('Getting all the groups for customer_id = %s,'
' flattened_results = %s',
customer_id, flattened_results)
return flattened_results
except RefreshError as e:
# Authentication failed, log before raise.
LOGGER.exception(GSUITE_AUTH_FAILURE_MESSAGE)
raise e
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError('groups', e)
def get_users(self, customer_id='my_customer'):
"""Get all the users for a given customer_id.
        A note on customer_id='my_customer': this is a magic string the API
        accepts in place of the real customer id. See:
https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups#get_all_domain_groups
Args:
customer_id (str): The customer id to scope the request to.
Returns:
list: A list of user objects returned from the API.
Raises:
api_errors.ApiExecutionError: If groups retrieval fails.
RefreshError: If the authentication fails.
"""
try:
paged_results = self.repository.users.list(customer=customer_id,
viewType='admin_view')
flattened_results = api_helpers.flatten_list_results(
paged_results, 'users')
LOGGER.debug('Getting all the users for customer_id = %s,'
' flattened_results = %s',
customer_id, flattened_results)
return flattened_results
except RefreshError as e:
# Authentication failed, log before raise.
LOGGER.exception(GSUITE_AUTH_FAILURE_MESSAGE)
raise e
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError('users', e)
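# Illustrative usage sketch (added as an example; not part of the original module).
# 'domain_super_admin_email' is the config key read above; the address and the shape of
# the group dicts follow the Admin SDK but are assumptions here, and a real deployment's
# forseti_server_config supplies more keys than this minimal dict.
def _example_list_gsuite_groups():
    global_configs = {'domain_super_admin_email': 'super-admin@example.com'}
    client = AdminDirectoryClient(global_configs)
    for group in client.get_groups():
        members = client.get_group_members(group.get('id'))
        LOGGER.info('group %s has %s members', group.get('email'), len(members))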
| apache-2.0 |
kwlzn/pants | src/python/pants/base/payload_field.py | 8 | 4251 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
from abc import abstractmethod
from hashlib import sha1
from twitter.common.collections import OrderedSet
from pants.util.meta import AbstractClass
def stable_json_dumps(obj):
return json.dumps(obj, ensure_ascii=True, allow_nan=False, sort_keys=True)
def stable_json_sha1(obj):
"""
:API: public
"""
return sha1(stable_json_dumps(obj)).hexdigest()
def combine_hashes(hashes):
"""A simple helper function to combine other hashes. Sorts the hashes before rolling them in."""
hasher = sha1()
for h in sorted(hashes):
hasher.update(h)
return hasher.hexdigest()
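# Illustrative sketch (added; not part of the original module): combining the stable JSON
# hashes of two made-up payload values. combine_hashes sorts its inputs first, so the
# result is independent of ordering.
def _example_combined_fingerprint():
    first = stable_json_sha1({'target': 'a', 'deps': ['b', 'c']})
    second = stable_json_sha1({'target': 'b', 'deps': []})
    return combine_hashes([first, second]) == combine_hashes([second, first])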
class PayloadField(AbstractClass):
"""An immutable, hashable structure to be mixed into Payload instances.
:API: public
"""
_fingerprint_memo = None
def fingerprint(self):
"""A memoized sha1 hexdigest hashing the contents of this PayloadField
The fingerprint returns either a bytestring or None. If the return is None, consumers of the
fingerprint may choose to elide this PayloadField from their combined hash computation.
:API: public
"""
if self._fingerprint_memo is None:
self._fingerprint_memo = self._compute_fingerprint()
return self._fingerprint_memo
def mark_dirty(self):
"""Invalidates the memoized fingerprint for this field.
Exposed for testing.
:API: public
"""
self._fingerprint_memo = None
@abstractmethod
def _compute_fingerprint(self):
"""This method will be called and the result memoized for ``PayloadField.fingerprint``."""
pass
@property
def value(self):
"""
:API: public
"""
return self
class FingerprintedMixin(object):
"""Mixin this class to make your class suitable for passing to FingerprintedField.
:API: public
"""
def fingerprint(self):
"""Override this method to implement a fingerprint for your class.
:API: public
:returns: a sha1 hexdigest hashing the contents of this structure."""
raise NotImplementedError()
class FingerprintedField(PayloadField):
"""Use this field to fingerprint any class that mixes in FingerprintedMixin.
The caller must ensure that the class properly implements fingerprint()
to hash the contents of the object.
:API: public
"""
def __init__(self, value):
self._value = value
def _compute_fingerprint(self):
return self._value.fingerprint()
@property
def value(self):
return self._value
class PythonRequirementsField(frozenset, PayloadField):
"""A frozenset subclass that mixes in PayloadField.
Must be initialized with an iterable of PythonRequirement instances.
:API: public
"""
def _compute_fingerprint(self):
def fingerprint_iter():
for req in self:
hash_items = (
repr(req._requirement),
req._repository,
req._name,
req._use_2to3,
req.compatibility,
)
yield stable_json_sha1(hash_items)
return combine_hashes(fingerprint_iter())
class ExcludesField(OrderedSet, PayloadField):
"""An OrderedSet subclass that mixes in PayloadField.
Must be initialized with an iterable of Excludes instances.
:API: public
"""
def _compute_fingerprint(self):
return stable_json_sha1(tuple(repr(exclude) for exclude in self))
class JarsField(tuple, PayloadField):
"""A tuple subclass that mixes in PayloadField.
Must be initialized with an iterable of JarDependency instances.
:API: public
"""
def _compute_fingerprint(self):
return stable_json_sha1(tuple(jar.cache_key() for jar in self))
class PrimitiveField(PayloadField):
"""A general field for primitive types.
As long as the contents are JSON representable, their hash can be stably inferred.
:API: public
"""
def __init__(self, underlying=None):
self._underlying = underlying
@property
def value(self):
return self._underlying
def _compute_fingerprint(self):
return stable_json_sha1(self._underlying)
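# Illustrative sketch (added; not part of the original module): PrimitiveField fingerprints
# any JSON-representable value and memoizes the result until mark_dirty() is called; the
# value below is invented for the example.
def _example_primitive_field():
    field = PrimitiveField({'flags': ['-Xmx1g'], 'debug': True})
    first = field.fingerprint()          # computed and memoized
    field.mark_dirty()                   # drops the memo
    return first == field.fingerprint()  # recomputed, same stable value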
| apache-2.0 |
shaistaansari/django | django/views/i18n.py | 82 | 11102 | import gettext as gettext_module
import importlib
import json
import os
from django import http
from django.apps import apps
from django.conf import settings
from django.core.urlresolvers import translate_url
from django.template import Context, Engine
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import smart_text
from django.utils.formats import get_format, get_format_modules
from django.utils.http import is_safe_url
from django.utils.translation import (
LANGUAGE_SESSION_KEY, check_for_language, get_language, to_locale,
)
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.POST.get('next', request.GET.get('next'))
if not is_safe_url(url=next, host=request.get_host()):
next = request.META.get('HTTP_REFERER')
if not is_safe_url(url=next, host=request.get_host()):
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language')
if lang_code and check_for_language(lang_code):
next_trans = translate_url(next, lang_code)
if next_trans != next:
response = http.HttpResponseRedirect(next_trans)
if hasattr(request, 'session'):
request.session[LANGUAGE_SESSION_KEY] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN)
return response
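# Illustrative wiring sketch (added; not part of this module): a project would normally
# expose this view through its URLconf, either via django.conf.urls.i18n or directly as
# below. The URL prefix and pattern name are assumptions for the example.
#
# from django.conf.urls import url
# from django.views.i18n import set_language
#
# urlpatterns = [
#     url(r'^i18n/setlang/$', set_language, name='set_language'),
# ]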
def get_formats():
"""
    Returns all format strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
formats = {}
for k, v in result.items():
if isinstance(v, (six.string_types, int)):
formats[k] = smart_text(v)
elif isinstance(v, (tuple, list)):
formats[k] = [smart_text(value) for value in v]
return formats
js_catalog_template = r"""
{% autoescape off %}
(function(globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function(n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function(count) { return (count == 1) ? 0 : 1; };
{% endif %}
/* gettext library */
django.catalog = django.catalog || {};
{% if catalog_str %}
var newcatalog = {{ catalog_str }};
for (var key in newcatalog) {
django.catalog[key] = newcatalog[key];
}
{% endif %}
if (!django.jsi18n_initialized) {
django.gettext = function(msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function(singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function(msgid) { return msgid; };
django.pgettext = function(context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function(context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
django.interpolate = function(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function(format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
django.jsi18n_initialized = true;
}
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
template = Engine().from_string(js_catalog_template)
indent = lambda s: s.replace('\n', '\n ')
context = Context({
'catalog_str': indent(json.dumps(
catalog, sort_keys=True, indent=2)) if catalog else None,
'formats_str': indent(json.dumps(
get_formats(), sort_keys=True, indent=2)),
'plural': plural,
})
return http.HttpResponse(template.render(context), 'text/javascript')
def get_javascript_catalog(locale, domain, packages):
default_locale = to_locale(settings.LANGUAGE_CODE)
app_configs = apps.get_app_configs()
allowable_packages = set(app_config.name for app_config in app_configs)
allowable_packages.add('django.conf')
packages = [p for p in packages if p in allowable_packages]
t = {}
paths = []
en_selected = locale.startswith('en')
en_catalog_missing = True
# paths of requested packages
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
paths.append(path)
# add the filesystem paths listed in the LOCALE_PATHS setting
paths.extend(reversed(settings.LOCALE_PATHS))
# first load all english languages files for defaults
for path in paths:
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except IOError:
pass
else:
# 'en' is the selected language and at least one of the packages
# listed in `packages` has an 'en' catalog
if en_selected:
en_catalog_missing = False
# next load the settings.LANGUAGE_CODE translations if it isn't english
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
# If the currently selected language is English but it doesn't have a
# translation catalog (presumably due to being the language translated
# from) then a wrong language catalog might have been loaded in the
# previous step. It needs to be discarded.
if en_selected and en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t = locale_t
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':', 1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 :
# n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
pdict = {}
maxcnts = {}
catalog = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, six.string_types):
catalog[k] = v
elif isinstance(k, tuple):
msgid = k[0]
cnt = k[1]
maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
pdict.setdefault(msgid, {})[cnt] = v
else:
raise TypeError(k)
for k, v in pdict.items():
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
return catalog, plural
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
return render_javascript_catalog()
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
locale = to_locale(get_language())
if request.GET and 'language' in request.GET:
if check_for_language(request.GET['language']):
locale = to_locale(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, six.string_types):
packages = packages.split('+')
catalog, plural = get_javascript_catalog(locale, domain, packages)
return render_javascript_catalog(catalog, plural)
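# Illustrative wiring sketch (added; not part of this module): exposing the catalog view
# from a project URLconf with an explicit packages list. The pattern and the package name
# ('your.app') are assumptions for the example.
#
# from django.conf.urls import url
# from django.views.i18n import javascript_catalog
#
# js_info_dict = {'packages': ('your.app',)}
# urlpatterns = [
#     url(r'^jsi18n/$', javascript_catalog, js_info_dict),
# ]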
| bsd-3-clause |
qwertyjune/BethSaidaBible | venv/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.py | 257 | 1073 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "15.0"
__author__ = "Donald Stufft"
__email__ = "[email protected]"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 %s" % __author__
| gpl-3.0 |
leiferikb/bitpop | build/third_party/twisted_10_2/twisted/persisted/sob.py | 60 | 6366 | # -*- test-case-name: twisted.test.test_sob -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""
Save and load Small OBjects to and from files, using various formats.
Maintainer: Moshe Zadka
"""
import os, sys
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from twisted.python import log, runtime
from twisted.python.hashlib import md5
from twisted.persisted import styles
from zope.interface import implements, Interface
# Note:
# These encrypt/decrypt functions only work for data formats
# which are immune to having spaces tucked at the end.
# All data formats which persist saves hold that condition.
def _encrypt(passphrase, data):
from Crypto.Cipher import AES as cipher
leftover = len(data) % cipher.block_size
if leftover:
data += ' '*(cipher.block_size - leftover)
return cipher.new(md5(passphrase).digest()[:16]).encrypt(data)
def _decrypt(passphrase, data):
from Crypto.Cipher import AES
return AES.new(md5(passphrase).digest()[:16]).decrypt(data)
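# Illustrative sketch (added; not part of the original module): a round trip through the
# helpers above. They pad with trailing spaces up to the AES block size, so the decrypted
# value is compared after stripping that padding; the passphrase and payload are made up.
# Like the helpers themselves, this requires PyCrypto to be installed.
def _example_crypt_roundtrip(passphrase='correct horse', data='some picklable payload'):
    blob = _encrypt(passphrase, data)
    return _decrypt(passphrase, blob).rstrip(' ') == data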
class IPersistable(Interface):
"""An object which can be saved in several formats to a file"""
def setStyle(style):
"""Set desired format.
@type style: string (one of 'pickle' or 'source')
"""
def save(tag=None, filename=None, passphrase=None):
"""Save object to file.
@type tag: string
@type filename: string
@type passphrase: string
"""
class Persistent:
implements(IPersistable)
style = "pickle"
def __init__(self, original, name):
self.original = original
self.name = name
def setStyle(self, style):
"""Set desired format.
@type style: string (one of 'pickle' or 'source')
"""
self.style = style
def _getFilename(self, filename, ext, tag):
if filename:
finalname = filename
filename = finalname + "-2"
elif tag:
filename = "%s-%s-2.%s" % (self.name, tag, ext)
finalname = "%s-%s.%s" % (self.name, tag, ext)
else:
filename = "%s-2.%s" % (self.name, ext)
finalname = "%s.%s" % (self.name, ext)
return finalname, filename
def _saveTemp(self, filename, passphrase, dumpFunc):
f = open(filename, 'wb')
if passphrase is None:
dumpFunc(self.original, f)
else:
s = StringIO.StringIO()
dumpFunc(self.original, s)
f.write(_encrypt(passphrase, s.getvalue()))
f.close()
def _getStyle(self):
if self.style == "source":
from twisted.persisted.aot import jellyToSource as dumpFunc
ext = "tas"
else:
def dumpFunc(obj, file):
pickle.dump(obj, file, 2)
ext = "tap"
return ext, dumpFunc
def save(self, tag=None, filename=None, passphrase=None):
"""Save object to file.
@type tag: string
@type filename: string
@type passphrase: string
"""
ext, dumpFunc = self._getStyle()
if passphrase:
ext = 'e' + ext
finalname, filename = self._getFilename(filename, ext, tag)
log.msg("Saving "+self.name+" application to "+finalname+"...")
self._saveTemp(filename, passphrase, dumpFunc)
if runtime.platformType == "win32" and os.path.isfile(finalname):
os.remove(finalname)
os.rename(filename, finalname)
log.msg("Saved.")
# "Persistant" has been present since 1.0.7, so retain it for compatibility
Persistant = Persistent
class _EverythingEphemeral(styles.Ephemeral):
initRun = 0
def __init__(self, mainMod):
"""
@param mainMod: The '__main__' module that this class will proxy.
"""
self.mainMod = mainMod
def __getattr__(self, key):
try:
return getattr(self.mainMod, key)
except AttributeError:
if self.initRun:
raise
else:
log.msg("Warning! Loading from __main__: %s" % key)
return styles.Ephemeral()
def load(filename, style, passphrase=None):
"""Load an object from a file.
Deserialize an object from a file. The file can be encrypted.
@param filename: string
@param style: string (one of 'pickle' or 'source')
@param passphrase: string
"""
mode = 'r'
if style=='source':
from twisted.persisted.aot import unjellyFromSource as _load
else:
_load, mode = pickle.load, 'rb'
if passphrase:
fp = StringIO.StringIO(_decrypt(passphrase,
open(filename, 'rb').read()))
else:
fp = open(filename, mode)
ee = _EverythingEphemeral(sys.modules['__main__'])
sys.modules['__main__'] = ee
ee.initRun = 1
try:
value = _load(fp)
finally:
# restore __main__ if an exception is raised.
sys.modules['__main__'] = ee.mainMod
styles.doUpgrade()
ee.initRun = 0
persistable = IPersistable(value, None)
if persistable is not None:
persistable.setStyle(style)
return value
def loadValueFromFile(filename, variable, passphrase=None):
"""Load the value of a variable in a Python file.
Run the contents of the file, after decrypting if C{passphrase} is
given, in a namespace and return the result of the variable
named C{variable}.
@param filename: string
@param variable: string
@param passphrase: string
"""
if passphrase:
mode = 'rb'
else:
mode = 'r'
fileObj = open(filename, mode)
d = {'__file__': filename}
if passphrase:
data = fileObj.read()
data = _decrypt(passphrase, data)
exec data in d, d
else:
exec fileObj in d, d
value = d[variable]
return value
def guessType(filename):
ext = os.path.splitext(filename)[1]
return {
'.tac': 'python',
'.etac': 'python',
'.py': 'python',
'.tap': 'pickle',
'.etap': 'pickle',
'.tas': 'source',
'.etas': 'source',
}[ext]
__all__ = ['loadValueFromFile', 'load', 'Persistent', 'Persistant',
'IPersistable', 'guessType']
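# Illustrative sketch (added; not part of the original module): saving a plain object with
# Persistent and reading it back with load(). The name, tag and resulting filename follow
# the scheme in _getFilename() and are otherwise arbitrary.
def _example_save_and_load():
    original = {'counter': 1}
    Persistent(original, 'demo').save(tag='example')      # writes demo-example.tap
    return load('demo-example.tap', 'pickle') == original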
| gpl-3.0 |
linearregression/socorro | socorro/cron/jobs/truncate_partitions.py | 9 | 1248 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from configman import Namespace
from crontabber.base import BaseCronApp
from crontabber.mixins import (
with_postgres_transactions,
with_single_postgres_transaction,
)
@with_postgres_transactions()
@with_single_postgres_transaction()
class TruncatePartitionsCronApp(BaseCronApp):
app_name = 'truncate-partitions'
app_version = '1.0'
app_description = """See
http://socorro.readthedocs.org/en/latest/development
/databaseadminfunctions.html#truncate-partitions
See https://bugzilla.mozilla.org/show_bug.cgi?id=1117911
"""
required_config = Namespace()
required_config.add_option(
'weeks_to_keep',
default=2, # 2 weeks is sufficient for a backfill range if need be
doc='Number of weeks of raw crash data to keep in Postgres')
def run(self, connection):
cursor = connection.cursor()
# Casting to date because stored procs in psql are strongly typed.
cursor.execute(
"select truncate_partitions(%s)", (self.config.weeks_to_keep,)
)
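# Illustrative note (added; not part of the original module): with the default
# weeks_to_keep of 2, run() above issues the equivalent of
#     select truncate_partitions(2);
# relying on the Postgres stored procedure to drop older raw crash partitions.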
| mpl-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/test/test_datetime.py | 12 | 134698 | """Test date/time type.
See http://www.zope.org/Members/fdrake/DateTimeWiki/TestCases
"""
from __future__ import division
import sys
import pickle
import cPickle
import unittest
from test import test_support
from datetime import MINYEAR, MAXYEAR
from datetime import timedelta
from datetime import tzinfo
from datetime import time
from datetime import date, datetime
pickle_choices = [(pickler, unpickler, proto)
for pickler in pickle, cPickle
for unpickler in pickle, cPickle
for proto in range(3)]
assert len(pickle_choices) == 2*2*3
# An arbitrary collection of objects of non-datetime types, for testing
# mixed-type comparisons.
OTHERSTUFF = (10, 10L, 34.5, "abc", {}, [], ())
#############################################################################
# module tests
class TestModule(unittest.TestCase):
def test_constants(self):
import datetime
self.assertEqual(datetime.MINYEAR, 1)
self.assertEqual(datetime.MAXYEAR, 9999)
#############################################################################
# tzinfo tests
class FixedOffset(tzinfo):
def __init__(self, offset, name, dstoffset=42):
if isinstance(offset, int):
offset = timedelta(minutes=offset)
if isinstance(dstoffset, int):
dstoffset = timedelta(minutes=dstoffset)
self.__offset = offset
self.__name = name
self.__dstoffset = dstoffset
def __repr__(self):
return self.__name.lower()
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return self.__dstoffset
class PicklableFixedOffset(FixedOffset):
def __init__(self, offset=None, name=None, dstoffset=None):
FixedOffset.__init__(self, offset, name, dstoffset)
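# Illustrative sketch (added; not part of the original test module): the FixedOffset
# helper above acts as a concrete tzinfo; the offset, name and DST values are arbitrary.
def _example_fixed_offset():
    est = FixedOffset(-300, "EST", 60)
    aware = datetime(2002, 3, 1, 12, 0, tzinfo=est)
    return aware.utcoffset() == timedelta(minutes=-300) and aware.tzname() == "EST"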
class TestTZInfo(unittest.TestCase):
def test_non_abstractness(self):
# In order to allow subclasses to get pickled, the C implementation
# wasn't able to get away with having __init__ raise
# NotImplementedError.
useless = tzinfo()
dt = datetime.max
self.assertRaises(NotImplementedError, useless.tzname, dt)
self.assertRaises(NotImplementedError, useless.utcoffset, dt)
self.assertRaises(NotImplementedError, useless.dst, dt)
def test_subclass_must_override(self):
class NotEnough(tzinfo):
def __init__(self, offset, name):
self.__offset = offset
self.__name = name
self.assertTrue(issubclass(NotEnough, tzinfo))
ne = NotEnough(3, "NotByALongShot")
self.assertIsInstance(ne, tzinfo)
dt = datetime.now()
self.assertRaises(NotImplementedError, ne.tzname, dt)
self.assertRaises(NotImplementedError, ne.utcoffset, dt)
self.assertRaises(NotImplementedError, ne.dst, dt)
def test_normal(self):
fo = FixedOffset(3, "Three")
self.assertIsInstance(fo, tzinfo)
for dt in datetime.now(), None:
self.assertEqual(fo.utcoffset(dt), timedelta(minutes=3))
self.assertEqual(fo.tzname(dt), "Three")
self.assertEqual(fo.dst(dt), timedelta(minutes=42))
def test_pickling_base(self):
# There's no point to pickling tzinfo objects on their own (they
# carry no data), but they need to be picklable anyway else
# concrete subclasses can't be pickled.
orig = tzinfo.__new__(tzinfo)
self.assertIs(type(orig), tzinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertIs(type(derived), tzinfo)
def test_pickling_subclass(self):
# Make sure we can pickle/unpickle an instance of a subclass.
offset = timedelta(minutes=-300)
orig = PicklableFixedOffset(offset, 'cookie')
self.assertIsInstance(orig, tzinfo)
self.assertTrue(type(orig) is PicklableFixedOffset)
self.assertEqual(orig.utcoffset(None), offset)
self.assertEqual(orig.tzname(None), 'cookie')
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertIsInstance(derived, tzinfo)
self.assertTrue(type(derived) is PicklableFixedOffset)
self.assertEqual(derived.utcoffset(None), offset)
self.assertEqual(derived.tzname(None), 'cookie')
#############################################################################
# Base class for testing a particular aspect of timedelta, time, date and
# datetime comparisons.
class HarmlessMixedComparison:
# Test that __eq__ and __ne__ don't complain for mixed-type comparisons.
# Subclasses must define 'theclass', and theclass(1, 1, 1) must be a
# legit constructor.
def test_harmless_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.assertFalse(me == ())
self.assertTrue(me != ())
self.assertFalse(() == me)
self.assertTrue(() != me)
self.assertIn(me, [1, 20L, [], me])
self.assertIn([], [me, 1, 20L, []])
def test_harmful_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.assertRaises(TypeError, lambda: me < ())
self.assertRaises(TypeError, lambda: me <= ())
self.assertRaises(TypeError, lambda: me > ())
self.assertRaises(TypeError, lambda: me >= ())
self.assertRaises(TypeError, lambda: () < me)
self.assertRaises(TypeError, lambda: () <= me)
self.assertRaises(TypeError, lambda: () > me)
self.assertRaises(TypeError, lambda: () >= me)
self.assertRaises(TypeError, cmp, (), me)
self.assertRaises(TypeError, cmp, me, ())
#############################################################################
# timedelta tests
class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
theclass = timedelta
def test_constructor(self):
eq = self.assertEqual
td = timedelta
# Check keyword args to constructor
eq(td(), td(weeks=0, days=0, hours=0, minutes=0, seconds=0,
milliseconds=0, microseconds=0))
eq(td(1), td(days=1))
eq(td(0, 1), td(seconds=1))
eq(td(0, 0, 1), td(microseconds=1))
eq(td(weeks=1), td(days=7))
eq(td(days=1), td(hours=24))
eq(td(hours=1), td(minutes=60))
eq(td(minutes=1), td(seconds=60))
eq(td(seconds=1), td(milliseconds=1000))
eq(td(milliseconds=1), td(microseconds=1000))
# Check float args to constructor
eq(td(weeks=1.0/7), td(days=1))
eq(td(days=1.0/24), td(hours=1))
eq(td(hours=1.0/60), td(minutes=1))
eq(td(minutes=1.0/60), td(seconds=1))
eq(td(seconds=0.001), td(milliseconds=1))
eq(td(milliseconds=0.001), td(microseconds=1))
def test_computations(self):
eq = self.assertEqual
td = timedelta
a = td(7) # One week
b = td(0, 60) # One minute
c = td(0, 0, 1000) # One millisecond
eq(a+b+c, td(7, 60, 1000))
eq(a-b, td(6, 24*3600 - 60))
eq(-a, td(-7))
eq(+a, td(7))
eq(-b, td(-1, 24*3600 - 60))
eq(-c, td(-1, 24*3600 - 1, 999000))
eq(abs(a), a)
eq(abs(-a), a)
eq(td(6, 24*3600), a)
eq(td(0, 0, 60*1000000), b)
eq(a*10, td(70))
eq(a*10, 10*a)
eq(a*10L, 10*a)
eq(b*10, td(0, 600))
eq(10*b, td(0, 600))
eq(b*10L, td(0, 600))
eq(c*10, td(0, 0, 10000))
eq(10*c, td(0, 0, 10000))
eq(c*10L, td(0, 0, 10000))
eq(a*-1, -a)
eq(b*-2, -b-b)
eq(c*-2, -c+-c)
eq(b*(60*24), (b*60)*24)
eq(b*(60*24), (60*b)*24)
eq(c*1000, td(0, 1))
eq(1000*c, td(0, 1))
eq(a//7, td(1))
eq(b//10, td(0, 6))
eq(c//1000, td(0, 0, 1))
eq(a//10, td(0, 7*24*360))
eq(a//3600000, td(0, 0, 7*24*1000))
# Issue #11576
eq(td(999999999, 86399, 999999) - td(999999999, 86399, 999998),
td(0, 0, 1))
eq(td(999999999, 1, 1) - td(999999999, 1, 0),
td(0, 0, 1))
def test_disallowed_computations(self):
a = timedelta(42)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# Mul/div by float isn't supported.
x = 2.3
self.assertRaises(TypeError, lambda: a*x)
self.assertRaises(TypeError, lambda: x*a)
self.assertRaises(TypeError, lambda: a/x)
self.assertRaises(TypeError, lambda: x/a)
self.assertRaises(TypeError, lambda: a // x)
self.assertRaises(TypeError, lambda: x // a)
# Division of int by timedelta doesn't make sense.
# Division by zero doesn't make sense.
for zero in 0, 0L:
self.assertRaises(TypeError, lambda: zero // a)
self.assertRaises(ZeroDivisionError, lambda: a // zero)
def test_basic_attributes(self):
days, seconds, us = 1, 7, 31
td = timedelta(days, seconds, us)
self.assertEqual(td.days, days)
self.assertEqual(td.seconds, seconds)
self.assertEqual(td.microseconds, us)
def test_total_seconds(self):
td = timedelta(days=365)
self.assertEqual(td.total_seconds(), 31536000.0)
for total_seconds in [123456.789012, -123456.789012, 0.123456, 0, 1e6]:
td = timedelta(seconds=total_seconds)
self.assertEqual(td.total_seconds(), total_seconds)
# Issue8644: Test that td.total_seconds() has the same
# accuracy as td / timedelta(seconds=1).
for ms in [-1, -2, -123]:
td = timedelta(microseconds=ms)
self.assertEqual(td.total_seconds(),
((24*3600*td.days + td.seconds)*10**6
+ td.microseconds)/10**6)
def test_carries(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1e6 + 1)
t2 = timedelta(microseconds=1)
self.assertEqual(t1, t2)
def test_hash_equality(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1000000)
t2 = timedelta()
self.assertEqual(hash(t1), hash(t2))
t1 += timedelta(weeks=7)
t2 += timedelta(days=7*7)
self.assertEqual(t1, t2)
self.assertEqual(hash(t1), hash(t2))
d = {t1: 1}
d[t2] = 2
self.assertEqual(len(d), 1)
self.assertEqual(d[t1], 2)
def test_pickling(self):
args = 12, 34, 56
orig = timedelta(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = timedelta(2, 3, 4)
t2 = timedelta(2, 3, 4)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertFalse(t1 != t2)
self.assertFalse(t1 < t2)
self.assertFalse(t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = timedelta(*args) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertFalse(t1 == t2)
self.assertFalse(t2 == t1)
self.assertFalse(t1 > t2)
self.assertFalse(t2 < t1)
self.assertFalse(t1 >= t2)
self.assertFalse(t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_str(self):
td = timedelta
eq = self.assertEqual
eq(str(td(1)), "1 day, 0:00:00")
eq(str(td(-1)), "-1 day, 0:00:00")
eq(str(td(2)), "2 days, 0:00:00")
eq(str(td(-2)), "-2 days, 0:00:00")
eq(str(td(hours=12, minutes=58, seconds=59)), "12:58:59")
eq(str(td(hours=2, minutes=3, seconds=4)), "2:03:04")
eq(str(td(weeks=-30, hours=23, minutes=12, seconds=34)),
"-210 days, 23:12:34")
eq(str(td(milliseconds=1)), "0:00:00.001000")
eq(str(td(microseconds=3)), "0:00:00.000003")
eq(str(td(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)),
"999999999 days, 23:59:59.999999")
def test_roundtrip(self):
for td in (timedelta(days=999999999, hours=23, minutes=59,
seconds=59, microseconds=999999),
timedelta(days=-999999999),
timedelta(days=1, seconds=2, microseconds=3)):
# Verify td -> string -> td identity.
s = repr(td)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
td2 = eval(s)
self.assertEqual(td, td2)
# Verify identity via reconstructing from pieces.
td2 = timedelta(td.days, td.seconds, td.microseconds)
self.assertEqual(td, td2)
def test_resolution_info(self):
self.assertIsInstance(timedelta.min, timedelta)
self.assertIsInstance(timedelta.max, timedelta)
self.assertIsInstance(timedelta.resolution, timedelta)
self.assertTrue(timedelta.max > timedelta.min)
self.assertEqual(timedelta.min, timedelta(-999999999))
self.assertEqual(timedelta.max, timedelta(999999999, 24*3600-1, 1e6-1))
self.assertEqual(timedelta.resolution, timedelta(0, 0, 1))
def test_overflow(self):
tiny = timedelta.resolution
td = timedelta.min + tiny
td -= tiny # no problem
self.assertRaises(OverflowError, td.__sub__, tiny)
self.assertRaises(OverflowError, td.__add__, -tiny)
td = timedelta.max - tiny
td += tiny # no problem
self.assertRaises(OverflowError, td.__add__, tiny)
self.assertRaises(OverflowError, td.__sub__, -tiny)
self.assertRaises(OverflowError, lambda: -timedelta.max)
def test_microsecond_rounding(self):
td = timedelta
eq = self.assertEqual
# Single-field rounding.
eq(td(milliseconds=0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=-0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=0.6/1000), td(microseconds=1))
eq(td(milliseconds=-0.6/1000), td(microseconds=-1))
# Rounding due to contributions from more than one field.
us_per_hour = 3600e6
us_per_day = us_per_hour * 24
eq(td(days=.4/us_per_day), td(0))
eq(td(hours=.2/us_per_hour), td(0))
eq(td(days=.4/us_per_day, hours=.2/us_per_hour), td(microseconds=1))
eq(td(days=-.4/us_per_day), td(0))
eq(td(hours=-.2/us_per_hour), td(0))
eq(td(days=-.4/us_per_day, hours=-.2/us_per_hour), td(microseconds=-1))
def test_massive_normalization(self):
td = timedelta(microseconds=-1)
self.assertEqual((td.days, td.seconds, td.microseconds),
(-1, 24*3600-1, 999999))
def test_bool(self):
self.assertTrue(timedelta(1))
self.assertTrue(timedelta(0, 1))
self.assertTrue(timedelta(0, 0, 1))
self.assertTrue(timedelta(microseconds=1))
self.assertFalse(timedelta(0))
def test_subclass_timedelta(self):
class T(timedelta):
@staticmethod
def from_td(td):
return T(td.days, td.seconds, td.microseconds)
def as_hours(self):
sum = (self.days * 24 +
self.seconds / 3600.0 +
self.microseconds / 3600e6)
return round(sum)
t1 = T(days=1)
self.assertIs(type(t1), T)
self.assertEqual(t1.as_hours(), 24)
t2 = T(days=-1, seconds=-3600)
self.assertIs(type(t2), T)
self.assertEqual(t2.as_hours(), -25)
t3 = t1 + t2
self.assertIs(type(t3), timedelta)
t4 = T.from_td(t3)
self.assertIs(type(t4), T)
self.assertEqual(t3.days, t4.days)
self.assertEqual(t3.seconds, t4.seconds)
self.assertEqual(t3.microseconds, t4.microseconds)
self.assertEqual(str(t3), str(t4))
self.assertEqual(t4.as_hours(), -1)
#############################################################################
# date tests
class TestDateOnly(unittest.TestCase):
# Tests here won't pass if also run on datetime objects, so don't
# subclass this to test datetimes too.
def test_delta_non_days_ignored(self):
dt = date(2000, 1, 2)
delta = timedelta(days=1, hours=2, minutes=3, seconds=4,
microseconds=5)
days = timedelta(delta.days)
self.assertEqual(days, timedelta(1))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
delta = -delta
days = timedelta(delta.days)
self.assertEqual(days, timedelta(-2))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
class SubclassDate(date):
sub_var = 1
class TestDate(HarmlessMixedComparison, unittest.TestCase):
# Tests here should pass for both dates and datetimes, except for a
# few tests that TestDateTime overrides.
theclass = date
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3),
self.theclass.today()):
# Verify dt -> string -> date identity.
s = repr(dt)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day)
self.assertEqual(dt, dt2)
def test_ordinal_conversions(self):
# Check some fixed values.
for y, m, d, n in [(1, 1, 1, 1), # calendar origin
(1, 12, 31, 365),
(2, 1, 1, 366),
# first example from "Calendrical Calculations"
(1945, 11, 12, 710347)]:
d = self.theclass(y, m, d)
self.assertEqual(n, d.toordinal())
fromord = self.theclass.fromordinal(n)
self.assertEqual(d, fromord)
if hasattr(fromord, "hour"):
# if we're checking something fancier than a date, verify
# the extra fields have been zeroed out
self.assertEqual(fromord.hour, 0)
self.assertEqual(fromord.minute, 0)
self.assertEqual(fromord.second, 0)
self.assertEqual(fromord.microsecond, 0)
# Check first and last days of year spottily across the whole
# range of years supported.
for year in xrange(MINYEAR, MAXYEAR+1, 7):
# Verify (year, 1, 1) -> ordinal -> y, m, d is identity.
d = self.theclass(year, 1, 1)
n = d.toordinal()
d2 = self.theclass.fromordinal(n)
self.assertEqual(d, d2)
# Verify that moving back a day gets to the end of year-1.
if year > 1:
d = self.theclass.fromordinal(n-1)
d2 = self.theclass(year-1, 12, 31)
self.assertEqual(d, d2)
self.assertEqual(d2.toordinal(), n-1)
# Test every day in a leap-year and a non-leap year.
dim = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for year, isleap in (2000, True), (2002, False):
n = self.theclass(year, 1, 1).toordinal()
for month, maxday in zip(range(1, 13), dim):
if month == 2 and isleap:
maxday += 1
for day in range(1, maxday+1):
d = self.theclass(year, month, day)
self.assertEqual(d.toordinal(), n)
self.assertEqual(d, self.theclass.fromordinal(n))
n += 1
def test_extreme_ordinals(self):
a = self.theclass.min
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord - 1))
b = a + timedelta(days=1)
self.assertEqual(b.toordinal(), aord + 1)
self.assertEqual(b, self.theclass.fromordinal(aord + 1))
a = self.theclass.max
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord + 1))
b = a - timedelta(days=1)
self.assertEqual(b.toordinal(), aord - 1)
self.assertEqual(b, self.theclass.fromordinal(aord - 1))
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31)
# same thing
e = self.theclass(2000, 12, 31)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1)
# same thing
e = self.theclass(2001, 1, 1)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
day = timedelta(1)
week = timedelta(7)
a = self.theclass(2002, 3, 2)
self.assertEqual(a + day, self.theclass(2002, 3, 3))
self.assertEqual(day + a, self.theclass(2002, 3, 3))
self.assertEqual(a - day, self.theclass(2002, 3, 1))
self.assertEqual(-day + a, self.theclass(2002, 3, 1))
self.assertEqual(a + week, self.theclass(2002, 3, 9))
self.assertEqual(a - week, self.theclass(2002, 2, 23))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - date is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing date and (delta or date) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# date + date is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_overflow(self):
tiny = self.theclass.resolution
for delta in [tiny, timedelta(1), timedelta(2)]:
dt = self.theclass.min + delta
dt -= delta # no problem
self.assertRaises(OverflowError, dt.__sub__, delta)
self.assertRaises(OverflowError, dt.__add__, -delta)
dt = self.theclass.max - delta
dt += delta # no problem
self.assertRaises(OverflowError, dt.__add__, delta)
self.assertRaises(OverflowError, dt.__sub__, -delta)
def test_fromtimestamp(self):
import time
# Try an arbitrary fixed value.
year, month, day = 1999, 9, 19
ts = time.mktime((year, month, day, 0, 0, 0, 0, 0, -1))
d = self.theclass.fromtimestamp(ts)
self.assertEqual(d.year, year)
self.assertEqual(d.month, month)
self.assertEqual(d.day, day)
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.fromtimestamp,
insane)
def test_today(self):
import time
# We claim that today() is like fromtimestamp(time.time()), so
# prove it.
for dummy in range(3):
today = self.theclass.today()
ts = time.time()
todayagain = self.theclass.fromtimestamp(ts)
if today == todayagain:
break
# There are several legit reasons that could fail:
# 1. It recently became midnight, between the today() and the
# time() calls.
# 2. The platform time() has such fine resolution that we'll
# never get the same value twice.
# 3. The platform time() has poor resolution, and we just
# happened to call today() right before a resolution quantum
# boundary.
# 4. The system clock got fiddled between calls.
# In any case, wait a little while and try again.
time.sleep(0.1)
# It worked or it didn't. If it didn't, assume it's reason #2, and
# let the test pass if they're within half a second of each other.
if today != todayagain:
self.assertAlmostEqual(todayagain, today,
delta=timedelta(seconds=0.5))
def test_weekday(self):
for i in range(7):
# March 4, 2002 is a Monday
self.assertEqual(self.theclass(2002, 3, 4+i).weekday(), i)
self.assertEqual(self.theclass(2002, 3, 4+i).isoweekday(), i+1)
# January 2, 1956 is a Monday
self.assertEqual(self.theclass(1956, 1, 2+i).weekday(), i)
self.assertEqual(self.theclass(1956, 1, 2+i).isoweekday(), i+1)
def test_isocalendar(self):
# Check examples from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
for i in range(7):
d = self.theclass(2003, 12, 22+i)
self.assertEqual(d.isocalendar(), (2003, 52, i+1))
d = self.theclass(2003, 12, 29) + timedelta(i)
self.assertEqual(d.isocalendar(), (2004, 1, i+1))
d = self.theclass(2004, 1, 5+i)
self.assertEqual(d.isocalendar(), (2004, 2, i+1))
d = self.theclass(2009, 12, 21+i)
self.assertEqual(d.isocalendar(), (2009, 52, i+1))
d = self.theclass(2009, 12, 28) + timedelta(i)
self.assertEqual(d.isocalendar(), (2009, 53, i+1))
d = self.theclass(2010, 1, 4+i)
self.assertEqual(d.isocalendar(), (2010, 1, i+1))
def test_iso_long_years(self):
# Calculate long ISO years and compare to table from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
ISO_LONG_YEARS_TABLE = """
4 32 60 88
9 37 65 93
15 43 71 99
20 48 76
26 54 82
105 133 161 189
111 139 167 195
116 144 172
122 150 178
128 156 184
201 229 257 285
207 235 263 291
212 240 268 296
218 246 274
224 252 280
303 331 359 387
308 336 364 392
314 342 370 398
320 348 376
325 353 381
"""
iso_long_years = map(int, ISO_LONG_YEARS_TABLE.split())
iso_long_years.sort()
L = []
for i in range(400):
d = self.theclass(2000+i, 12, 31)
d1 = self.theclass(1600+i, 12, 31)
self.assertEqual(d.isocalendar()[1:], d1.isocalendar()[1:])
if d.isocalendar()[1] == 53:
L.append(i)
self.assertEqual(L, iso_long_years)
def test_isoformat(self):
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02")
def test_ctime(self):
t = self.theclass(2002, 3, 2)
self.assertEqual(t.ctime(), "Sat Mar 2 00:00:00 2002")
def test_strftime(self):
t = self.theclass(2005, 3, 2)
self.assertEqual(t.strftime("m:%m d:%d y:%y"), "m:03 d:02 y:05")
self.assertEqual(t.strftime(""), "") # SF bug #761337
self.assertEqual(t.strftime('x'*1000), 'x'*1000) # SF bug #1556784
self.assertRaises(TypeError, t.strftime) # needs an arg
self.assertRaises(TypeError, t.strftime, "one", "two") # too many args
self.assertRaises(TypeError, t.strftime, 42) # arg wrong type
# test that unicode input is allowed (issue 2782)
self.assertEqual(t.strftime(u"%m"), "03")
# A naive object replaces %z and %Z w/ empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
#make sure that invalid format specifiers are handled correctly
#self.assertRaises(ValueError, t.strftime, "%e")
#self.assertRaises(ValueError, t.strftime, "%")
#self.assertRaises(ValueError, t.strftime, "%#")
#oh well, some systems just ignore those invalid ones.
        #at least, exercise them to make sure that no crashes
#are generated
for f in ["%e", "%", "%#"]:
try:
t.strftime(f)
except ValueError:
pass
#check that this standard extension works
t.strftime("%f")
def test_format(self):
dt = self.theclass(2007, 9, 10)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_resolution_info(self):
self.assertIsInstance(self.theclass.min, self.theclass)
self.assertIsInstance(self.theclass.max, self.theclass)
self.assertIsInstance(self.theclass.resolution, timedelta)
self.assertTrue(self.theclass.max > self.theclass.min)
def test_extreme_timedelta(self):
big = self.theclass.max - self.theclass.min
# 3652058 days, 23 hours, 59 minutes, 59 seconds, 999999 microseconds
n = (big.days*24*3600 + big.seconds)*1000000 + big.microseconds
# n == 315537897599999999 ~= 2**58.13
justasbig = timedelta(0, 0, n)
self.assertEqual(big, justasbig)
self.assertEqual(self.theclass.min + big, self.theclass.max)
self.assertEqual(self.theclass.max - big, self.theclass.min)
def test_timetuple(self):
for i in range(7):
# January 2, 1956 is a Monday (0)
d = self.theclass(1956, 1, 2+i)
t = d.timetuple()
self.assertEqual(t, (1956, 1, 2+i, 0, 0, 0, i, 2+i, -1))
# February 1, 1956 is a Wednesday (2)
d = self.theclass(1956, 2, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 2, 1+i, 0, 0, 0, (2+i)%7, 32+i, -1))
# March 1, 1956 is a Thursday (3), and is the 31+29+1 = 61st day
# of the year.
d = self.theclass(1956, 3, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 3, 1+i, 0, 0, 0, (3+i)%7, 61+i, -1))
self.assertEqual(t.tm_year, 1956)
self.assertEqual(t.tm_mon, 3)
self.assertEqual(t.tm_mday, 1+i)
self.assertEqual(t.tm_hour, 0)
self.assertEqual(t.tm_min, 0)
self.assertEqual(t.tm_sec, 0)
self.assertEqual(t.tm_wday, (3+i)%7)
self.assertEqual(t.tm_yday, 61+i)
self.assertEqual(t.tm_isdst, -1)
def test_pickling(self):
args = 6, 7, 23
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = self.theclass(2, 3, 4)
t2 = self.theclass(2, 3, 4)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertFalse(t1 != t2)
self.assertFalse(t1 < t2)
self.assertFalse(t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = self.theclass(*args) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertFalse(t1 == t2)
self.assertFalse(t2 == t1)
self.assertFalse(t1 > t2)
self.assertFalse(t2 < t1)
self.assertFalse(t1 >= t2)
self.assertFalse(t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_mixed_compare(self):
our = self.theclass(2000, 4, 5)
self.assertRaises(TypeError, cmp, our, 1)
self.assertRaises(TypeError, cmp, 1, our)
class AnotherDateTimeClass(object):
def __cmp__(self, other):
# Return "equal" so calling this can't be confused with
# compare-by-address (which never says "equal" for distinct
# objects).
return 0
__hash__ = None # Silence Py3k warning
# This still errors, because date and datetime comparison raise
# TypeError instead of NotImplemented when they don't know what to
# do, in order to stop comparison from falling back to the default
# compare-by-address.
their = AnotherDateTimeClass()
self.assertRaises(TypeError, cmp, our, their)
# Oops: The next stab raises TypeError in the C implementation,
# but not in the Python implementation of datetime. The difference
# is due to that the Python implementation defines __cmp__ but
# the C implementation defines tp_richcompare. This is more pain
# to fix than it's worth, so commenting out the test.
# self.assertEqual(cmp(their, our), 0)
# But date and datetime comparison return NotImplemented instead if the
# other object has a timetuple attr. This gives the other object a
# chance to do the comparison.
class Comparable(AnotherDateTimeClass):
def timetuple(self):
return ()
their = Comparable()
self.assertEqual(cmp(our, their), 0)
self.assertEqual(cmp(their, our), 0)
self.assertTrue(our == their)
self.assertTrue(their == our)
def test_bool(self):
# All dates are considered true.
self.assertTrue(self.theclass.min)
self.assertTrue(self.theclass.max)
def test_strftime_out_of_range(self):
# For nasty technical reasons, we can't handle years before 1900.
cls = self.theclass
self.assertEqual(cls(1900, 1, 1).strftime("%Y"), "1900")
for y in 1, 49, 51, 99, 100, 1000, 1899:
self.assertRaises(ValueError, cls(y, 1, 1).strftime, "%Y")
def test_replace(self):
cls = self.theclass
args = [1, 2, 3]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_subclass_date(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month
args = 2003, 4, 14
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month - 7)
def test_pickling_subclass_date(self):
args = 6, 7, 23
orig = SubclassDate(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_backdoor_resistance(self):
# For fast unpickling, the constructor accepts a pickle string.
# This is a low-overhead backdoor. A user can (by intent or
# mistake) pass a string directly, which (if it's the right length)
# will get treated like a pickle, and bypass the normal sanity
# checks in the constructor. This can create insane objects.
# The constructor doesn't want to burn the time to validate all
# fields, but does check the month field. This stops, e.g.,
# datetime.datetime('1995-03-25') from yielding an insane object.
base = '1995-03-25'
if not issubclass(self.theclass, datetime):
base = base[:4]
for month_byte in '9', chr(0), chr(13), '\xff':
self.assertRaises(TypeError, self.theclass,
base[:2] + month_byte + base[3:])
for ord_byte in range(1, 13):
# This shouldn't blow up because of the month byte alone. If
# the implementation changes to do more-careful checking, it may
# blow up because other fields are insane.
self.theclass(base[:2] + chr(ord_byte) + base[3:])
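# Editor's sketch of the "backdoor" exercised above, assuming (as in
# CPython 2.x) that date pickles its state as a 4-byte packed string of
# (year high byte, year low byte, month, day).  The helper below is
# illustrative only and is not used by the test suite.
def _packed_date_state(year, month, day):
    # Build the kind of string the constructor accepts as pickle data.
    return chr(year >> 8) + chr(year & 0xFF) + chr(month) + chr(day)
# e.g. _packed_date_state(1995, 3, 25) has chr(3) as its third byte; the
# test replaces that byte with values like chr(0) or chr(13) to trigger the
# month-field check.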
#############################################################################
# datetime tests
class SubclassDatetime(datetime):
sub_var = 1
class TestDateTime(TestDate):
theclass = datetime
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1, 12, 0)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 0)
self.assertEqual(dt.second, 0)
self.assertEqual(dt.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
dt = self.theclass(2002, 3, 1, 12, 59, 59, 8000)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 59)
self.assertEqual(dt.second, 59)
self.assertEqual(dt.microsecond, 8000)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3, 4, 5, 6, 7),
self.theclass.now()):
# Verify dt -> string -> datetime identity.
s = repr(dt)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.microsecond)
self.assertEqual(dt, dt2)
def test_isoformat(self):
t = self.theclass(2, 3, 2, 4, 5, 1, 123)
self.assertEqual(t.isoformat(), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat('T'), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat(' '), "0002-03-02 04:05:01.000123")
self.assertEqual(t.isoformat('\x00'), "0002-03-02\x0004:05:01.000123")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 04:05:01.000123")
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat('T'), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat(' '), "0002-03-02 00:00:00")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 00:00:00")
def test_format(self):
dt = self.theclass(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_more_ctime(self):
# Test fields that TestDate doesn't touch.
import time
t = self.theclass(2002, 3, 2, 18, 3, 5, 123)
self.assertEqual(t.ctime(), "Sat Mar 2 18:03:05 2002")
# Oops! The next line fails on Win2K under MSVC 6, so it's commented
# out. The difference is that t.ctime() produces " 2" for the day,
# but platform ctime() produces "02" for the day. According to
# C99, t.ctime() is correct here.
# self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
# So test a case where that difference doesn't matter.
t = self.theclass(2002, 3, 22, 18, 3, 5, 123)
self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
def test_tz_independent_comparing(self):
dt1 = self.theclass(2002, 3, 1, 9, 0, 0)
dt2 = self.theclass(2002, 3, 1, 10, 0, 0)
dt3 = self.theclass(2002, 3, 1, 9, 0, 0)
self.assertEqual(dt1, dt3)
self.assertTrue(dt2 > dt3)
# Make sure comparison doesn't forget microseconds, and isn't done
# via comparing a float timestamp (an IEEE double doesn't have enough
# precision to span microsecond resolution across years 1 thru 9999,
# so comparing via timestamp necessarily calls some distinct values
# equal).
dt1 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999998)
us = timedelta(microseconds=1)
dt2 = dt1 + us
self.assertEqual(dt2 - dt1, us)
self.assertTrue(dt1 < dt2)
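    # Editor's note on the precision claim in test_tz_independent_comparing:
    # 9999 years is roughly 3.2e11 seconds, i.e. about 3.2e17 microseconds,
    # while an IEEE-754 double's 53-bit mantissa distinguishes only about
    # 9.0e15 values, so microsecond-distinct datetimes would collide if
    # compared via float timestamps.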
def test_strftime_with_bad_tzname_replace(self):
        # verify a TypeError (rather than a crash) is raised if
        # tzinfo.tzname().replace() returns a non-string
class MyTzInfo(FixedOffset):
def tzname(self, dt):
class MyStr(str):
def replace(self, *args):
return None
return MyStr('name')
t = self.theclass(2005, 3, 2, 0, 0, 0, 0, MyTzInfo(3, 'name'))
self.assertRaises(TypeError, t.strftime, '%Z')
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
# bad hours
self.theclass(2000, 1, 31, 0) # no exception
self.theclass(2000, 1, 31, 23) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 24)
# bad minutes
self.theclass(2000, 1, 31, 23, 0) # no exception
self.theclass(2000, 1, 31, 23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 60)
# bad seconds
self.theclass(2000, 1, 31, 23, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 60)
# bad microseconds
self.theclass(2000, 1, 31, 23, 59, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59,
1000000)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31, 23, 30, 17)
e = self.theclass(2000, 12, 31, 23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1, 0, 5, 17)
e = self.theclass(2001, 1, 1, 0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
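        # Editor's note on the day count above: range(1956, 2002, 4) has 12
        # entries (1956, 1960, ..., 2000; 2000 is a leap year because it is
        # divisible by 400), so the 46-year span holds 46*365 + 12 = 16802
        # days.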
a = self.theclass(2002, 3, 2, 17, 6)
millisec = timedelta(0, 0, 1000)
hour = timedelta(0, 3600)
day = timedelta(1)
week = timedelta(7)
self.assertEqual(a + hour, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(hour + a, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(a + 10*hour, self.theclass(2002, 3, 3, 3, 6))
self.assertEqual(a - hour, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(-hour + a, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(a - hour, a + -hour)
self.assertEqual(a - 20*hour, self.theclass(2002, 3, 1, 21, 6))
self.assertEqual(a + day, self.theclass(2002, 3, 3, 17, 6))
self.assertEqual(a - day, self.theclass(2002, 3, 1, 17, 6))
self.assertEqual(a + week, self.theclass(2002, 3, 9, 17, 6))
self.assertEqual(a - week, self.theclass(2002, 2, 23, 17, 6))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1, 17, 6))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3, 17, 6))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a + hour) - a, hour)
self.assertEqual((a + millisec) - a, millisec)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual((a - hour) - a, -hour)
self.assertEqual((a - millisec) - a, -millisec)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a + hour), -hour)
self.assertEqual(a - (a + millisec), -millisec)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
self.assertEqual(a - (a - hour), hour)
self.assertEqual(a - (a - millisec), millisec)
self.assertEqual(a + (week + day + hour + millisec),
self.theclass(2002, 3, 10, 18, 6, 0, 1000))
self.assertEqual(a + (week + day + hour + millisec),
(((a + week) + day) + hour) + millisec)
self.assertEqual(a - (week + day + hour + millisec),
self.theclass(2002, 2, 22, 16, 5, 59, 999000))
self.assertEqual(a - (week + day + hour + millisec),
(((a - week) - day) - hour) - millisec)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - datetime is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing datetime and (delta or datetime) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# datetime + datetime is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_pickling(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_pickling(self):
a = self.theclass(2003, 2, 7, 16, 48, 37, 444116)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(a, proto)
b = pickle.loads(s)
self.assertEqual(b.year, 2003)
self.assertEqual(b.month, 2)
self.assertEqual(b.day, 7)
def test_pickling_subclass_datetime(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = SubclassDatetime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_compare(self):
# The test_compare() inherited from TestDate covers the error cases.
# We just want to test lexicographic ordering on the members datetime
# has that date lacks.
args = [2000, 11, 29, 20, 58, 16, 999998]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertFalse(t1 != t2)
self.assertFalse(t1 < t2)
self.assertFalse(t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertFalse(t1 == t2)
self.assertFalse(t2 == t1)
self.assertFalse(t1 > t2)
self.assertFalse(t2 < t1)
self.assertFalse(t1 >= t2)
self.assertFalse(t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
# A helper for timestamp constructor tests.
def verify_field_equality(self, expected, got):
self.assertEqual(expected.tm_year, got.year)
self.assertEqual(expected.tm_mon, got.month)
self.assertEqual(expected.tm_mday, got.day)
self.assertEqual(expected.tm_hour, got.hour)
self.assertEqual(expected.tm_min, got.minute)
self.assertEqual(expected.tm_sec, got.second)
def test_fromtimestamp(self):
import time
ts = time.time()
expected = time.localtime(ts)
got = self.theclass.fromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_utcfromtimestamp(self):
import time
ts = time.time()
expected = time.gmtime(ts)
got = self.theclass.utcfromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_microsecond_rounding(self):
        # Test whether fromtimestamp "rounds up" floats that fall less
        # than one microsecond below an integer.
self.assertEqual(self.theclass.fromtimestamp(0.9999999),
self.theclass.fromtimestamp(1))
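    # Editor's note on test_microsecond_rounding: 0.9999999 seconds is
    # 999999.9 microseconds; rounding the fraction gives 1000000
    # microseconds, which carries into a whole second, hence the equality
    # with fromtimestamp(1).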
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.fromtimestamp,
insane)
def test_insane_utcfromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.utcfromtimestamp,
insane)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_fromtimestamp(self):
# The result is tz-dependent; at least test that this doesn't
# fail (like it did before bug 1646728 was fixed).
self.theclass.fromtimestamp(-1.05)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_utcfromtimestamp(self):
d = self.theclass.utcfromtimestamp(-1.05)
self.assertEqual(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000))
def test_utcnow(self):
import time
# Call it a success if utcnow() and utcfromtimestamp() are within
# a second of each other.
tolerance = timedelta(seconds=1)
for dummy in range(3):
from_now = self.theclass.utcnow()
from_timestamp = self.theclass.utcfromtimestamp(time.time())
if abs(from_timestamp - from_now) <= tolerance:
break
# Else try again a few times.
self.assertLessEqual(abs(from_timestamp - from_now), tolerance)
def test_strptime(self):
import _strptime
string = '2004-12-01 13:02:47.197'
format = '%Y-%m-%d %H:%M:%S.%f'
result, frac = _strptime._strptime(string, format)
expected = self.theclass(*(result[0:6]+(frac,)))
got = self.theclass.strptime(string, format)
self.assertEqual(expected, got)
def test_more_timetuple(self):
# This tests fields beyond those tested by the TestDate.test_timetuple.
t = self.theclass(2004, 12, 31, 6, 22, 33)
self.assertEqual(t.timetuple(), (2004, 12, 31, 6, 22, 33, 4, 366, -1))
self.assertEqual(t.timetuple(),
(t.year, t.month, t.day,
t.hour, t.minute, t.second,
t.weekday(),
t.toordinal() - date(t.year, 1, 1).toordinal() + 1,
-1))
tt = t.timetuple()
self.assertEqual(tt.tm_year, t.year)
self.assertEqual(tt.tm_mon, t.month)
self.assertEqual(tt.tm_mday, t.day)
self.assertEqual(tt.tm_hour, t.hour)
self.assertEqual(tt.tm_min, t.minute)
self.assertEqual(tt.tm_sec, t.second)
self.assertEqual(tt.tm_wday, t.weekday())
self.assertEqual(tt.tm_yday, t.toordinal() -
date(t.year, 1, 1).toordinal() + 1)
self.assertEqual(tt.tm_isdst, -1)
def test_more_strftime(self):
# This tests fields beyond those tested by the TestDate.test_strftime.
t = self.theclass(2004, 12, 31, 6, 22, 33, 47)
self.assertEqual(t.strftime("%m %d %y %f %S %M %H %j"),
"12 31 04 000047 33 22 06 366")
def test_extract(self):
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
def test_combine(self):
d = date(2002, 3, 4)
t = time(18, 45, 3, 1234)
expected = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
combine = self.theclass.combine
dt = combine(d, t)
self.assertEqual(dt, expected)
dt = combine(time=t, date=d)
self.assertEqual(dt, expected)
self.assertEqual(d, dt.date())
self.assertEqual(t, dt.time())
self.assertEqual(dt, combine(dt.date(), dt.time()))
self.assertRaises(TypeError, combine) # need an arg
self.assertRaises(TypeError, combine, d) # need two args
self.assertRaises(TypeError, combine, t, d) # args reversed
self.assertRaises(TypeError, combine, d, t, 1) # too many args
self.assertRaises(TypeError, combine, "date", "time") # wrong types
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4, 5, 6, 7]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_astimezone(self):
# Pretty boring! The TZ test is more interesting here. astimezone()
# simply can't be applied to a naive object.
dt = self.theclass.now()
f = FixedOffset(44, "")
self.assertRaises(TypeError, dt.astimezone) # not enough args
self.assertRaises(TypeError, dt.astimezone, f, f) # too many args
self.assertRaises(TypeError, dt.astimezone, dt) # arg wrong type
self.assertRaises(ValueError, dt.astimezone, f) # naive
self.assertRaises(ValueError, dt.astimezone, tz=f) # naive
class Bogus(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return timedelta(0)
bog = Bogus()
self.assertRaises(ValueError, dt.astimezone, bog) # naive
class AlsoBogus(tzinfo):
def utcoffset(self, dt): return timedelta(0)
def dst(self, dt): return None
alsobog = AlsoBogus()
self.assertRaises(ValueError, dt.astimezone, alsobog) # also naive
def test_subclass_datetime(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month + self.second
args = 2003, 4, 14, 12, 13, 41
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month +
dt1.second - 7)
class SubclassTime(time):
sub_var = 1
class TestTime(HarmlessMixedComparison, unittest.TestCase):
theclass = time
def test_basic_attributes(self):
t = self.theclass(12, 0)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
t = self.theclass(12, 59, 59, 8000)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 59)
self.assertEqual(t.second, 59)
self.assertEqual(t.microsecond, 8000)
def test_roundtrip(self):
t = self.theclass(1, 2, 3, 4)
# Verify t -> string -> time identity.
s = repr(t)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
t2 = eval(s)
self.assertEqual(t, t2)
# Verify identity via reconstructing from pieces.
t2 = self.theclass(t.hour, t.minute, t.second,
t.microsecond)
self.assertEqual(t, t2)
def test_comparing(self):
args = [1, 2, 3, 4]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertFalse(t1 != t2)
self.assertFalse(t1 < t2)
self.assertFalse(t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertFalse(t1 == t2)
self.assertFalse(t2 == t1)
self.assertFalse(t1 > t2)
self.assertFalse(t2 < t1)
self.assertFalse(t1 >= t2)
self.assertFalse(t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_bad_constructor_arguments(self):
# bad hours
self.theclass(0, 0) # no exception
self.theclass(23, 0) # no exception
self.assertRaises(ValueError, self.theclass, -1, 0)
self.assertRaises(ValueError, self.theclass, 24, 0)
# bad minutes
self.theclass(23, 0) # no exception
self.theclass(23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, -1)
self.assertRaises(ValueError, self.theclass, 23, 60)
# bad seconds
self.theclass(23, 59, 0) # no exception
self.theclass(23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 60)
# bad microseconds
self.theclass(23, 59, 59, 0) # no exception
self.theclass(23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 59, 1000000)
def test_hash_equality(self):
d = self.theclass(23, 30, 17)
e = self.theclass(23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(0, 5, 17)
e = self.theclass(0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_isoformat(self):
t = self.theclass(4, 5, 1, 123)
self.assertEqual(t.isoformat(), "04:05:01.000123")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass()
self.assertEqual(t.isoformat(), "00:00:00")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1)
self.assertEqual(t.isoformat(), "00:00:00.000001")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10)
self.assertEqual(t.isoformat(), "00:00:00.000010")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100)
self.assertEqual(t.isoformat(), "00:00:00.000100")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1000)
self.assertEqual(t.isoformat(), "00:00:00.001000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10000)
self.assertEqual(t.isoformat(), "00:00:00.010000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100000)
self.assertEqual(t.isoformat(), "00:00:00.100000")
self.assertEqual(t.isoformat(), str(t))
def test_1653736(self):
# verify it doesn't accept extra keyword arguments
t = self.theclass(second=1)
self.assertRaises(TypeError, t.isoformat, foo=3)
def test_strftime(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.strftime('%H %M %S %f'), "01 02 03 000004")
# A naive object replaces %z and %Z with empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
def test_format(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.__format__(''), str(t))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(1, 2, 3, 4)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(1, 2, 3, 4)
self.assertEqual(b.__format__(''), str(t))
for fmt in ['%H %M %S',
]:
self.assertEqual(t.__format__(fmt), t.strftime(fmt))
self.assertEqual(a.__format__(fmt), t.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_str(self):
self.assertEqual(str(self.theclass(1, 2, 3, 4)), "01:02:03.000004")
self.assertEqual(str(self.theclass(10, 2, 3, 4000)), "10:02:03.004000")
self.assertEqual(str(self.theclass(0, 2, 3, 400000)), "00:02:03.400000")
self.assertEqual(str(self.theclass(12, 2, 3, 0)), "12:02:03")
self.assertEqual(str(self.theclass(23, 15, 0, 0)), "23:15:00")
def test_repr(self):
name = 'datetime.' + self.theclass.__name__
self.assertEqual(repr(self.theclass(1, 2, 3, 4)),
"%s(1, 2, 3, 4)" % name)
self.assertEqual(repr(self.theclass(10, 2, 3, 4000)),
"%s(10, 2, 3, 4000)" % name)
self.assertEqual(repr(self.theclass(0, 2, 3, 400000)),
"%s(0, 2, 3, 400000)" % name)
self.assertEqual(repr(self.theclass(12, 2, 3, 0)),
"%s(12, 2, 3)" % name)
self.assertEqual(repr(self.theclass(23, 15, 0, 0)),
"%s(23, 15)" % name)
def test_resolution_info(self):
self.assertIsInstance(self.theclass.min, self.theclass)
self.assertIsInstance(self.theclass.max, self.theclass)
self.assertIsInstance(self.theclass.resolution, timedelta)
self.assertTrue(self.theclass.max > self.theclass.min)
def test_pickling(self):
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_pickling_subclass_time(self):
args = 20, 59, 16, 64**2
orig = SubclassTime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_bool(self):
cls = self.theclass
self.assertTrue(cls(1))
self.assertTrue(cls(0, 1))
self.assertTrue(cls(0, 0, 1))
self.assertTrue(cls(0, 0, 0, 1))
self.assertFalse(cls(0))
self.assertFalse(cls())
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_subclass_time(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.isoformat(), dt2.isoformat())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
def test_backdoor_resistance(self):
# see TestDate.test_backdoor_resistance().
base = '2:59.0'
for hour_byte in ' ', '9', chr(24), '\xff':
self.assertRaises(TypeError, self.theclass,
hour_byte + base[1:])
# A mixin for classes with a tzinfo= argument. Subclasses must define
# theclass as a class attribute, and theclass(1, 1, 1, tzinfo=whatever)
# must be legit (which is true for time and datetime).
class TZInfoBase:
def test_argument_passing(self):
cls = self.theclass
# A datetime passes itself on, a time passes None.
class introspective(tzinfo):
def tzname(self, dt): return dt and "real" or "none"
def utcoffset(self, dt):
return timedelta(minutes = dt and 42 or -42)
dst = utcoffset
obj = cls(1, 2, 3, tzinfo=introspective())
expected = cls is time and "none" or "real"
self.assertEqual(obj.tzname(), expected)
expected = timedelta(minutes=(cls is time and -42 or 42))
self.assertEqual(obj.utcoffset(), expected)
self.assertEqual(obj.dst(), expected)
def test_bad_tzinfo_classes(self):
cls = self.theclass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=12)
class NiceTry(object):
def __init__(self): pass
def utcoffset(self, dt): pass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=NiceTry)
class BetterTry(tzinfo):
def __init__(self): pass
def utcoffset(self, dt): pass
b = BetterTry()
t = cls(1, 1, 1, tzinfo=b)
self.assertIs(t.tzinfo, b)
def test_utc_offset_out_of_bounds(self):
class Edgy(tzinfo):
def __init__(self, offset):
self.offset = timedelta(minutes=offset)
def utcoffset(self, dt):
return self.offset
cls = self.theclass
for offset, legit in ((-1440, False),
(-1439, True),
(1439, True),
(1440, False)):
if cls is time:
t = cls(1, 2, 3, tzinfo=Edgy(offset))
elif cls is datetime:
t = cls(6, 6, 6, 1, 2, 3, tzinfo=Edgy(offset))
else:
assert 0, "impossible"
if legit:
aofs = abs(offset)
h, m = divmod(aofs, 60)
tag = "%c%02d:%02d" % (offset < 0 and '-' or '+', h, m)
if isinstance(t, datetime):
t = t.timetz()
self.assertEqual(str(t), "01:02:03" + tag)
else:
self.assertRaises(ValueError, str, t)
def test_tzinfo_classes(self):
cls = self.theclass
class C1(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return None
def tzname(self, dt): return None
for t in (cls(1, 1, 1),
cls(1, 1, 1, tzinfo=None),
cls(1, 1, 1, tzinfo=C1())):
self.assertIsNone(t.utcoffset())
self.assertIsNone(t.dst())
self.assertIsNone(t.tzname())
class C3(tzinfo):
def utcoffset(self, dt): return timedelta(minutes=-1439)
def dst(self, dt): return timedelta(minutes=1439)
def tzname(self, dt): return "aname"
t = cls(1, 1, 1, tzinfo=C3())
self.assertEqual(t.utcoffset(), timedelta(minutes=-1439))
self.assertEqual(t.dst(), timedelta(minutes=1439))
self.assertEqual(t.tzname(), "aname")
# Wrong types.
class C4(tzinfo):
def utcoffset(self, dt): return "aname"
def dst(self, dt): return 7
def tzname(self, dt): return 0
t = cls(1, 1, 1, tzinfo=C4())
self.assertRaises(TypeError, t.utcoffset)
self.assertRaises(TypeError, t.dst)
self.assertRaises(TypeError, t.tzname)
# Offset out of range.
class C6(tzinfo):
def utcoffset(self, dt): return timedelta(hours=-24)
def dst(self, dt): return timedelta(hours=24)
t = cls(1, 1, 1, tzinfo=C6())
self.assertRaises(ValueError, t.utcoffset)
self.assertRaises(ValueError, t.dst)
# Not a whole number of minutes.
class C7(tzinfo):
def utcoffset(self, dt): return timedelta(seconds=61)
def dst(self, dt): return timedelta(microseconds=-81)
t = cls(1, 1, 1, tzinfo=C7())
self.assertRaises(ValueError, t.utcoffset)
self.assertRaises(ValueError, t.dst)
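    # Editor's summary of the contract checked above: utcoffset() and dst()
    # must return None or a timedelta that is a whole number of minutes and
    # strictly less than one day in magnitude, and tzname() must return
    # None or a string; any other result raises TypeError or ValueError
    # when the accessor is called.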
def test_aware_compare(self):
cls = self.theclass
# Ensure that utcoffset() gets ignored if the comparands have
# the same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
base = cls(8, 9, 10, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
got = cmp(x, y)
expected = cmp(x.minute, y.minute)
self.assertEqual(got, expected)
        # However, if they're different members, utcoffset is not ignored.
        # Note that a time can't actually have an operand-dependent offset,
# though (and time.utcoffset() passes None to tzinfo.utcoffset()),
# so skip this test for time.
if cls is not time:
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = cmp(x, y)
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = 0
elif x is y is d2:
expected = 0
elif x is d2:
expected = -1
else:
assert y is d2
expected = 1
self.assertEqual(got, expected)
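# Editor's note -- the aware-object tests above and below lean on the
# FixedOffset helper defined earlier in this file.  As a sketch (an assumed
# shape, not the actual helper), a constant-offset tzinfo only needs to
# return fixed values from the three tzinfo hooks:
class _IllustrativeFixedOffset(tzinfo):
    """Constant-offset tzinfo, for illustration only."""
    def __init__(self, minutes, name):
        self.__offset = timedelta(minutes=minutes)
        self.__name = name
    def utcoffset(self, dt):
        return self.__offset
    def tzname(self, dt):
        return self.__name
    def dst(self, dt):
        return timedelta(0)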
# Testing time objects with a non-None tzinfo.
class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
theclass = time
def test_empty(self):
t = self.theclass()
self.assertEqual(t.hour, 0)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
self.assertIsNone(t.tzinfo)
def test_zones(self):
est = FixedOffset(-300, "EST", 1)
utc = FixedOffset(0, "UTC", -2)
met = FixedOffset(60, "MET", 3)
t1 = time( 7, 47, tzinfo=est)
t2 = time(12, 47, tzinfo=utc)
t3 = time(13, 47, tzinfo=met)
t4 = time(microsecond=40)
t5 = time(microsecond=40, tzinfo=utc)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.assertIsNone(t4.tzinfo)
self.assertEqual(t5.tzinfo, utc)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.assertIsNone(t4.utcoffset())
self.assertRaises(TypeError, t1.utcoffset, "no args")
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.assertIsNone(t4.tzname())
self.assertRaises(TypeError, t1.tzname, "no args")
self.assertEqual(t1.dst(), timedelta(minutes=1))
self.assertEqual(t2.dst(), timedelta(minutes=-2))
self.assertEqual(t3.dst(), timedelta(minutes=3))
self.assertIsNone(t4.dst())
self.assertRaises(TypeError, t1.dst, "no args")
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertRaises(TypeError, lambda: t4 == t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t4 < t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t5 < t4) # mixed tz-aware & naive
self.assertEqual(str(t1), "07:47:00-05:00")
self.assertEqual(str(t2), "12:47:00+00:00")
self.assertEqual(str(t3), "13:47:00+01:00")
self.assertEqual(str(t4), "00:00:00.000040")
self.assertEqual(str(t5), "00:00:00.000040+00:00")
self.assertEqual(t1.isoformat(), "07:47:00-05:00")
self.assertEqual(t2.isoformat(), "12:47:00+00:00")
self.assertEqual(t3.isoformat(), "13:47:00+01:00")
self.assertEqual(t4.isoformat(), "00:00:00.000040")
self.assertEqual(t5.isoformat(), "00:00:00.000040+00:00")
d = 'datetime.time'
self.assertEqual(repr(t1), d + "(7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "(12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "(13, 47, tzinfo=met)")
self.assertEqual(repr(t4), d + "(0, 0, 0, 40)")
self.assertEqual(repr(t5), d + "(0, 0, 0, 40, tzinfo=utc)")
self.assertEqual(t1.strftime("%H:%M:%S %%Z=%Z %%z=%z"),
"07:47:00 %Z=EST %z=-0500")
self.assertEqual(t2.strftime("%H:%M:%S %Z %z"), "12:47:00 UTC +0000")
self.assertEqual(t3.strftime("%H:%M:%S %Z %z"), "13:47:00 MET +0100")
yuck = FixedOffset(-1439, "%z %Z %%z%%Z")
t1 = time(23, 59, tzinfo=yuck)
self.assertEqual(t1.strftime("%H:%M %%Z='%Z' %%z='%z'"),
"23:59 %Z='%z %Z %%z%%Z' %z='-2359'")
# Check that an invalid tzname result raises an exception.
class Badtzname(tzinfo):
def tzname(self, dt): return 42
t = time(2, 3, 4, tzinfo=Badtzname())
self.assertEqual(t.strftime("%H:%M:%S"), "02:03:04")
self.assertRaises(TypeError, t.strftime, "%Z")
def test_hash_edge_cases(self):
# Offsets that overflow a basic time.
t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, ""))
t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, ""))
self.assertEqual(hash(t1), hash(t2))
t1 = self.theclass(23, 58, 6, 100, tzinfo=FixedOffset(-1000, ""))
t2 = self.theclass(23, 48, 6, 100, tzinfo=FixedOffset(-1010, ""))
self.assertEqual(hash(t1), hash(t2))
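    # Editor's note on the "overflow" above: hashing an aware time
    # normalizes to UTC by subtracting the offset, so 00:01:02.000003 at
    # +1439 minutes and 00:00:02.000003 at +1438 minutes both land 1438
    # minutes before the naive midnight -- the same instant -- even though
    # the subtraction runs off the low end of a plain time.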
def test_pickling(self):
# Try one without a tzinfo.
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(5, 6, 7, tzinfo=tinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_more_bool(self):
# Test cases with non-None tzinfo.
cls = self.theclass
t = cls(0, tzinfo=FixedOffset(-300, ""))
self.assertTrue(t)
t = cls(5, tzinfo=FixedOffset(-300, ""))
self.assertTrue(t)
t = cls(5, tzinfo=FixedOffset(300, ""))
self.assertFalse(t)
t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, ""))
self.assertFalse(t)
# Mostly ensuring this doesn't overflow internally.
t = cls(0, tzinfo=FixedOffset(23*60 + 59, ""))
self.assertTrue(t)
# But this should yield a value error -- the utcoffset is bogus.
t = cls(0, tzinfo=FixedOffset(24*60, ""))
self.assertRaises(ValueError, lambda: bool(t))
# Likewise.
t = cls(0, tzinfo=FixedOffset(-24*60, ""))
self.assertRaises(ValueError, lambda: bool(t))
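    # Editor's note: for an aware time, truth is decided after the UTC
    # adjustment, so the object is false exactly when (time - utcoffset())
    # works out to midnight -- e.g. 23:59 at an offset of +23:59 above.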
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertIsNone(base2.tzinfo)
self.assertIsNone(base2.tzname())
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertIs(base.tzinfo, base3.tzinfo)
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_mixed_compare(self):
t1 = time(1, 2, 3)
t2 = time(1, 2, 3)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertRaises(TypeError, lambda: t1 == t2)
# In time w/ identical tzinfo objects, utcoffset is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.assertTrue(t1 < t2) # t1's offset counter still going up
def test_subclass_timetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
# Testing datetime objects with a non-None tzinfo.
class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
theclass = datetime
def test_trivial(self):
dt = self.theclass(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(dt.year, 1)
self.assertEqual(dt.month, 2)
self.assertEqual(dt.day, 3)
self.assertEqual(dt.hour, 4)
self.assertEqual(dt.minute, 5)
self.assertEqual(dt.second, 6)
self.assertEqual(dt.microsecond, 7)
self.assertEqual(dt.tzinfo, None)
def test_even_more_compare(self):
# The test_compare() and test_more_compare() inherited from TestDate
# and TestDateTime covered non-tzinfo cases.
# Smallest possible after UTC adjustment.
t1 = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
# Largest possible after UTC adjustment.
t2 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
# Make sure those compare correctly, and w/o overflow.
self.assertTrue(t1 < t2)
self.assertTrue(t1 != t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 == t1)
self.assertTrue(t2 == t2)
        # Equal after adjustment.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""))
t2 = self.theclass(2, 1, 1, 3, 13, tzinfo=FixedOffset(3*60+13+2, ""))
self.assertEqual(t1, t2)
# Change t1 not to subtract a minute, and t1 should be larger.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(0, ""))
self.assertTrue(t1 > t2)
# Change t1 to subtract 2 minutes, and t1 should be smaller.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(2, ""))
self.assertTrue(t1 < t2)
# Back to the original t1, but make seconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
second=1)
self.assertTrue(t1 > t2)
# Likewise, but make microseconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
microsecond=1)
self.assertTrue(t1 > t2)
# Make t2 naive and it should fail.
t2 = self.theclass.min
self.assertRaises(TypeError, lambda: t1 == t2)
self.assertEqual(t2, t2)
# It's also naive if it has tzinfo but tzinfo.utcoffset() is None.
class Naive(tzinfo):
def utcoffset(self, dt): return None
t2 = self.theclass(5, 6, 7, tzinfo=Naive())
self.assertRaises(TypeError, lambda: t1 == t2)
self.assertEqual(t2, t2)
# OTOH, it's OK to compare two of these mixing the two ways of being
# naive.
t1 = self.theclass(5, 6, 7)
self.assertEqual(t1, t2)
        # Try a bogus utcoffset.
class Bogus(tzinfo):
def utcoffset(self, dt):
return timedelta(minutes=1440) # out of bounds
t1 = self.theclass(2, 2, 2, tzinfo=Bogus())
t2 = self.theclass(2, 2, 2, tzinfo=FixedOffset(0, ""))
self.assertRaises(ValueError, lambda: t1 == t2)
def test_pickling(self):
# Try one without a tzinfo.
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(*args, **{'tzinfo': tinfo})
derived = self.theclass(1, 1, 1, tzinfo=FixedOffset(0, "", 0))
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_extreme_hashes(self):
# If an attempt is made to hash these via subtracting the offset
# then hashing a datetime object, OverflowError results. The
# Python implementation used to blow up here.
t = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
hash(t)
t = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
hash(t)
# OTOH, an OOB offset should blow up.
t = self.theclass(5, 5, 5, tzinfo=FixedOffset(-1440, ""))
self.assertRaises(ValueError, hash, t)
def test_zones(self):
est = FixedOffset(-300, "EST")
utc = FixedOffset(0, "UTC")
met = FixedOffset(60, "MET")
t1 = datetime(2002, 3, 19, 7, 47, tzinfo=est)
t2 = datetime(2002, 3, 19, 12, 47, tzinfo=utc)
t3 = datetime(2002, 3, 19, 13, 47, tzinfo=met)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertEqual(str(t1), "2002-03-19 07:47:00-05:00")
self.assertEqual(str(t2), "2002-03-19 12:47:00+00:00")
self.assertEqual(str(t3), "2002-03-19 13:47:00+01:00")
d = 'datetime.datetime(2002, 3, 19, '
self.assertEqual(repr(t1), d + "7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "13, 47, tzinfo=met)")
def test_combine(self):
met = FixedOffset(60, "MET")
d = date(2002, 3, 4)
tz = time(18, 45, 3, 1234, tzinfo=met)
dt = datetime.combine(d, tz)
self.assertEqual(dt, datetime(2002, 3, 4, 18, 45, 3, 1234,
tzinfo=met))
def test_extract(self):
met = FixedOffset(60, "MET")
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
self.assertEqual(dt.timetz(), time(18, 45, 3, 1234, tzinfo=met))
def test_tz_aware_arithmetic(self):
import random
now = self.theclass.now()
tz55 = FixedOffset(-330, "west 5:30")
timeaware = now.time().replace(tzinfo=tz55)
nowaware = self.theclass.combine(now.date(), timeaware)
self.assertIs(nowaware.tzinfo, tz55)
self.assertEqual(nowaware.timetz(), timeaware)
# Can't mix aware and non-aware.
self.assertRaises(TypeError, lambda: now - nowaware)
self.assertRaises(TypeError, lambda: nowaware - now)
# And adding datetime's doesn't make sense, aware or not.
self.assertRaises(TypeError, lambda: now + nowaware)
self.assertRaises(TypeError, lambda: nowaware + now)
self.assertRaises(TypeError, lambda: nowaware + nowaware)
# Subtracting should yield 0.
self.assertEqual(now - now, timedelta(0))
self.assertEqual(nowaware - nowaware, timedelta(0))
# Adding a delta should preserve tzinfo.
delta = timedelta(weeks=1, minutes=12, microseconds=5678)
nowawareplus = nowaware + delta
self.assertIs(nowaware.tzinfo, tz55)
nowawareplus2 = delta + nowaware
self.assertIs(nowawareplus2.tzinfo, tz55)
self.assertEqual(nowawareplus, nowawareplus2)
# that - delta should be what we started with, and that - what we
# started with should be delta.
diff = nowawareplus - delta
self.assertIs(diff.tzinfo, tz55)
self.assertEqual(nowaware, diff)
self.assertRaises(TypeError, lambda: delta - nowawareplus)
self.assertEqual(nowawareplus - nowaware, delta)
# Make up a random timezone.
tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone")
# Attach it to nowawareplus.
nowawareplus = nowawareplus.replace(tzinfo=tzr)
self.assertIs(nowawareplus.tzinfo, tzr)
# Make sure the difference takes the timezone adjustments into account.
got = nowaware - nowawareplus
# Expected: (nowaware base - nowaware offset) -
# (nowawareplus base - nowawareplus offset) =
# (nowaware base - nowawareplus base) +
# (nowawareplus offset - nowaware offset) =
# -delta + nowawareplus offset - nowaware offset
expected = nowawareplus.utcoffset() - nowaware.utcoffset() - delta
self.assertEqual(got, expected)
# Try max possible difference.
min = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "min"))
max = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, "max"))
maxdiff = max - min
self.assertEqual(maxdiff, self.theclass.max - self.theclass.min +
timedelta(minutes=2*1439))
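    # Editor's worked example of the subtraction rule used above: aware
    # subtraction compares UTC instants, i.e.
    #     a - b == (a_naive - a.utcoffset()) - (b_naive - b.utcoffset())
    # so 12:00 at +01:00 minus 12:00 at +00:00 is -1 hour, not zero, which
    # is exactly the "expected" expression computed a few lines up.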
def test_tzinfo_now(self):
meth = self.theclass.now
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(off42)
again = meth(tz=off42)
self.assertIs(another.tzinfo, again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, 16)
self.assertRaises(TypeError, meth, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, off42, off42)
# We don't know which time zone we're in, and don't have a tzinfo
# class to represent it, so seeing whether a tz argument actually
# does a conversion is tricky.
weirdtz = FixedOffset(timedelta(hours=15, minutes=58), "weirdtz", 0)
utc = FixedOffset(0, "utc", 0)
for dummy in range(3):
now = datetime.now(weirdtz)
self.assertIs(now.tzinfo, weirdtz)
utcnow = datetime.utcnow().replace(tzinfo=utc)
now2 = utcnow.astimezone(weirdtz)
if abs(now - now2) < timedelta(seconds=30):
break
# Else the code is broken, or more than 30 seconds passed between
# calls; assuming the latter, just try again.
else:
# Three strikes and we're out.
self.fail("utcnow(), now(tz), or astimezone() may be broken")
def test_tzinfo_fromtimestamp(self):
import time
meth = self.theclass.fromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(ts, off42)
again = meth(ts, tz=off42)
self.assertIs(another.tzinfo, again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, ts, 16)
self.assertRaises(TypeError, meth, ts, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, ts, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, ts, off42, off42)
# Too few args.
self.assertRaises(TypeError, meth)
# Try to make sure tz= actually does some conversion.
timestamp = 1000000000
utcdatetime = datetime.utcfromtimestamp(timestamp)
# In POSIX (epoch 1970), that's 2001-09-09 01:46:40 UTC, give or take.
# But on some flavor of Mac, it's nowhere near that. So we can't have
# any idea here what time that actually is, we can only test that
# relative changes match.
utcoffset = timedelta(hours=-15, minutes=39) # arbitrary, but not zero
tz = FixedOffset(utcoffset, "tz", 0)
expected = utcdatetime + utcoffset
got = datetime.fromtimestamp(timestamp, tz)
self.assertEqual(expected, got.replace(tzinfo=None))
def test_tzinfo_utcnow(self):
meth = self.theclass.utcnow
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword; for whatever reason,
# utcnow() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, off42)
self.assertRaises(TypeError, meth, tzinfo=off42)
def test_tzinfo_utcfromtimestamp(self):
import time
meth = self.theclass.utcfromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword; for whatever reason,
# utcfromtimestamp() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, ts, off42)
self.assertRaises(TypeError, meth, ts, tzinfo=off42)
def test_tzinfo_timetuple(self):
# TestDateTime tested most of this. datetime adds a twist to the
# DST flag.
class DST(tzinfo):
def __init__(self, dstvalue):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
for dstvalue, flag in (-33, 1), (33, 1), (0, 0), (None, -1):
d = cls(1, 1, 1, 10, 20, 30, 40, tzinfo=DST(dstvalue))
t = d.timetuple()
self.assertEqual(1, t.tm_year)
self.assertEqual(1, t.tm_mon)
self.assertEqual(1, t.tm_mday)
self.assertEqual(10, t.tm_hour)
self.assertEqual(20, t.tm_min)
self.assertEqual(30, t.tm_sec)
self.assertEqual(0, t.tm_wday)
self.assertEqual(1, t.tm_yday)
self.assertEqual(flag, t.tm_isdst)
# dst() returns wrong type.
self.assertRaises(TypeError, cls(1, 1, 1, tzinfo=DST("x")).timetuple)
# dst() at the edge.
self.assertEqual(cls(1,1,1, tzinfo=DST(1439)).timetuple().tm_isdst, 1)
self.assertEqual(cls(1,1,1, tzinfo=DST(-1439)).timetuple().tm_isdst, 1)
# dst() out of range.
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(1440)).timetuple)
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(-1440)).timetuple)
def test_utctimetuple(self):
class DST(tzinfo):
def __init__(self, dstvalue):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
# This can't work: DST didn't implement utcoffset.
self.assertRaises(NotImplementedError,
cls(1, 1, 1, tzinfo=DST(0)).utcoffset)
class UOFS(DST):
def __init__(self, uofs, dofs=None):
DST.__init__(self, dofs)
self.uofs = timedelta(minutes=uofs)
def utcoffset(self, dt):
return self.uofs
# Ensure tm_isdst is 0 regardless of what dst() says: DST is never
# in effect for a UTC time.
for dstvalue in -33, 33, 0, None:
d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=UOFS(-53, dstvalue))
t = d.utctimetuple()
self.assertEqual(d.year, t.tm_year)
self.assertEqual(d.month, t.tm_mon)
self.assertEqual(d.day, t.tm_mday)
            self.assertEqual(11, t.tm_hour) # 20min + 53min = 1h 13min
self.assertEqual(13, t.tm_min)
self.assertEqual(d.second, t.tm_sec)
self.assertEqual(d.weekday(), t.tm_wday)
self.assertEqual(d.toordinal() - date(1, 1, 1).toordinal() + 1,
t.tm_yday)
self.assertEqual(0, t.tm_isdst)
# At the edges, UTC adjustment can normalize into years out-of-range
# for a datetime object. Ensure that a correct timetuple is
# created anyway.
tiny = cls(MINYEAR, 1, 1, 0, 0, 37, tzinfo=UOFS(1439))
# That goes back 1 minute less than a full day.
t = tiny.utctimetuple()
self.assertEqual(t.tm_year, MINYEAR-1)
self.assertEqual(t.tm_mon, 12)
self.assertEqual(t.tm_mday, 31)
self.assertEqual(t.tm_hour, 0)
self.assertEqual(t.tm_min, 1)
self.assertEqual(t.tm_sec, 37)
self.assertEqual(t.tm_yday, 366) # "year 0" is a leap year
self.assertEqual(t.tm_isdst, 0)
huge = cls(MAXYEAR, 12, 31, 23, 59, 37, 999999, tzinfo=UOFS(-1439))
# That goes forward 1 minute less than a full day.
t = huge.utctimetuple()
self.assertEqual(t.tm_year, MAXYEAR+1)
self.assertEqual(t.tm_mon, 1)
self.assertEqual(t.tm_mday, 1)
self.assertEqual(t.tm_hour, 23)
self.assertEqual(t.tm_min, 58)
self.assertEqual(t.tm_sec, 37)
self.assertEqual(t.tm_yday, 1)
self.assertEqual(t.tm_isdst, 0)
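    # Editor's note: subtracting the +1439-minute offset from
    # 0001-01-01 00:00:37 lands one minute short of a full day earlier,
    # i.e. 00:01:37 on Dec 31 of the proleptic "year 0"; that year counts
    # as a leap year, which is why tm_yday comes out as 366.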
def test_tzinfo_isoformat(self):
zero = FixedOffset(0, "+00:00")
plus = FixedOffset(220, "+03:40")
minus = FixedOffset(-231, "-03:51")
unknown = FixedOffset(None, "")
cls = self.theclass
datestr = '0001-02-03'
for ofs in None, zero, plus, minus, unknown:
for us in 0, 987001:
d = cls(1, 2, 3, 4, 5, 59, us, tzinfo=ofs)
timestr = '04:05:59' + (us and '.987001' or '')
ofsstr = ofs is not None and d.tzname() or ''
tailstr = timestr + ofsstr
iso = d.isoformat()
self.assertEqual(iso, datestr + 'T' + tailstr)
self.assertEqual(iso, d.isoformat('T'))
self.assertEqual(d.isoformat('k'), datestr + 'k' + tailstr)
self.assertEqual(str(d), datestr + ' ' + tailstr)
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, 5, 6, 7, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertIsNone(base2.tzinfo)
self.assertIsNone(base2.tzname())
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertIs(base.tzinfo, base3.tzinfo)
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_more_astimezone(self):
# The inherited test_astimezone covered some trivial and error cases.
fnone = FixedOffset(None, "None")
f44m = FixedOffset(44, "44")
fm5h = FixedOffset(-timedelta(hours=5), "m300")
dt = self.theclass.now(tz=f44m)
self.assertIs(dt.tzinfo, f44m)
# Replacing with degenerate tzinfo raises an exception.
self.assertRaises(ValueError, dt.astimezone, fnone)
# Ditto with None tz.
self.assertRaises(TypeError, dt.astimezone, None)
# Replacing with same tzinfo makes no change.
x = dt.astimezone(dt.tzinfo)
self.assertIs(x.tzinfo, f44m)
self.assertEqual(x.date(), dt.date())
self.assertEqual(x.time(), dt.time())
# Replacing with different tzinfo does adjust.
got = dt.astimezone(fm5h)
self.assertIs(got.tzinfo, fm5h)
self.assertEqual(got.utcoffset(), timedelta(hours=-5))
expected = dt - dt.utcoffset() # in effect, convert to UTC
expected += fm5h.utcoffset(dt) # and from there to local time
expected = expected.replace(tzinfo=fm5h) # and attach new tzinfo
self.assertEqual(got.date(), expected.date())
self.assertEqual(got.time(), expected.time())
self.assertEqual(got.timetz(), expected.timetz())
self.assertIs(got.tzinfo, expected.tzinfo)
self.assertEqual(got, expected)
def test_aware_subtract(self):
cls = self.theclass
# Ensure that utcoffset() is ignored when the operands have the
# same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
base = cls(8, 9, 10, 11, 12, 13, 14, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
expected = timedelta(minutes=x.minute - y.minute)
self.assertEqual(got, expected)
# OTOH, if the tzinfo members are distinct, utcoffsets aren't
# ignored.
base = cls(8, 9, 10, 11, 12, 13, 14)
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = timedelta(0)
elif x is y is d2:
expected = timedelta(0)
elif x is d2:
expected = timedelta(minutes=(11-59)-0)
else:
assert y is d2
expected = timedelta(minutes=0-(11-59))
self.assertEqual(got, expected)
def test_mixed_compare(self):
t1 = datetime(1, 2, 3, 4, 5, 6, 7)
t2 = datetime(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertRaises(TypeError, lambda: t1 == t2)
# In datetime w/ identical tzinfo objects, utcoffset is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.assertTrue(t1 < t2) # t1's offset counter still going up
def test_subclass_datetimetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.year
args = 2002, 12, 31, 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.year - 7)
# Pain to set up DST-aware tzinfo classes.
def first_sunday_on_or_after(dt):
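# Return dt unchanged if it already falls on a Sunday, otherwise the next Sunday after it.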
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
DAY = timedelta(days=1)
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct,
# which is the first Sunday on or after Oct 25. Because we view 1:MM as
# being standard time on that day, there is no spelling in local time of
# the last hour of DST (that's 1:MM DST, but 1:MM is taken as standard time).
DSTEND = datetime(1, 10, 25, 1)
class USTimeZone(tzinfo):
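# tzinfo with a fixed standard offset plus a one-hour DST adjustment between DSTSTART and DSTEND (the pre-2007 US rules described above).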
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception instead may be sensible here, in one or more of
# the cases.
return ZERO
assert dt.tzinfo is self
# Find first Sunday in April.
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
assert start.weekday() == 6 and start.month == 4 and start.day <= 7
# Find last Sunday in October.
end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
assert end.weekday() == 6 and end.month == 10 and end.day >= 25
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
utc_real = FixedOffset(0, "UTC", 0)
# For better test coverage, we want another flavor of UTC that's west of
# the Eastern and Pacific timezones.
utc_fake = FixedOffset(-12*60, "UTCfake", 0)
class TestTimezoneConversions(unittest.TestCase):
# The DST switch times for 2002, in std time.
dston = datetime(2002, 4, 7, 2)
dstoff = datetime(2002, 10, 27, 1)
theclass = datetime
# Check a time that's inside DST.
def checkinside(self, dt, tz, utc, dston, dstoff):
self.assertEqual(dt.dst(), HOUR)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
# Conversion to UTC and back isn't always an identity here,
# because there are redundant spellings (in local time) of
# UTC time when DST begins: the clock jumps from 1:59:59
# to 3:00:00, and a local time of 2:MM:SS doesn't really
# make sense then. The classes above treat 2:MM:SS as
# daylight time then (it's "after 2am"), really an alias
# for 1:MM:SS standard time. The latter form is what
# conversion back from UTC produces.
if dt.date() == dston.date() and dt.hour == 2:
# We're in the redundant hour, and coming back from
# UTC gives the 1:MM:SS standard-time spelling.
self.assertEqual(there_and_back + HOUR, dt)
# Although during was considered to be in daylight
# time, there_and_back is not.
self.assertEqual(there_and_back.dst(), ZERO)
# They're the same times in UTC.
self.assertEqual(there_and_back.astimezone(utc),
dt.astimezone(utc))
else:
# We're not in the redundant hour.
self.assertEqual(dt, there_and_back)
# Because we have a redundant spelling when DST begins, there is
# (unfortunately) an hour when DST ends that can't be spelled at all in
# local time. When DST ends, the clock jumps from 1:59 back to 1:00
# again. The hour 1:MM DST has no spelling then: 1:MM is taken to be
# standard time. 1:MM DST == 0:MM EST, but 0:MM is taken to be
# daylight time. The hour 1:MM daylight == 0:MM standard can't be
# expressed in local time. Nevertheless, we want conversion back
# from UTC to mimic the local clock's "repeat an hour" behavior.
nexthour_utc = asutc + HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
if dt.date() == dstoff.date() and dt.hour == 0:
# We're in the hour before the last DST hour. The last DST hour
# is ineffable. We want the conversion back to repeat 1:MM.
self.assertEqual(nexthour_tz, dt.replace(hour=1))
nexthour_utc += HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
self.assertEqual(nexthour_tz, dt.replace(hour=1))
else:
self.assertEqual(nexthour_tz - dt, HOUR)
# Check a time that's outside DST.
def checkoutside(self, dt, tz, utc):
self.assertEqual(dt.dst(), ZERO)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
# Converting to UTC and back is an identity too.
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
self.assertEqual(dt, there_and_back)
def convert_between_tz_and_utc(self, tz, utc):
dston = self.dston.replace(tzinfo=tz)
# Because 1:MM on the day DST ends is taken as being standard time,
# there is no spelling in tz for the last hour of daylight time.
# For purposes of the test, the last hour of DST is 0:MM, which is
# taken as being daylight time (and 1:MM is taken as being standard
# time).
dstoff = self.dstoff.replace(tzinfo=tz)
for delta in (timedelta(weeks=13),
DAY,
HOUR,
timedelta(minutes=1),
timedelta(microseconds=1)):
self.checkinside(dston, tz, utc, dston, dstoff)
for during in dston + delta, dstoff - delta:
self.checkinside(during, tz, utc, dston, dstoff)
self.checkoutside(dstoff, tz, utc)
for outside in dston - delta, dstoff + delta:
self.checkoutside(outside, tz, utc)
def test_easy(self):
# Despite the name of this test, the endcases are excruciating.
self.convert_between_tz_and_utc(Eastern, utc_real)
self.convert_between_tz_and_utc(Pacific, utc_real)
self.convert_between_tz_and_utc(Eastern, utc_fake)
self.convert_between_tz_and_utc(Pacific, utc_fake)
# The next is really dancing near the edge. It works because
# Pacific and Eastern are far enough apart that their "problem
# hours" don't overlap.
self.convert_between_tz_and_utc(Eastern, Pacific)
self.convert_between_tz_and_utc(Pacific, Eastern)
# OTOH, these fail! Don't enable them. The difficulty is that
# the edge case tests assume that every hour is representable in
# the "utc" class. This is always true for a fixed-offset tzinfo
# class (like utc_real and utc_fake), but not for Eastern or Central.
# For these adjacent DST-aware time zones, the range of time offsets
# tested ends up creating hours in the one that aren't representable
# in the other. For the same reason, we would see failures in the
# Eastern vs Pacific tests too if we added 3*HOUR to the list of
# offset deltas in convert_between_tz_and_utc().
#
# self.convert_between_tz_and_utc(Eastern, Central) # can't work
# self.convert_between_tz_and_utc(Central, Eastern) # can't work
def test_tricky(self):
# 22:00 on day before daylight starts.
fourback = self.dston - timedelta(hours=4)
ninewest = FixedOffset(-9*60, "-0900", 0)
fourback = fourback.replace(tzinfo=ninewest)
# 22:00-0900 is 7:00 UTC == 2:00 EST == 3:00 DST. Since it's "after
# 2", we should get the 3 spelling.
# If we plug 22:00 the day before into Eastern, it "looks like std
# time", so its offset is returned as -5, and -5 - -9 = 4. Adding 4
# to 22:00 lands on 2:00, which makes no sense in local time (the
# local clock jumps from 1 to 3). The point here is to make sure we
# get the 3 spelling.
expected = self.dston.replace(hour=3)
got = fourback.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Similar, but map to 6:00 UTC == 1:00 EST == 2:00 DST. In that
# case we want the 1:00 spelling.
sixutc = self.dston.replace(hour=6, tzinfo=utc_real)
# Now 6:00 "looks like daylight", so the offset wrt Eastern is -4,
# and adding -4-0 == -4 gives the 2:00 spelling. We want the 1:00 EST
# spelling.
expected = self.dston.replace(hour=1)
got = sixutc.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Now on the day DST ends, we want "repeat an hour" behavior.
# UTC 4:MM 5:MM 6:MM 7:MM checking these
# EST 23:MM 0:MM 1:MM 2:MM
# EDT 0:MM 1:MM 2:MM 3:MM
# wall 0:MM 1:MM 1:MM 2:MM against these
for utc in utc_real, utc_fake:
for tz in Eastern, Pacific:
first_std_hour = self.dstoff - timedelta(hours=2) # 23:MM
# Convert that to UTC.
first_std_hour -= tz.utcoffset(None)
# Adjust for possibly fake UTC.
asutc = first_std_hour + utc.utcoffset(None)
# First UTC hour to convert; this is 4:00 when utc=utc_real &
# tz=Eastern.
asutcbase = asutc.replace(tzinfo=utc)
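# Hour 1 is listed twice because the wall clock repeats 1:MM when DST ends.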
for tzhour in (0, 1, 1, 2):
expectedbase = self.dstoff.replace(hour=tzhour)
for minute in 0, 30, 59:
expected = expectedbase.replace(minute=minute)
asutc = asutcbase.replace(minute=minute)
astz = asutc.astimezone(tz)
self.assertEqual(astz.replace(tzinfo=None), expected)
asutcbase += HOUR
def test_bogus_dst(self):
class ok(tzinfo):
def utcoffset(self, dt): return HOUR
def dst(self, dt): return HOUR
now = self.theclass.now().replace(tzinfo=utc_real)
# Doesn't blow up.
now.astimezone(ok())
# Does blow up.
class notok(ok):
def dst(self, dt): return None
self.assertRaises(ValueError, now.astimezone, notok())
def test_fromutc(self):
self.assertRaises(TypeError, Eastern.fromutc) # not enough args
now = datetime.utcnow().replace(tzinfo=utc_real)
self.assertRaises(ValueError, Eastern.fromutc, now) # wrong tzinfo
now = now.replace(tzinfo=Eastern) # insert correct tzinfo
enow = Eastern.fromutc(now) # doesn't blow up
self.assertEqual(enow.tzinfo, Eastern) # has right tzinfo member
self.assertRaises(TypeError, Eastern.fromutc, now, now) # too many args
self.assertRaises(TypeError, Eastern.fromutc, date.today()) # wrong type
# Always converts UTC to standard time.
class FauxUSTimeZone(USTimeZone):
def fromutc(self, dt):
return dt + self.stdoffset
FEastern = FauxUSTimeZone(-5, "FEastern", "FEST", "FEDT")
# UTC 4:MM 5:MM 6:MM 7:MM 8:MM 9:MM
# EST 23:MM 0:MM 1:MM 2:MM 3:MM 4:MM
# EDT 0:MM 1:MM 2:MM 3:MM 4:MM 5:MM
# Check around DST start.
start = self.dston.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 23, 0, 1, 3, 4, 5:
expected = start.replace(hour=wall)
if wall == 23:
expected -= timedelta(days=1)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
# Check around DST end.
start = self.dstoff.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
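# Wall-clock hour 1 appears twice: it is the hour the clock replays when DST ends.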
for wall in 0, 1, 1, 2, 3, 4:
expected = start.replace(hour=wall)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
#############################################################################
# oddballs
class Oddballs(unittest.TestCase):
def test_bug_1028306(self):
# Trying to compare a date to a datetime should act like a mixed-
# type comparison, even though datetime is a subclass of date.
as_date = date.today()
as_datetime = datetime.combine(as_date, time())
self.assertTrue(as_date != as_datetime)
self.assertTrue(as_datetime != as_date)
self.assertFalse(as_date == as_datetime)
self.assertFalse(as_datetime == as_date)
self.assertRaises(TypeError, lambda: as_date < as_datetime)
self.assertRaises(TypeError, lambda: as_datetime < as_date)
self.assertRaises(TypeError, lambda: as_date <= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime <= as_date)
self.assertRaises(TypeError, lambda: as_date > as_datetime)
self.assertRaises(TypeError, lambda: as_datetime > as_date)
self.assertRaises(TypeError, lambda: as_date >= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime >= as_date)
# Nevertheless, comparison should work with the base-class (date)
# projection if use of a date method is forced.
self.assertTrue(as_date.__eq__(as_datetime))
different_day = (as_date.day + 1) % 20 + 1
self.assertFalse(as_date.__eq__(as_datetime.replace(day=different_day)))
# And date should compare with other subclasses of date. If a
# subclass wants to stop this, it's up to the subclass to do so.
date_sc = SubclassDate(as_date.year, as_date.month, as_date.day)
self.assertEqual(as_date, date_sc)
self.assertEqual(date_sc, as_date)
# Ditto for datetimes.
datetime_sc = SubclassDatetime(as_datetime.year, as_datetime.month,
as_date.day, 0, 0, 0)
self.assertEqual(as_datetime, datetime_sc)
self.assertEqual(datetime_sc, as_datetime)
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| mit |
sje397/p2pool | wstools/Utility.py | 292 | 50865 | # Copyright (c) 2003, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of
# any required approvals from the U.S. Dept. of Energy). All rights
# reserved.
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
ident = "$Id$"
import sys, types, httplib, urllib, socket, weakref
from os.path import isfile
from string import join, strip, split
from UserDict import UserDict
from cStringIO import StringIO
from TimeoutSocket import TimeoutSocket, TimeoutError
from urlparse import urlparse
from httplib import HTTPConnection, HTTPSConnection
from exceptions import Exception
try:
from ZSI import _get_idstr
except:
def _get_idstr(pyobj):
'''Python 2.3.x generates a FutureWarning for negative IDs, so
we use a different prefix character to ensure uniqueness, and
call abs() to avoid the warning.'''
x = id(pyobj)
if x < 0:
return 'x%x' % abs(x)
return 'o%x' % x
import xml.dom.minidom
from xml.dom import Node
import logging
from c14n import Canonicalize
from Namespaces import SCHEMA, SOAP, XMLNS, ZSI_SCHEMA_URI
try:
from xml.dom.ext import SplitQName
except:
def SplitQName(qname):
'''SplitQName(qname) -> (string, string)
Split Qualified Name into a tuple of len 2, consisting
of the prefix and the local name.
(prefix, localName)
Special Cases:
xmlns -- (localName, 'xmlns')
None -- (None, localName)
'''
l = qname.split(':')
if len(l) == 1:
l.insert(0, None)
elif len(l) == 2:
if l[0] == 'xmlns':
l.reverse()
else:
return
return tuple(l)
#
# python2.3 urllib.basejoin does not remove current directory ./
# from path and this causes problems on subsequent basejoins.
#
basejoin = urllib.basejoin
if sys.version_info[0:2] < (2, 4, 0, 'final', 0)[0:2]:
#basejoin = lambda base,url: urllib.basejoin(base,url.lstrip('./'))
token = './'
def basejoin(base, url):
if url.startswith(token) is True:
return urllib.basejoin(base,url[2:])
return urllib.basejoin(base,url)
class NamespaceError(Exception):
"""Used to indicate a Namespace Error."""
class RecursionError(Exception):
"""Used to indicate a HTTP redirect recursion."""
class ParseError(Exception):
"""Used to indicate a XML parsing error."""
class DOMException(Exception):
"""Used to indicate a problem processing DOM."""
class Base:
"""Base class for instance level Logging"""
def __init__(self, module=__name__):
self.logger = logging.getLogger('%s-%s(%s)' %(module, self.__class__, _get_idstr(self)))
class HTTPResponse:
"""Captures the information in an HTTP response message."""
def __init__(self, response):
self.status = response.status
self.reason = response.reason
self.headers = response.msg
self.body = response.read() or None
response.close()
class TimeoutHTTP(HTTPConnection):
"""A custom http connection object that supports socket timeout."""
def __init__(self, host, port=None, timeout=20):
HTTPConnection.__init__(self, host, port)
self.timeout = timeout
def connect(self):
self.sock = TimeoutSocket(self.timeout)
self.sock.connect((self.host, self.port))
class TimeoutHTTPS(HTTPSConnection):
"""A custom https object that supports socket timeout. Note that this
is not really complete. The builtin SSL support in the Python socket
module requires a real socket (type) to be passed in to be hooked to
SSL. That means our fake socket won't work and our timeout hacks are
bypassed for send and recv calls. Since our hack _is_ in place at
connect() time, it should at least provide some timeout protection."""
def __init__(self, host, port=None, timeout=20, **kwargs):
HTTPSConnection.__init__(self, str(host), port, **kwargs)
self.timeout = timeout
def connect(self):
sock = TimeoutSocket(self.timeout)
sock.connect((self.host, self.port))
realsock = getattr(sock.sock, '_sock', sock.sock)
ssl = socket.ssl(realsock, self.key_file, self.cert_file)
self.sock = httplib.FakeSocket(sock, ssl)
def urlopen(url, timeout=20, redirects=None):
"""A minimal urlopen replacement hack that supports timeouts for http.
Note that this supports GET only."""
scheme, host, path, params, query, frag = urlparse(url)
if not scheme in ('http', 'https'):
return urllib.urlopen(url)
if params: path = '%s;%s' % (path, params)
if query: path = '%s?%s' % (path, query)
if frag: path = '%s#%s' % (path, frag)
if scheme == 'https':
# If ssl is not compiled into Python, you will not get an exception
# until a conn.endheaders() call. We need to know sooner, so use
# getattr.
try:
import M2Crypto
except ImportError:
if not hasattr(socket, 'ssl'):
raise RuntimeError, 'no built-in SSL Support'
conn = TimeoutHTTPS(host, None, timeout)
else:
ctx = M2Crypto.SSL.Context()
ctx.set_session_timeout(timeout)
conn = M2Crypto.httpslib.HTTPSConnection(host, ssl_context=ctx)
conn.set_debuglevel(1)
else:
conn = TimeoutHTTP(host, None, timeout)
conn.putrequest('GET', path)
conn.putheader('Connection', 'close')
conn.endheaders()
response = None
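# Skip any interim 100 (Continue) responses; httplib's private state is reset below so getresponse() can be called again on the same connection.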
while 1:
response = conn.getresponse()
if response.status != 100:
break
conn._HTTPConnection__state = httplib._CS_REQ_SENT
conn._HTTPConnection__response = None
status = response.status
# If we get an HTTP redirect, we will follow it automatically.
if status >= 300 and status < 400:
location = response.msg.getheader('location')
if location is not None:
response.close()
if redirects is not None and redirects.has_key(location):
raise RecursionError(
'Circular HTTP redirection detected.'
)
if redirects is None:
redirects = {}
redirects[location] = 1
return urlopen(location, timeout, redirects)
raise HTTPResponse(response)
if not (status >= 200 and status < 300):
raise HTTPResponse(response)
body = StringIO(response.read())
response.close()
return body
class DOM:
"""The DOM singleton defines a number of XML related constants and
provides a number of utility methods for DOM related tasks. It
also provides some basic abstractions so that the rest of the
package need not care about actual DOM implementation in use."""
# Namespace stuff related to the SOAP specification.
NS_SOAP_ENV_1_1 = 'http://schemas.xmlsoap.org/soap/envelope/'
NS_SOAP_ENC_1_1 = 'http://schemas.xmlsoap.org/soap/encoding/'
NS_SOAP_ENV_1_2 = 'http://www.w3.org/2001/06/soap-envelope'
NS_SOAP_ENC_1_2 = 'http://www.w3.org/2001/06/soap-encoding'
NS_SOAP_ENV_ALL = (NS_SOAP_ENV_1_1, NS_SOAP_ENV_1_2)
NS_SOAP_ENC_ALL = (NS_SOAP_ENC_1_1, NS_SOAP_ENC_1_2)
NS_SOAP_ENV = NS_SOAP_ENV_1_1
NS_SOAP_ENC = NS_SOAP_ENC_1_1
_soap_uri_mapping = {
NS_SOAP_ENV_1_1 : '1.1',
NS_SOAP_ENV_1_2 : '1.2',
}
SOAP_ACTOR_NEXT_1_1 = 'http://schemas.xmlsoap.org/soap/actor/next'
SOAP_ACTOR_NEXT_1_2 = 'http://www.w3.org/2001/06/soap-envelope/actor/next'
SOAP_ACTOR_NEXT_ALL = (SOAP_ACTOR_NEXT_1_1, SOAP_ACTOR_NEXT_1_2)
def SOAPUriToVersion(self, uri):
"""Return the SOAP version related to an envelope uri."""
value = self._soap_uri_mapping.get(uri)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP envelope uri: %s' % uri
)
def GetSOAPEnvUri(self, version):
"""Return the appropriate SOAP envelope uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'NS_SOAP_ENV_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
def GetSOAPEncUri(self, version):
"""Return the appropriate SOAP encoding uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'NS_SOAP_ENC_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
def GetSOAPActorNextUri(self, version):
"""Return the right special next-actor uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'SOAP_ACTOR_NEXT_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
# Namespace stuff related to XML Schema.
NS_XSD_99 = 'http://www.w3.org/1999/XMLSchema'
NS_XSI_99 = 'http://www.w3.org/1999/XMLSchema-instance'
NS_XSD_00 = 'http://www.w3.org/2000/10/XMLSchema'
NS_XSI_00 = 'http://www.w3.org/2000/10/XMLSchema-instance'
NS_XSD_01 = 'http://www.w3.org/2001/XMLSchema'
NS_XSI_01 = 'http://www.w3.org/2001/XMLSchema-instance'
NS_XSD_ALL = (NS_XSD_99, NS_XSD_00, NS_XSD_01)
NS_XSI_ALL = (NS_XSI_99, NS_XSI_00, NS_XSI_01)
NS_XSD = NS_XSD_01
NS_XSI = NS_XSI_01
_xsd_uri_mapping = {
NS_XSD_99 : NS_XSI_99,
NS_XSD_00 : NS_XSI_00,
NS_XSD_01 : NS_XSI_01,
}
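# Add the inverse entries so the mapping works in both directions (schema uri <-> instance uri).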
for key, value in _xsd_uri_mapping.items():
_xsd_uri_mapping[value] = key
def InstanceUriForSchemaUri(self, uri):
"""Return the appropriate matching XML Schema instance uri for
the given XML Schema namespace uri."""
return self._xsd_uri_mapping.get(uri)
def SchemaUriForInstanceUri(self, uri):
"""Return the appropriate matching XML Schema namespace uri for
the given XML Schema instance namespace uri."""
return self._xsd_uri_mapping.get(uri)
# Namespace stuff related to WSDL.
NS_WSDL_1_1 = 'http://schemas.xmlsoap.org/wsdl/'
NS_WSDL_ALL = (NS_WSDL_1_1,)
NS_WSDL = NS_WSDL_1_1
NS_SOAP_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/soap/'
NS_HTTP_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/http/'
NS_MIME_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/mime/'
NS_SOAP_BINDING_ALL = (NS_SOAP_BINDING_1_1,)
NS_HTTP_BINDING_ALL = (NS_HTTP_BINDING_1_1,)
NS_MIME_BINDING_ALL = (NS_MIME_BINDING_1_1,)
NS_SOAP_BINDING = NS_SOAP_BINDING_1_1
NS_HTTP_BINDING = NS_HTTP_BINDING_1_1
NS_MIME_BINDING = NS_MIME_BINDING_1_1
NS_SOAP_HTTP_1_1 = 'http://schemas.xmlsoap.org/soap/http'
NS_SOAP_HTTP_ALL = (NS_SOAP_HTTP_1_1,)
NS_SOAP_HTTP = NS_SOAP_HTTP_1_1
_wsdl_uri_mapping = {
NS_WSDL_1_1 : '1.1',
}
def WSDLUriToVersion(self, uri):
"""Return the WSDL version related to a WSDL namespace uri."""
value = self._wsdl_uri_mapping.get(uri)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL namespace uri: %s' % uri
)
def GetWSDLUri(self, version):
attr = 'NS_WSDL_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLSoapBindingUri(self, version):
attr = 'NS_SOAP_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLHttpBindingUri(self, version):
attr = 'NS_HTTP_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLMimeBindingUri(self, version):
attr = 'NS_MIME_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLHttpTransportUri(self, version):
attr = 'NS_SOAP_HTTP_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
# Other xml namespace constants.
NS_XMLNS = 'http://www.w3.org/2000/xmlns/'
def isElement(self, node, name, nsuri=None):
"""Return true if the given node is an element with the given
name and optional namespace uri."""
if node.nodeType != node.ELEMENT_NODE:
return 0
return node.localName == name and \
(nsuri is None or self.nsUriMatch(node.namespaceURI, nsuri))
def getElement(self, node, name, nsuri=None, default=join):
"""Return the first child of node with a matching name and
namespace uri, or the default if one is provided."""
nsmatch = self.nsUriMatch
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if ((child.localName == name or name is None) and
(nsuri is None or nsmatch(child.namespaceURI, nsuri))
):
return child
if default is not join:
return default
raise KeyError, name
def getElementById(self, node, id, default=join):
"""Return the first child of node matching an id reference."""
attrget = self.getAttr
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if attrget(child, 'id') == id:
return child
if default is not join:
return default
raise KeyError, id
def getMappingById(self, document, depth=None, element=None,
mapping=None, level=1):
"""Create an id -> element mapping of those elements within a
document that define an id attribute. The depth of the search
may be controlled by using the (1-based) depth argument."""
if document is not None:
element = document.documentElement
mapping = {}
attr = element._attrs.get('id', None)
if attr is not None:
mapping[attr.value] = element
if depth is None or depth > level:
level = level + 1
ELEMENT_NODE = element.ELEMENT_NODE
for child in element.childNodes:
if child.nodeType == ELEMENT_NODE:
self.getMappingById(None, depth, child, mapping, level)
return mapping
def getElements(self, node, name, nsuri=None):
"""Return a sequence of the child elements of the given node that
match the given name and optional namespace uri."""
nsmatch = self.nsUriMatch
result = []
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if ((child.localName == name or name is None) and (
(nsuri is None) or nsmatch(child.namespaceURI, nsuri))):
result.append(child)
return result
def hasAttr(self, node, name, nsuri=None):
"""Return true if element has attribute with the given name and
optional nsuri. If nsuri is not specified, returns true if an
attribute exists with the given name with any namespace."""
if nsuri is None:
if node.hasAttribute(name):
return True
return False
return node.hasAttributeNS(nsuri, name)
def getAttr(self, node, name, nsuri=None, default=join):
"""Return the value of the attribute named 'name' with the
optional nsuri, or the default if one is specified. If
nsuri is not specified, an attribute that matches the
given name will be returned regardless of namespace."""
if nsuri is None:
result = node._attrs.get(name, None)
if result is None:
for item in node._attrsNS.keys():
if item[1] == name:
result = node._attrsNS[item]
break
else:
result = node._attrsNS.get((nsuri, name), None)
if result is not None:
return result.value
if default is not join:
return default
return ''
def getAttrs(self, node):
"""Return a Collection of all attributes
"""
attrs = {}
for k,v in node._attrs.items():
attrs[k] = v.value
return attrs
def getElementText(self, node, preserve_ws=None):
"""Return the text value of an xml element node. Leading and trailing
whitespace is stripped from the value unless the preserve_ws flag
is passed with a true value."""
result = []
for child in node.childNodes:
nodetype = child.nodeType
if nodetype == child.TEXT_NODE or \
nodetype == child.CDATA_SECTION_NODE:
result.append(child.nodeValue)
value = join(result, '')
if preserve_ws is None:
value = strip(value)
return value
def findNamespaceURI(self, prefix, node):
"""Find a namespace uri given a prefix and a context node."""
attrkey = (self.NS_XMLNS, prefix)
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node is None:
raise DOMException('Value for prefix %s not found.' % prefix)
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = node._attrsNS.get(attrkey, None)
if result is not None:
return result.value
if hasattr(node, '__imported__'):
raise DOMException('Value for prefix %s not found.' % prefix)
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Value for prefix %s not found.' % prefix)
def findDefaultNS(self, node):
"""Return the current default namespace uri for the given node."""
attrkey = (self.NS_XMLNS, 'xmlns')
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = node._attrsNS.get(attrkey, None)
if result is not None:
return result.value
if hasattr(node, '__imported__'):
raise DOMException('Cannot determine default namespace.')
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Cannot determine default namespace.')
def findTargetNS(self, node):
"""Return the defined target namespace uri for the given node."""
attrget = self.getAttr
attrkey = (self.NS_XMLNS, 'xmlns')
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = attrget(node, 'targetNamespace', default=None)
if result is not None:
return result
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Cannot determine target namespace.')
def getTypeRef(self, element):
"""Return (namespaceURI, name) for a type attribue of the given
element, or None if the element does not have a type attribute."""
typeattr = self.getAttr(element, 'type', default=None)
if typeattr is None:
return None
parts = typeattr.split(':', 1)
if len(parts) == 2:
nsuri = self.findNamespaceURI(parts[0], element)
else:
nsuri = self.findDefaultNS(element)
return (nsuri, parts[1])
def importNode(self, document, node, deep=0):
"""Implements (well enough for our purposes) DOM node import."""
nodetype = node.nodeType
if nodetype in (node.DOCUMENT_NODE, node.DOCUMENT_TYPE_NODE):
raise DOMException('Illegal node type for importNode')
if nodetype == node.ENTITY_REFERENCE_NODE:
deep = 0
clone = node.cloneNode(deep)
self._setOwnerDoc(document, clone)
clone.__imported__ = 1
return clone
def _setOwnerDoc(self, document, node):
node.ownerDocument = document
for child in node.childNodes:
self._setOwnerDoc(document, child)
def nsUriMatch(self, value, wanted, strict=0, tt=type(())):
"""Return a true value if two namespace uri values match."""
if value == wanted or (type(wanted) is tt) and value in wanted:
return 1
if not strict and value is not None:
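# In non-strict mode, URIs that differ only by a trailing slash are also considered a match.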
wanted = type(wanted) is tt and wanted or (wanted,)
value = value[-1:] != '/' and value or value[:-1]
for item in wanted:
if item == value or item[:-1] == value:
return 1
return 0
def createDocument(self, nsuri, qname, doctype=None):
"""Create a new writable DOM document object."""
impl = xml.dom.minidom.getDOMImplementation()
return impl.createDocument(nsuri, qname, doctype)
def loadDocument(self, data):
"""Load an xml file from a file-like object and return a DOM
document instance."""
return xml.dom.minidom.parse(data)
def loadFromURL(self, url):
"""Load an xml file from a URL and return a DOM document."""
if isfile(url) is True:
file = open(url, 'r')
else:
file = urlopen(url)
try:
result = self.loadDocument(file)
except Exception, ex:
file.close()
raise ParseError(('Failed to load document %s' %url,) + ex.args)
else:
file.close()
return result
DOM = DOM()
class MessageInterface:
'''Higher Level Interface, delegates to DOM singleton, must
be subclassed and implement all methods that throw NotImplementedError.
'''
def __init__(self, sw):
'''Constructor, May be extended, do not override.
sw -- soapWriter instance
'''
self.sw = None
if type(sw) != weakref.ReferenceType and sw is not None:
self.sw = weakref.ref(sw)
else:
self.sw = sw
def AddCallback(self, func, *arglist):
self.sw().AddCallback(func, *arglist)
def Known(self, obj):
return self.sw().Known(obj)
def Forget(self, obj):
return self.sw().Forget(obj)
def canonicalize(self):
'''canonicalize the underlying DOM, and return as string.
'''
raise NotImplementedError, ''
def createDocument(self, namespaceURI=SOAP.ENV, localName='Envelope'):
'''create Document
'''
raise NotImplementedError, ''
def createAppendElement(self, namespaceURI, localName):
'''create and append element(namespaceURI,localName), and return
the node.
'''
raise NotImplementedError, ''
def findNamespaceURI(self, qualifiedName):
raise NotImplementedError, ''
def resolvePrefix(self, prefix):
raise NotImplementedError, ''
def setAttributeNS(self, namespaceURI, localName, value):
'''set attribute (namespaceURI, localName)=value
'''
raise NotImplementedError, ''
def setAttributeType(self, namespaceURI, localName):
'''set attribute xsi:type=(namespaceURI, localName)
'''
raise NotImplementedError, ''
def setNamespaceAttribute(self, namespaceURI, prefix):
'''set namespace attribute xmlns:prefix=namespaceURI
'''
raise NotImplementedError, ''
class ElementProxy(Base, MessageInterface):
'''
'''
_soap_env_prefix = 'SOAP-ENV'
_soap_enc_prefix = 'SOAP-ENC'
_zsi_prefix = 'ZSI'
_xsd_prefix = 'xsd'
_xsi_prefix = 'xsi'
_xml_prefix = 'xml'
_xmlns_prefix = 'xmlns'
_soap_env_nsuri = SOAP.ENV
_soap_enc_nsuri = SOAP.ENC
_zsi_nsuri = ZSI_SCHEMA_URI
_xsd_nsuri = SCHEMA.XSD3
_xsi_nsuri = SCHEMA.XSI3
_xml_nsuri = XMLNS.XML
_xmlns_nsuri = XMLNS.BASE
standard_ns = {\
_xml_prefix:_xml_nsuri,
_xmlns_prefix:_xmlns_nsuri
}
reserved_ns = {\
_soap_env_prefix:_soap_env_nsuri,
_soap_enc_prefix:_soap_enc_nsuri,
_zsi_prefix:_zsi_nsuri,
_xsd_prefix:_xsd_nsuri,
_xsi_prefix:_xsi_nsuri,
}
name = None
namespaceURI = None
def __init__(self, sw, message=None):
'''Initialize.
sw -- SoapWriter
'''
self._indx = 0
MessageInterface.__init__(self, sw)
Base.__init__(self)
self._dom = DOM
self.node = None
if type(message) in (types.StringType,types.UnicodeType):
self.loadFromString(message)
elif isinstance(message, ElementProxy):
self.node = message._getNode()
else:
self.node = message
self.processorNss = self.standard_ns.copy()
self.processorNss.update(self.reserved_ns)
def __str__(self):
return self.toString()
def evaluate(self, expression, processorNss=None):
'''expression -- XPath compiled expression
'''
from Ft.Xml import XPath
if not processorNss:
context = XPath.Context.Context(self.node, processorNss=self.processorNss)
else:
context = XPath.Context.Context(self.node, processorNss=processorNss)
nodes = expression.evaluate(context)
return map(lambda node: ElementProxy(self.sw,node), nodes)
#############################################
# Methods for checking/setting the
# classes (namespaceURI,name) node.
#############################################
def checkNode(self, namespaceURI=None, localName=None):
'''
namespaceURI -- namespace of element
localName -- local name of element
'''
namespaceURI = namespaceURI or self.namespaceURI
localName = localName or self.name
check = False
if localName and self.node:
check = self._dom.isElement(self.node, localName, namespaceURI)
if not check:
raise NamespaceError, 'unexpected node type %s, expecting %s' %(self.node, localName)
def setNode(self, node=None):
if node:
if isinstance(node, ElementProxy):
self.node = node._getNode()
else:
self.node = node
elif self.node:
node = self._dom.getElement(self.node, self.name, self.namespaceURI, default=None)
if not node:
raise NamespaceError, 'cannot find element (%s,%s)' %(self.namespaceURI,self.name)
self.node = node
else:
#self.node = self._dom.create(self.node, self.name, self.namespaceURI, default=None)
self.createDocument(self.namespaceURI, localName=self.name, doctype=None)
self.checkNode()
#############################################
# Wrapper Methods for direct DOM Element Node access
#############################################
def _getNode(self):
return self.node
def _getElements(self):
return self._dom.getElements(self.node, name=None)
def _getOwnerDocument(self):
return self.node.ownerDocument or self.node
def _getUniquePrefix(self):
'''I guess we need to resolve all potential prefixes
because when the current node is attached it copies the
namespaces into the parent node.
'''
while 1:
self._indx += 1
prefix = 'ns%d' %self._indx
try:
self._dom.findNamespaceURI(prefix, self._getNode())
except DOMException, ex:
break
return prefix
def _getPrefix(self, node, nsuri):
'''
Keyword arguments:
node -- DOM Element Node
nsuri -- namespace of attribute value
'''
try:
if node and (node.nodeType == node.ELEMENT_NODE) and \
(nsuri == self._dom.findDefaultNS(node)):
return None
except DOMException, ex:
pass
if nsuri == XMLNS.XML:
return self._xml_prefix
if node.nodeType == Node.ELEMENT_NODE:
for attr in node.attributes.values():
if attr.namespaceURI == XMLNS.BASE \
and nsuri == attr.value:
return attr.localName
else:
if node.parentNode:
return self._getPrefix(node.parentNode, nsuri)
raise NamespaceError, 'namespaceURI "%s" is not defined' %nsuri
def _appendChild(self, node):
'''
Keyword arguments:
node -- DOM Element Node
'''
if node is None:
raise TypeError, 'node is None'
self.node.appendChild(node)
def _insertBefore(self, newChild, refChild):
'''
Keyword arguments:
child -- DOM Element Node to insert
refChild -- DOM Element Node
'''
self.node.insertBefore(newChild, refChild)
def _setAttributeNS(self, namespaceURI, qualifiedName, value):
'''
Keyword arguments:
namespaceURI -- namespace of attribute
qualifiedName -- qualified name of new attribute value
value -- value of attribute
'''
self.node.setAttributeNS(namespaceURI, qualifiedName, value)
#############################################
#General Methods
#############################################
def isFault(self):
'''check to see if this is a soap:fault message.
'''
return False
def getPrefix(self, namespaceURI):
try:
prefix = self._getPrefix(node=self.node, nsuri=namespaceURI)
except NamespaceError, ex:
prefix = self._getUniquePrefix()
self.setNamespaceAttribute(prefix, namespaceURI)
return prefix
def getDocument(self):
return self._getOwnerDocument()
def setDocument(self, document):
self.node = document
def importFromString(self, xmlString):
doc = self._dom.loadDocument(StringIO(xmlString))
node = self._dom.getElement(doc, name=None)
clone = self.importNode(node)
self._appendChild(clone)
def importNode(self, node):
if isinstance(node, ElementProxy):
node = node._getNode()
return self._dom.importNode(self._getOwnerDocument(), node, deep=1)
def loadFromString(self, data):
self.node = self._dom.loadDocument(StringIO(data))
def canonicalize(self):
return Canonicalize(self.node)
def toString(self):
return self.canonicalize()
def createDocument(self, namespaceURI, localName, doctype=None):
'''If specified must be a SOAP envelope, else may construct an empty document.
'''
prefix = self._soap_env_prefix
if namespaceURI == self.reserved_ns[prefix]:
qualifiedName = '%s:%s' %(prefix,localName)
elif namespaceURI is localName is None:
self.node = self._dom.createDocument(None,None,None)
return
else:
raise KeyError, 'only support creation of document in %s' %self.reserved_ns[prefix]
document = self._dom.createDocument(nsuri=namespaceURI, qname=qualifiedName, doctype=doctype)
self.node = document.childNodes[0]
#set up reserved namespace attributes
for prefix,nsuri in self.reserved_ns.items():
self._setAttributeNS(namespaceURI=self._xmlns_nsuri,
qualifiedName='%s:%s' %(self._xmlns_prefix,prefix),
value=nsuri)
#############################################
#Methods for attributes
#############################################
def hasAttribute(self, namespaceURI, localName):
return self._dom.hasAttr(self._getNode(), name=localName, nsuri=namespaceURI)
def setAttributeType(self, namespaceURI, localName):
'''set xsi:type
Keyword arguments:
namespaceURI -- namespace of attribute value
localName -- name of new attribute value
'''
self.logger.debug('setAttributeType: (%s,%s)', namespaceURI, localName)
value = localName
if namespaceURI:
value = '%s:%s' %(self.getPrefix(namespaceURI),localName)
xsi_prefix = self.getPrefix(self._xsi_nsuri)
self._setAttributeNS(self._xsi_nsuri, '%s:type' %xsi_prefix, value)
def createAttributeNS(self, namespace, name, value):
document = self._getOwnerDocument()
##minidom's createAttributeNS only takes (namespaceURI, qualifiedName); the extra value argument is incorrect and the created attribute node is never used.
attrNode = document.createAttributeNS(namespace, name, value)
def setAttributeNS(self, namespaceURI, localName, value):
'''
Keyword arguments:
namespaceURI -- namespace of attribute to create, None is for
attributes in no namespace.
localName -- local name of new attribute
value -- value of new attribute
'''
prefix = None
if namespaceURI:
try:
prefix = self.getPrefix(namespaceURI)
except KeyError, ex:
prefix = 'ns2'
self.setNamespaceAttribute(prefix, namespaceURI)
qualifiedName = localName
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
self._setAttributeNS(namespaceURI, qualifiedName, value)
def setNamespaceAttribute(self, prefix, namespaceURI):
'''
Keyword arguments:
prefix -- xmlns prefix
namespaceURI -- value of prefix
'''
self._setAttributeNS(XMLNS.BASE, 'xmlns:%s' %prefix, namespaceURI)
#############################################
#Methods for elements
#############################################
def createElementNS(self, namespace, qname):
'''
Keyword arguments:
namespace -- namespace of element to create
qname -- qualified name of new element
'''
document = self._getOwnerDocument()
node = document.createElementNS(namespace, qname)
return ElementProxy(self.sw, node)
def createAppendSetElement(self, namespaceURI, localName, prefix=None):
'''Create a new element (namespaceURI,name), append it
to current node, then set it to be the current node.
Keyword arguments:
namespaceURI -- namespace of element to create
localName -- local name of new element
prefix -- if namespaceURI is not defined, declare prefix. defaults
to 'ns1' if left unspecified.
'''
node = self.createAppendElement(namespaceURI, localName, prefix=prefix)
self.node = node._getNode()
def createAppendElement(self, namespaceURI, localName, prefix=None):
'''Create a new element (namespaceURI,name), append it
to current node, and return the newly created node.
Keyword arguments:
namespaceURI -- namespace of element to create
localName -- local name of new element
prefix -- if namespaceURI is not defined, declare prefix. defaults
to 'ns1' if left unspecified.
'''
declare = False
qualifiedName = localName
if namespaceURI:
try:
prefix = self.getPrefix(namespaceURI)
except:
declare = True
prefix = prefix or self._getUniquePrefix()
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
node = self.createElementNS(namespaceURI, qualifiedName)
if declare:
node._setAttributeNS(XMLNS.BASE, 'xmlns:%s' %prefix, namespaceURI)
self._appendChild(node=node._getNode())
return node
def createInsertBefore(self, namespaceURI, localName, refChild):
qualifiedName = localName
prefix = self.getPrefix(namespaceURI)
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
node = self.createElementNS(namespaceURI, qualifiedName)
self._insertBefore(newChild=node._getNode(), refChild=refChild._getNode())
return node
def getElement(self, namespaceURI, localName):
'''
Keyword arguments:
namespaceURI -- namespace of element
localName -- local name of element
'''
node = self._dom.getElement(self.node, localName, namespaceURI, default=None)
if node:
return ElementProxy(self.sw, node)
return None
def getAttributeValue(self, namespaceURI, localName):
'''
Keyword arguments:
namespaceURI -- namespace of attribute
localName -- local name of attribute
'''
if self.hasAttribute(namespaceURI, localName):
attr = self.node.getAttributeNodeNS(namespaceURI,localName)
return attr.value
return None
def getValue(self):
return self._dom.getElementText(self.node, preserve_ws=True)
#############################################
#Methods for text nodes
#############################################
def createAppendTextNode(self, pyobj):
node = self.createTextNode(pyobj)
self._appendChild(node=node._getNode())
return node
def createTextNode(self, pyobj):
document = self._getOwnerDocument()
node = document.createTextNode(pyobj)
return ElementProxy(self.sw, node)
#############################################
#Methods for retrieving namespaceURI's
#############################################
def findNamespaceURI(self, qualifiedName):
parts = SplitQName(qualifiedName)
element = self._getNode()
if len(parts) == 1:
return (self._dom.findTargetNS(element), parts[0])
return self._dom.findNamespaceURI(parts[0], element)
def resolvePrefix(self, prefix):
element = self._getNode()
return self._dom.findNamespaceURI(prefix, element)
def getSOAPEnvURI(self):
return self._soap_env_nsuri
def isEmpty(self):
return not self.node
class Collection(UserDict):
"""Helper class for maintaining ordered named collections."""
default = lambda self,k: k.name
def __init__(self, parent, key=None):
UserDict.__init__(self)
self.parent = weakref.ref(parent)
self.list = []
self._func = key or self.default
def __getitem__(self, key):
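# Integer keys index the ordered list by position; any other key is looked up by name.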
if type(key) is type(1):
return self.list[key]
return self.data[key]
def __setitem__(self, key, item):
item.parent = weakref.ref(self)
self.list.append(item)
self.data[key] = item
def keys(self):
return map(lambda i: self._func(i), self.list)
def items(self):
return map(lambda i: (self._func(i), i), self.list)
def values(self):
return self.list
class CollectionNS(UserDict):
"""Helper class for maintaining ordered named collections."""
default = lambda self,k: k.name
def __init__(self, parent, key=None):
UserDict.__init__(self)
self.parent = weakref.ref(parent)
self.targetNamespace = None
self.list = []
self._func = key or self.default
def __getitem__(self, key):
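# Lookup by position (int), by a (namespaceURI, name) pair, or by name within the parent's targetNamespace.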
self.targetNamespace = self.parent().targetNamespace
if type(key) is types.IntType:
return self.list[key]
elif self.__isSequence(key):
nsuri,name = key
return self.data[nsuri][name]
return self.data[self.parent().targetNamespace][key]
def __setitem__(self, key, item):
item.parent = weakref.ref(self)
self.list.append(item)
targetNamespace = getattr(item, 'targetNamespace', self.parent().targetNamespace)
if not self.data.has_key(targetNamespace):
self.data[targetNamespace] = {}
self.data[targetNamespace][key] = item
def __isSequence(self, key):
return (type(key) in (types.TupleType,types.ListType) and len(key) == 2)
def keys(self):
keys = []
for tns in self.data.keys():
keys.append(map(lambda i: (tns,self._func(i)), self.data[tns].values()))
return keys
def items(self):
return map(lambda i: (self._func(i), i), self.list)
def values(self):
return self.list
# This is a runtime guerilla patch for pulldom (used by minidom) so
# that xml namespace declaration attributes are not lost in parsing.
# We need them to do correct QName linking for XML Schema and WSDL.
# The patch has been submitted to SF for the next Python version.
from xml.dom.pulldom import PullDOM, START_ELEMENT
if 1:
def startPrefixMapping(self, prefix, uri):
if not hasattr(self, '_xmlns_attrs'):
self._xmlns_attrs = []
self._xmlns_attrs.append((prefix or 'xmlns', uri))
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix or ''
PullDOM.startPrefixMapping = startPrefixMapping
def startElementNS(self, name, tagName , attrs):
# Retrieve xml namespace declaration attributes.
xmlns_uri = 'http://www.w3.org/2000/xmlns/'
xmlns_attrs = getattr(self, '_xmlns_attrs', None)
if xmlns_attrs is not None:
for aname, value in xmlns_attrs:
attrs._attrs[(xmlns_uri, aname)] = value
self._xmlns_attrs = []
uri, localname = name
if uri:
# When using namespaces, the reader may or may not
# provide us with the original name. If not, create
# *a* valid tagName from the current context.
if tagName is None:
prefix = self._current_context[uri]
if prefix:
tagName = prefix + ":" + localname
else:
tagName = localname
if self.document:
node = self.document.createElementNS(uri, tagName)
else:
node = self.buildDocument(uri, tagName)
else:
# When the tagname is not prefixed, it just appears as
# localname
if self.document:
node = self.document.createElement(localname)
else:
node = self.buildDocument(None, localname)
for aname,value in attrs.items():
a_uri, a_localname = aname
if a_uri == xmlns_uri:
if a_localname == 'xmlns':
qname = a_localname
else:
qname = 'xmlns:' + a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
elif a_uri:
prefix = self._current_context[a_uri]
if prefix:
qname = prefix + ":" + a_localname
else:
qname = a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
else:
attr = self.document.createAttribute(a_localname)
node.setAttributeNode(attr)
attr.value = value
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
PullDOM.startElementNS = startElementNS
#
# This is a runtime guerilla patch for minidom so
# that xmlns prefixed attributes dont raise AttributeErrors
# during cloning.
#
# Namespace declarations can appear in any start-tag, must look for xmlns
# prefixed attribute names during cloning.
#
# key (attr.namespaceURI, tag)
# ('http://www.w3.org/2000/xmlns/', u'xsd') <xml.dom.minidom.Attr instance at 0x82227c4>
# ('http://www.w3.org/2000/xmlns/', 'xmlns') <xml.dom.minidom.Attr instance at 0x8414b3c>
#
# xml.dom.minidom.Attr.nodeName = xmlns:xsd
# xml.dom.minidom.Attr.value = http://www.w3.org/2001/XMLSchema
if 1:
def _clone_node(node, deep, newOwnerDocument):
"""
Clone a node and give it the new owner document.
Called by Node.cloneNode and Document.importNode
"""
if node.ownerDocument.isSameNode(newOwnerDocument):
operation = xml.dom.UserDataHandler.NODE_CLONED
else:
operation = xml.dom.UserDataHandler.NODE_IMPORTED
if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
clone = newOwnerDocument.createElementNS(node.namespaceURI,
node.nodeName)
for attr in node.attributes.values():
clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
prefix, tag = xml.dom.minidom._nssplit(attr.nodeName)
if prefix == 'xmlns':
a = clone.getAttributeNodeNS(attr.namespaceURI, tag)
elif prefix:
a = clone.getAttributeNodeNS(attr.namespaceURI, tag)
else:
a = clone.getAttributeNodeNS(attr.namespaceURI, attr.nodeName)
a.specified = attr.specified
if deep:
for child in node.childNodes:
c = xml.dom.minidom._clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == xml.dom.minidom.Node.DOCUMENT_FRAGMENT_NODE:
clone = newOwnerDocument.createDocumentFragment()
if deep:
for child in node.childNodes:
c = xml.dom.minidom._clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == xml.dom.minidom.Node.TEXT_NODE:
clone = newOwnerDocument.createTextNode(node.data)
elif node.nodeType == xml.dom.minidom.Node.CDATA_SECTION_NODE:
clone = newOwnerDocument.createCDATASection(node.data)
elif node.nodeType == xml.dom.minidom.Node.PROCESSING_INSTRUCTION_NODE:
clone = newOwnerDocument.createProcessingInstruction(node.target,
node.data)
elif node.nodeType == xml.dom.minidom.Node.COMMENT_NODE:
clone = newOwnerDocument.createComment(node.data)
elif node.nodeType == xml.dom.minidom.Node.ATTRIBUTE_NODE:
clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
node.nodeName)
clone.specified = True
clone.value = node.value
elif node.nodeType == xml.dom.minidom.Node.DOCUMENT_TYPE_NODE:
assert node.ownerDocument is not newOwnerDocument
operation = xml.dom.UserDataHandler.NODE_IMPORTED
clone = newOwnerDocument.implementation.createDocumentType(
node.name, node.publicId, node.systemId)
clone.ownerDocument = newOwnerDocument
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in node.notations._seq:
notation = xml.dom.minidom.Notation(n.nodeName, n.publicId, n.systemId)
notation.ownerDocument = newOwnerDocument
clone.notations._seq.append(notation)
if hasattr(n, '_call_user_data_handler'):
n._call_user_data_handler(operation, n, notation)
for e in node.entities._seq:
entity = xml.dom.minidom.Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
entity.ownerDocument = newOwnerDocument
clone.entities._seq.append(entity)
if hasattr(e, '_call_user_data_handler'):
                        e._call_user_data_handler(operation, e, entity)
else:
            # Note the cloning of Document and DocumentType nodes is
            # implementation specific. minidom handles those cases
            # directly in the cloneNode() methods.
            raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
        # Check for _call_user_data_handler() since this could conceivably
        # be used with other DOM implementations (one of the FourThought
        # DOMs, perhaps?).
if hasattr(node, '_call_user_data_handler'):
node._call_user_data_handler(operation, node, clone)
return clone
xml.dom.minidom._clone_node = _clone_node
| gpl-3.0 |
madgik/exareme | Exareme-Docker/src/exareme/exareme-tools/madis/src/functionslocal/vtable/dummycoding.py | 1 | 2450 | import setpath
import functions
import json
import collections
registered=True
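# convert() below is a small Python 2 helper: it walks nested mappings and
# iterables and turns unicode strings into plain str, leaving other values
# untouched. In the code shown here it is defined but not called directly
# by the operator.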
def convert(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
class dummycoding(functions.vtable.vtbase.VT):
def VTiter(self, *parsedArgs,**envars):
largs, dictargs = self.full_parse(parsedArgs)
if 'query' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1],"No query argument ")
query = dictargs['query']
if 'metadata' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1],"No metadata ")
metadata = json.loads(dictargs['metadata'])
cur = envars['db'].cursor()
c=cur.execute(query)
schema = cur.getdescriptionsafe()
no = 0
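        # Dummy (one-hot) coding of the query result: every column named in
        # `metadata` is expanded into one 0/1 column per category value
        # (values sorted alphabetically), named like column(value); columns
        # not listed in `metadata` pass through unchanged.
        # Illustrative example (hypothetical data): metadata
        # {"gender": "male,female"} and a row with gender='female' yields
        # columns gender(female), gender(male) with values 1, 0.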
for myrow in c:
first_tuple = []
schema1 = []
for item in xrange(len(schema)):
if schema[item][0] in metadata:
vals = metadata[schema[item][0]].split(',')
vals.sort()
for v in vals:
newv = str(schema[item][0]) + '(' + str(v) + ')'
schema1.append(newv)
if myrow[item] == v:
first_tuple.append(1)
else :
first_tuple.append(0)
else:
# print 'no', schema[item][0]
newv = str(schema[item][0])
schema1.append(newv)
first_tuple.append(myrow[item])
if no == 0:
# print tuple((x,) for x in schema1)
yield tuple((x,) for x in schema1)
no =no+1
# print str(first_tuple)
yield tuple(first_tuple,)
def Source():
return functions.vtable.vtbase.VTGenerator(dummycoding)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
        doctest.testmod()
sloria/TextBlob | tests/test_inflect.py | 2 | 1177 | from nose.tools import assert_equals, assert_true
from unittest import TestCase
from textblob.en.inflect import (
plural_categories,
singular_ie,
singular_irregular,
singular_uncountable,
singular_uninflected,
singularize,
pluralize
)
class InflectTestCase(TestCase):
def s_singular_pluralize_test(self):
assert_equals(pluralize('lens'), 'lenses')
def s_singular_singularize_test(self):
assert_equals(singularize('lenses'), 'lens')
def diagnoses_singularize_test(self):
assert_equals(singularize('diagnoses'), 'diagnosis')
def bus_pluralize_test(self):
assert_equals(pluralize('bus'), 'buses')
def test_all_singular_s(self):
for w in plural_categories['s-singular']:
assert_equals(singularize(pluralize(w)), w)
def test_all_singular_ie(self):
for w in singular_ie:
assert_true(pluralize(w).endswith('ies'))
assert_equals(singularize(pluralize(w)), w)
def test_all_singular_irregular(self):
for singular_w in singular_irregular.values():
assert_equals(singular_irregular[pluralize(singular_w)], singular_w)
| mit |
aquajach/sample_teach | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/flock_tool.py | 1835 | 1748 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
executor = FlockTool()
executor.Dispatch(args)
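  # Illustrative invocation (via the gyp-flock-tool wrapper mentioned above):
  #   gyp-flock-tool flock /path/to/lockfile make -j4
  # "flock" is commandified to "Flock", so Dispatch() calls ExecFlock().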
class FlockTool(object):
"""This class emulates the 'flock' command."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
# Note that the stock python on SunOS has a bug
# where fcntl.flock(fd, LOCK_EX) always fails
# with EBADF, that's why we use this F_SETLK
# hack instead.
fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
if sys.platform.startswith('aix'):
# Python on AIX is compiled with LARGEFILE support, which changes the
# struct size.
op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
else:
op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(fd, fcntl.F_SETLK, op)
return subprocess.call(cmd_list)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit |
bjornlevi/5thpower | nefndaralit/env/lib/python3.6/site-packages/requests/cookies.py | 133 | 18430 | # -*- coding: utf-8 -*-
"""
requests.cookies
~~~~~~~~~~~~~~~~
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import calendar
from ._internal_utils import to_native_string
from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping
try:
import threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = to_native_string(self._r.headers['Host'], encoding='utf-8')
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
def getheaders(self, name):
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""
Produce an appropriate Cookie header string to be sent with `request`, or None.
:rtype: str
"""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name != name:
continue
if domain is not None and domain != cookie.domain:
continue
if path is not None and path != cookie.path:
continue
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific.
"""
class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1).
"""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
"""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar.
.. seealso:: itervalues() and iteritems().
"""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar.
.. seealso:: values() and items().
"""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar.
.. seealso:: iterkeys() and iteritems().
"""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar.
.. seealso:: keys() and items().
"""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar.
.. seealso:: iterkeys() and itervalues().
"""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
vanilla python dict of key value pairs.
.. seealso:: keys() and values().
"""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise.
:rtype: bool
"""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements.
:rtype: dict
"""
dictionary = {}
for cookie in iter(self):
if (
(domain is None or cookie.domain == domain) and
(path is None or cookie.path == path)
):
dictionary[cookie.name] = cookie.value
return dictionary
def __contains__(self, name):
try:
return super(RequestsCookieJar, self).__contains__(name)
except CookieConflictError:
return True
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1).
"""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead.
"""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``.
"""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
"""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:raises KeyError: if cookie is not found
:raises CookieConflictError: if there are multiple cookies
that match name and optionally domain and path
:return: cookie.value
"""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.set_policy(self.get_policy())
new_cj.update(self)
return new_cj
def get_policy(self):
"""Return the CookiePolicy instance used."""
return self._policy
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
# We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = {
'version': 0,
'name': name,
'value': value,
'port': None,
'domain': '',
'path': '/',
'secure': False,
'expires': None,
'discard': True,
'comment': None,
'comment_url': None,
'rest': {'HttpOnly': None},
'rfc2109': False,
}
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel['max-age']:
try:
expires = int(time.time() + int(morsel['max-age']))
except ValueError:
raise TypeError('max-age: %s must be integer' % morsel['max-age'])
elif morsel['expires']:
time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
expires = calendar.timegm(
time.strptime(morsel['expires'], time_template)
)
return create_cookie(
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
discard=False,
domain=morsel['domain'],
expires=expires,
name=morsel.key,
path=morsel['path'],
port=None,
rest={'HttpOnly': morsel['httponly']},
rfc2109=False,
secure=bool(morsel['secure']),
value=morsel.value,
version=morsel['version'] or 0,
)
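# Illustrative usage of morsel_to_cookie (not part of the original module;
# Python 3 shown, Python 2 would use Cookie.SimpleCookie):
#   from http.cookies import SimpleCookie
#   sc = SimpleCookie()
#   sc["token"] = "xyz"
#   cookie = morsel_to_cookie(sc["token"])  # -> cookielib.Cookie named "token"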
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
:rtype: CookieJar
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
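# Illustrative usage of cookiejar_from_dict (not part of the original module):
#   jar = cookiejar_from_dict({"sessionid": "abc123"})
#   jar.get("sessionid")  # -> "abc123"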
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
:rtype: CookieJar
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
| mit |
ahaym/eden | modules/s3db/cms.py | 4 | 67968 | # -*- coding: utf-8 -*-
""" Sahana Eden Content Management System Model
@copyright: 2012-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3ContentModel",
"S3ContentMapModel",
"S3ContentOrgModel",
"S3ContentOrgGroupModel",
"S3ContentUserModel",
"cms_index",
"cms_documentation",
"cms_rheader",
"cms_customise_post_fields",
"cms_post_list_layout",
"S3CMS",
)
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# Compact JSON encoding
SEPARATORS = (",", ":")
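# e.g. json.dumps(data, separators=SEPARATORS) emits '{"a":1,"b":2}'-style
# output with no whitespace, as used in cms_post_list_layout below.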
# =============================================================================
class S3ContentModel(S3Model):
"""
Content Management System
"""
names = ("cms_series",
"cms_post",
"cms_post_id",
"cms_post_module",
"cms_tag",
"cms_tag_post",
"cms_comment",
)
def model(self):
T = current.T
db = current.db
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
set_method = self.set_method
settings = current.deployment_settings
# ---------------------------------------------------------------------
# Series
# - lists of Posts displaying in recent-first mode
#
tablename = "cms_series"
define_table(tablename,
Field("name", length=255, notnull=True, unique=True,
label = T("Name"),
),
Field("avatar", "boolean",
default = False,
label = T("Show author picture?"),
represent = s3_yes_no_represent,
),
Field("location", "boolean",
default = False,
label = T("Show Location?"),
represent = s3_yes_no_represent,
),
Field("richtext", "boolean",
default = True,
label = T("Rich Text?"),
represent = s3_yes_no_represent,
),
Field("replies", "boolean",
default = False,
label = T("Comments permitted?"),
represent = s3_yes_no_represent,
),
s3_comments(),
# Multiple Roles (@ToDo: Implement the restriction)
s3_roles_permitted(readable = False,
writable = False
),
*s3_meta_fields())
# CRUD Strings
ADD_SERIES = T("Create Series")
crud_strings[tablename] = Storage(
label_create = ADD_SERIES,
title_display = T("Series Details"),
title_list = T("Series"),
title_update = T("Edit Series"),
title_upload = T("Import Series"),
label_list_button = T("List Series"),
msg_record_created = T("Series added"),
msg_record_modified = T("Series updated"),
msg_record_deleted = T("Series deleted"),
msg_list_empty = T("No series currently defined"))
# Reusable field
translate = settings.get_L10n_translate_cms_series()
represent = S3Represent(lookup=tablename, translate=translate)
series_id = S3ReusableField("series_id", "reference %s" % tablename,
label = T("Type"), # Even if this isn't always the use-case
ondelete = "CASCADE",
readable = False,
writable = False,
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cms_series.id",
represent)),
)
# Resource Configuration
configure(tablename,
create_next = URL(f="series", args=["[id]", "post"]),
onaccept = self.cms_series_onaccept,
)
# Components
add_components(tablename,
cms_post = "series_id",
)
# ---------------------------------------------------------------------
# Posts
# - single blocks of [rich] text which can be embedded into a page,
# be viewed as full pages or as part of a Series
#
if settings.get_cms_richtext():
body_represent = lambda body: XML(body)
body_widget = s3_richtext_widget
else:
body_represent = lambda body: XML(s3_URLise(body))
body_widget = None
tablename = "cms_post"
define_table(tablename,
self.super_link("doc_id", "doc_entity"),
series_id(),
Field("name", #notnull=True,
comment = T("This isn't visible to the published site, but is used to allow menu items to point to the page"),
label = T("Name"),
),
Field("title",
comment = T("The title of the page, as seen in the browser (optional)"),
label = T("Title"),
),
Field("body", "text", notnull=True,
label = T("Body"),
represent = body_represent,
widget = body_widget,
),
# @ToDo: Move this to link table?
# - although this makes widget hard!
self.gis_location_id(),
# @ToDo: Move this to link table?
# - although this makes widget hard!
self.pr_person_id(label = T("Contact"),
# Enable only in certain conditions
readable = False,
writable = False,
),
Field("avatar", "boolean",
default = False,
label = T("Show author picture?"),
represent = s3_yes_no_represent,
),
Field("replies", "boolean",
default = False,
label = T("Comments permitted?"),
represent = s3_yes_no_represent,
),
s3_datetime(default = "now"),
# @ToDo: Also have a datetime for 'Expires On'
Field("expired", "boolean",
default = False,
label = T("Expired?"),
represent = s3_yes_no_represent,
),
#Field("published", "boolean",
# default=True,
# label=T("Published")),
s3_comments(),
# Multiple Roles (@ToDo: Implement the restriction)
s3_roles_permitted(readable = False,
writable = False
),
*s3_meta_fields())
# CRUD Strings
ADD_POST = T("Create Post")
crud_strings[tablename] = Storage(
label_create = ADD_POST,
title_display = T("Post Details"),
title_list = T("Posts"),
title_update = T("Edit Post"),
title_upload = T("Import Posts"),
label_list_button = T("List Posts"),
msg_record_created = T("Post added"),
msg_record_modified = T("Post updated"),
msg_record_deleted = T("Post deleted"),
msg_list_empty = T("No posts currently available"))
# Reusable field
represent = S3Represent(lookup=tablename)
post_id = S3ReusableField("post_id", "reference %s" % tablename,
comment = S3AddResourceLink(c="cms", f="post",
title=ADD_POST,
tooltip=T("A block of rich text which could be embedded into a page, viewed as a complete page or viewed as a list of news items.")),
label = T("Post"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cms_post.id",
represent)),
sortby = "name",
)
list_fields = ["title",
"body",
"location_id",
"date",
"expired",
"comments"
]
org_field = settings.get_cms_organisation()
if org_field == "created_by$organisation_id":
org_field = "auth_user.organisation_id"
elif org_field == "post_organisation.organisation_id":
org_field = "cms_post_organisation.organisation_id"
if org_field:
list_fields.append(org_field)
filter_widgets = [S3TextFilter(["body"],
label = T("Search"),
_class = "filter-search",
#_placeholder = T("Search").upper(),
),
S3OptionsFilter("series_id",
label = T("Type"),
hidden = True,
),
S3LocationFilter("location_id",
label = T("Location"),
hidden = True,
),
S3OptionsFilter("created_by$organisation_id",
label = T("Organization"),
# Can't use this for integers, use field.represent instead
#represent = "%(name)s",
hidden = True,
),
S3DateFilter("created_on",
label = T("Date"),
hide_time = True,
hidden = True,
),
]
# Resource Configuration
configure(tablename,
context = {"event": "event.id",
"incident": "incident.id",
"location": "location_id",
"organisation": "created_by$organisation_id",
},
deduplicate = self.cms_post_duplicate,
filter_actions = [{"label": "Open Table",
"icon": "table",
"function": "newsfeed",
"method": "datalist",
},
{"label": "Open Map",
"icon": "globe",
"method": "map",
},
{"label": "Open RSS Feed",
"icon": "rss",
"format": "rss",
},
],
filter_widgets = filter_widgets,
list_fields = list_fields,
list_layout = cms_post_list_layout,
list_orderby = "cms_post.date desc",
onaccept = self.cms_post_onaccept,
orderby = "cms_post.date desc",
summary = [{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
#{"name": "report",
# "label": "Report",
# "widgets": [{"method": "report",
# "ajax_init": True}]
# },
{"name": "map",
"label": "Map",
"widgets": [{"method": "map",
"ajax_init": True}],
},
],
super_entity = "doc_entity",
)
# Components
add_components(tablename,
cms_comment = "post_id",
cms_post_layer = "post_id",
cms_post_module = "post_id",
cms_post_user = {"name": "bookmark",
"joinby": "post_id",
},
cms_tag = {"link": "cms_tag_post",
"joinby": "post_id",
"key": "tag_id",
"actuate": "hide",
},
# For filter widget
cms_tag_post = "post_id",
cms_post_organisation = {"joinby": "post_id",
# @ToDo: deployment_setting
"multiple": False,
},
cms_post_organisation_group = {"joinby": "post_id",
# @ToDo: deployment_setting
"multiple": False,
},
# For InlineForm to tag Posts to Events/Incidents/Incident Types
event_post = (# Events
{"name": "event_post",
"joinby": "post_id",
},
# Incidents
{"name": "incident_post",
"joinby": "post_id",
}
),
event_post_incident_type = "post_id",
# For Profile to filter appropriately
event_event = {"link": "event_post",
"joinby": "post_id",
"key": "event_id",
"actuate": "hide",
},
event_incident = {"link": "event_post",
"joinby": "post_id",
"key": "incident_id",
"actuate": "hide",
},
event_incident_type = {"link": "event_post_incident_type",
"joinby": "post_id",
"key": "incident_type_id",
"actuate": "hide",
},
)
# Custom Methods
set_method("cms", "post",
method = "add_tag",
action = self.cms_add_tag)
set_method("cms", "post",
method = "remove_tag",
action = self.cms_remove_tag)
set_method("cms", "post",
method = "add_bookmark",
action = self.cms_add_bookmark)
set_method("cms", "post",
method = "remove_bookmark",
action = self.cms_remove_bookmark)
# ---------------------------------------------------------------------
# Modules/Resources <> Posts link table
#
tablename = "cms_post_module"
define_table(tablename,
post_id(empty=False),
Field("module",
comment = T("If you specify a module then this will be used as the text in that module's index page"),
label = T("Module"),
),
Field("resource",
comment = T("If you specify a resource then this will be used as the text in that resource's summary page"),
label = T("Resource"),
),
#Field("record",
# comment = T("If you specify a record then this will be used as a hyperlink to that resource"),
# label = T("Record"),
# ),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Post"),
title_display = T("Post Details"),
title_list = T("Posts"),
title_update = T("Edit Post"),
label_list_button = T("List Posts"),
msg_record_created = T("Post set as Module/Resource homepage"),
msg_record_modified = T("Post updated"),
msg_record_deleted = T("Post removed"),
msg_list_empty = T("No posts currently set as module/resource homepages"))
# ---------------------------------------------------------------------
# Tags
#
tablename = "cms_tag"
define_table(tablename,
Field("name",
label = T("Tag"),
),
s3_comments(),
# Multiple Roles (@ToDo: Implement the restriction)
#s3_roles_permitted(readable = False,
# writable = False
# ),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Tag"),
title_display = T("Tag Details"),
title_list = T("Tags"),
title_update = T("Edit Tag"),
title_upload = T("Import Tags"),
label_list_button = T("List Tags"),
msg_record_created = T("Tag added"),
msg_record_modified = T("Tag updated"),
msg_record_deleted = T("Tag deleted"),
msg_list_empty = T("No tags currently defined"))
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
tag_id = S3ReusableField("tag_id", "reference %s" % tablename,
label = T("Tag"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cms_tag.id",
represent)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Tags <> Posts link table
#
tablename = "cms_tag_post"
define_table(tablename,
post_id(empty = False),
tag_id(empty = False),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Tag Post"),
title_display = T("Tag Details"),
title_list = T("Tags"),
title_update = T("Edit Tag"),
title_upload = T("Import Tags"),
label_list_button = T("List Tagged Posts"),
msg_record_created = T("Post Tagged"),
msg_record_modified = T("Tag updated"),
msg_record_deleted = T("Tag removed"),
msg_list_empty = T("No posts currently tagged"))
# ---------------------------------------------------------------------
# Comments
# - threaded comments on Posts
#
# @ToDo: Attachments?
#
# Parent field allows us to:
# * easily filter for top-level threads
# * easily filter for next level of threading
# * hook a new reply into the correct location in the hierarchy
#
tablename = "cms_comment"
define_table(tablename,
Field("parent", "reference cms_comment",
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cms_comment.id")),
readable = False,
),
post_id(empty=False),
Field("body", "text", notnull=True,
label = T("Comment"),
),
*s3_meta_fields())
# Resource Configuration
configure(tablename,
list_fields = ["id",
"post_id",
"created_by",
"modified_on"
],
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(cms_post_id = post_id,
)
# -------------------------------------------------------------------------
def defaults(self):
"""
Safe defaults for model-global names in case module is disabled
"""
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(cms_post_id = lambda **attr: dummy("post_id"),
)
# -------------------------------------------------------------------------
@staticmethod
def cms_series_onaccept(form):
"""
Cascade values down to all component Posts
"""
form_vars = form.vars
db = current.db
table = db.cms_post
query = (table.series_id == form_vars.id)
db(query).update(avatar = form_vars.avatar,
replies = form_vars.replies,
roles_permitted = form_vars.roles_permitted,
)
# -------------------------------------------------------------------------
@staticmethod
def cms_post_duplicate(item):
"""
CMS Post Import - Update Detection (primarily for non-blog
contents such as homepage, module index pages, summary pages,
or online documentation):
- same name and series => same post
@param item: the import item
@todo: if no name present => use cms_post_module component
to identify updates (also requires deduplication of
cms_post_module component)
"""
data = item.data
name = data.get("name")
series_id = data.get("series_id")
if not name:
return
table = item.table
query = (table.name == name) & \
(table.series_id == series_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def cms_post_onaccept(form):
"""
Handle the case where the page is for a Module home page,
Resource Summary page or Map Layer
"""
db = current.db
s3db = current.s3db
post_id = form.vars.id
get_vars = current.request.get_vars
module = get_vars.get("module", None)
if module:
table = db.cms_post_module
query = (table.module == module)
resource = get_vars.get("resource", None)
if resource:
# Resource Summary page
query &= (table.resource == resource)
else:
# Module home page
query &= ((table.resource == None) | \
(table.resource == "index"))
result = db(query).update(post_id=post_id)
if not result:
table.insert(post_id=post_id,
module=module,
resource=resource,
)
layer_id = get_vars.get("layer_id", None)
if layer_id:
table = s3db.cms_post_layer
query = (table.layer_id == layer_id)
result = db(query).update(post_id=post_id)
if not result:
table.insert(post_id=post_id,
layer_id=layer_id,
)
# Read record
table = db.cms_post
record = db(table.id == post_id).select(table.person_id,
table.created_by,
limitby=(0, 1)
).first()
if record.created_by and not record.person_id:
# Set from Author
ptable = s3db.pr_person
putable = s3db.pr_person_user
query = (putable.user_id == record.created_by) & \
(putable.pe_id == ptable.pe_id)
person = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if person:
db(table.id == post_id).update(person_id=person.id)
# -----------------------------------------------------------------------------
@staticmethod
def cms_add_tag(r, **attr):
"""
Add a Tag to a Post
S3Method for interactive requests
- designed to be called as an afterTagAdded callback to tag-it.js
"""
post_id = r.id
if not post_id or len(r.args) < 3:
raise HTTP(501, current.ERROR.BAD_METHOD)
tag = r.args[2]
db = current.db
ttable = db.cms_tag
ltable = db.cms_tag_post
exists = db(ttable.name == tag).select(ttable.id,
ttable.deleted,
ttable.deleted_fk,
limitby=(0, 1)
).first()
if exists:
tag_id = exists.id
if exists.deleted:
if exists.deleted_fk:
data = json.loads(exists.deleted_fk)
data["deleted"] = False
else:
data = dict(deleted=False)
db(ttable.id == tag_id).update(**data)
else:
tag_id = ttable.insert(name=tag)
query = (ltable.tag_id == tag_id) & \
(ltable.post_id == post_id)
exists = db(query).select(ltable.id,
ltable.deleted,
ltable.deleted_fk,
limitby=(0, 1)
).first()
if exists:
if exists.deleted:
if exists.deleted_fk:
data = json.loads(exists.deleted_fk)
data["deleted"] = False
else:
data = dict(deleted=False)
db(ltable.id == exists.id).update(**data)
else:
ltable.insert(post_id = post_id,
tag_id = tag_id,
)
output = current.xml.json_message(True, 200, "Tag Added")
current.response.headers["Content-Type"] = "application/json"
return output
# -----------------------------------------------------------------------------
@staticmethod
def cms_remove_tag(r, **attr):
"""
Remove a Tag from a Post
S3Method for interactive requests
- designed to be called as an afterTagRemoved callback to tag-it.js
"""
post_id = r.id
if not post_id or len(r.args) < 3:
raise HTTP(501, current.ERROR.BAD_METHOD)
tag = r.args[2]
db = current.db
ttable = db.cms_tag
exists = db(ttable.name == tag).select(ttable.id,
ttable.deleted,
limitby=(0, 1)
).first()
if exists:
tag_id = exists.id
ltable = db.cms_tag_post
query = (ltable.tag_id == tag_id) & \
(ltable.post_id == post_id)
exists = db(query).select(ltable.id,
ltable.deleted,
limitby=(0, 1)
).first()
if exists and not exists.deleted:
resource = current.s3db.resource("cms_tag_post", id=exists.id)
resource.delete()
output = current.xml.json_message(True, 200, "Tag Removed")
current.response.headers["Content-Type"] = "application/json"
return output
# -----------------------------------------------------------------------------
@staticmethod
def cms_add_bookmark(r, **attr):
"""
Bookmark a Post
S3Method for interactive requests
"""
post_id = r.id
user = current.auth.user
user_id = user and user.id
if not post_id or not user_id:
raise HTTP(501, current.ERROR.BAD_METHOD)
db = current.db
ltable = db.cms_post_user
query = (ltable.post_id == post_id) & \
(ltable.user_id == user_id)
exists = db(query).select(ltable.id,
ltable.deleted,
ltable.deleted_fk,
limitby=(0, 1)
).first()
if exists:
link_id = exists.id
if exists.deleted:
if exists.deleted_fk:
data = json.loads(exists.deleted_fk)
data["deleted"] = False
else:
data = dict(deleted=False)
db(ltable.id == link_id).update(**data)
else:
link_id = ltable.insert(post_id = post_id,
user_id = user_id,
)
output = current.xml.json_message(True, 200, "Bookmark Added")
current.response.headers["Content-Type"] = "application/json"
return output
# -----------------------------------------------------------------------------
@staticmethod
def cms_remove_bookmark(r, **attr):
"""
Remove a Bookmark for a Post
S3Method for interactive requests
"""
post_id = r.id
user = current.auth.user
user_id = user and user.id
if not post_id or not user_id:
raise HTTP(501, current.ERROR.BAD_METHOD)
db = current.db
ltable = db.cms_post_user
query = (ltable.post_id == post_id) & \
(ltable.user_id == user_id)
exists = db(query).select(ltable.id,
ltable.deleted,
limitby=(0, 1)
).first()
if exists and not exists.deleted:
resource = current.s3db.resource("cms_post_user", id=exists.id)
resource.delete()
output = current.xml.json_message(True, 200, "Bookmark Removed")
current.response.headers["Content-Type"] = "application/json"
return output
# =============================================================================
class S3ContentMapModel(S3Model):
"""
Use of the CMS to provide extra data about Map Layers
"""
names = ("cms_post_layer",)
def model(self):
# ---------------------------------------------------------------------
# Layers <> Posts link table
#
tablename = "cms_post_layer"
self.define_table(tablename,
self.cms_post_id(empty = False),
self.super_link("layer_id", "gis_layer_entity"),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class S3ContentOrgModel(S3Model):
"""
Link Posts to Organisations
"""
names = ("cms_post_organisation",)
def model(self):
# ---------------------------------------------------------------------
# Organisations <> Posts link table
#
tablename = "cms_post_organisation"
self.define_table(tablename,
self.cms_post_id(empty = False,
ondelete = "CASCADE",
),
self.org_organisation_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class S3ContentOrgGroupModel(S3Model):
"""
Link Posts to Organisation Groups (Coalitions/Networks)
"""
names = ("cms_post_organisation_group",)
def model(self):
# ---------------------------------------------------------------------
# Organisation Groups <> Posts link table
#
tablename = "cms_post_organisation_group"
self.define_table(tablename,
self.cms_post_id(empty=False),
self.org_group_id(empty=False),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class S3ContentUserModel(S3Model):
"""
Link Posts to Users to allow Users to Bookmark posts
"""
names = ("cms_post_user",)
def model(self):
# ---------------------------------------------------------------------
# Users <> Posts link table
#
tablename = "cms_post_user"
self.define_table(tablename,
self.cms_post_id(empty=False),
Field("user_id", current.auth.settings.table_user),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
def cms_rheader(r, tabs=[]):
""" CMS Resource Headers """
if r.representation != "html":
# RHeaders only used in interactive views
return None
record = r.record
if record is None:
# List or Create form: rheader makes no sense here
return None
table = r.table
resourcename = r.name
T = current.T
if resourcename == "series":
# Tabs
tabs = [(T("Basic Details"), None),
(T("Posts"), "post"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name
),
), rheader_tabs)
elif resourcename == "post":
# Tabs
tabs = [(T("Basic Details"), None),
]
if record.replies:
tabs.append((T("Comments"), "discuss"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
record.name
),
), rheader_tabs)
return rheader
# =============================================================================
def cms_index(module, resource=None, page_name=None, alt_function=None):
"""
Return a module index page retrieved from CMS
- or run an alternate function if not found
"""
response = current.response
settings = current.deployment_settings
if not page_name:
page_name = settings.modules[module].name_nice
response.title = page_name
item = None
if settings.has_module("cms") and not settings.get_cms_hide_index(module):
db = current.db
table = current.s3db.cms_post
ltable = db.cms_post_module
query = (ltable.module == module) & \
(ltable.post_id == table.id) & \
(table.deleted != True)
if resource is None:
query &= ((ltable.resource == None) | \
(ltable.resource == "index"))
else:
query &= (ltable.resource == resource)
_item = db(query).select(table.id,
table.body,
table.title,
limitby=(0, 1)).first()
# @ToDo: Replace this crude check with?
#if current.auth.s3_has_permission("update", table, record_id=_item.id):
auth = current.auth
ADMIN = auth.get_system_roles().ADMIN
ADMIN = auth.s3_has_role(ADMIN)
get_vars = {"module": module}
if resource:
get_vars["resource"] = resource
if _item:
if _item.title:
response.title = _item.title
if ADMIN:
item = DIV(XML(_item.body),
BR(),
A(current.T("Edit"),
_href=URL(c="cms", f="post",
args=[_item.id, "update"],
vars=get_vars),
_class="action-btn"))
else:
item = XML(_item.body)
elif ADMIN:
item = DIV(H2(page_name),
A(current.T("Edit"),
_href=URL(c="cms", f="post", args="create",
vars=get_vars),
_class="action-btn"))
if not item:
if alt_function:
# Serve the alternate controller function
# Copied from gluon.main serve_controller()
# (We don't want to re-run models)
            from gluon.compileapp import build_environment, run_controller_in, run_view_in
            from gluon.contenttype import contenttype
            import time
request = current.request
environment = build_environment(request, response, current.session)
environment["settings"] = settings
environment["s3db"] = current.s3db
# Retain certain globals (extend as needed):
g = globals()
environment["s3base"] = g.get("s3base")
environment["s3_redirect_default"] = g.get("s3_redirect_default")
page = run_controller_in(request.controller, alt_function, environment)
if isinstance(page, dict):
response._vars = page
response._view_environment.update(page)
run_view_in(response._view_environment)
page = response.body.getvalue()
# Set default headers if not set
default_headers = [
("Content-Type", contenttype("." + request.extension)),
("Cache-Control",
"no-store, no-cache, must-revalidate, post-check=0, pre-check=0"),
("Expires", time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime())),
("Pragma", "no-cache")]
for key, value in default_headers:
response.headers.setdefault(key, value)
raise HTTP(response.status, page, **response.headers)
else:
item = H2(page_name)
# tbc
report = ""
response.view = "index.html"
return dict(item=item, report=report)
# =============================================================================
def cms_documentation(r, default_page, default_url):
"""
Render an online documentation page, to be called from prep
@param r: the S3Request
@param default_page: the default page name
@param default_url: the default URL if no contents found
"""
row = r.record
if not row:
# Find the CMS page
name = r.get_vars.get("name", default_page)
table = r.resource.table
query = (table.name == name) & (table.deleted != True)
row = current.db(query).select(table.id,
table.title,
table.body,
limitby=(0, 1)).first()
if not row:
if name != default_page:
# Error - CMS page not found
r.error(404, current.T("Page not found"),
next=URL(args=current.request.args, vars={}),
)
else:
# No CMS contents for module homepage found at all
# => redirect to default page (preserving all errors)
from s3 import s3_redirect_default
s3_redirect_default(default_url)
# Render the page
from s3 import S3XMLContents
return {"bypass": True,
"output": {"title": row.title,
"contents": S3XMLContents(row.body),
},
}
# =============================================================================
class S3CMS(S3Method):
"""
Class to generate a Rich Text widget to embed in a page
"""
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point to apply cms method to S3Requests
- produces a full page with a Richtext widget
@param r: the S3Request
@param attr: dictionary of parameters for the method handler
@return: output object to send to the view
"""
# Not Implemented
r.error(405, current.ERROR.BAD_METHOD)
# -------------------------------------------------------------------------
def widget(self, r, method="cms", widget_id=None, **attr):
"""
Render a Rich Text widget suitable for use in a page such as
S3Summary
@param method: the widget method
@param r: the S3Request
@param attr: controller attributes
@ToDo: Support comments
"""
if not current.deployment_settings.has_module("cms"):
return ""
# This is currently assuming that we're being used in a Summary page or similar
request = current.request
return self.resource_content(request.controller,
request.function,
widget_id)
# -------------------------------------------------------------------------
@staticmethod
def resource_content(module, resource, widget_id=None):
db = current.db
table = current.s3db.cms_post
ltable = db.cms_post_module
query = (ltable.module == module) & \
(ltable.resource == resource) & \
(ltable.post_id == table.id) & \
(table.deleted != True)
_item = db(query).select(table.id,
table.body,
limitby=(0, 1)).first()
# @ToDo: Replace this crude check with?
#if current.auth.s3_has_permission("update", r.table, record_id=r.id):
auth = current.auth
ADMIN = auth.get_system_roles().ADMIN
ADMIN = auth.s3_has_role(ADMIN)
if _item:
if ADMIN:
if current.response.s3.crud.formstyle == "bootstrap":
_class = "btn"
else:
_class = "action-btn"
item = DIV(XML(_item.body),
A(current.T("Edit"),
_href=URL(c="cms", f="post",
args=[_item.id, "update"],
vars={"module": module,
"resource": resource
}),
_class="%s cms-edit" % _class))
else:
item = XML(_item.body)
elif ADMIN:
if current.response.s3.crud.formstyle == "bootstrap":
_class = "btn"
else:
_class = "action-btn"
item = A(current.T("Edit"),
_href=URL(c="cms", f="post", args="create",
vars={"module": module,
"resource": resource
}),
_class="%s cms-edit" % _class)
else:
item = ""
output = DIV(item, _id=widget_id, _class="cms_content")
return output
# =============================================================================
def cms_customise_post_fields():
"""
Customize cms_post fields for the Newsfeed / Home Pages
"""
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
org_field = settings.get_cms_organisation()
if org_field == "created_by$organisation_id":
current.auth.settings.table_user.organisation_id.represent = \
s3db.org_organisation_represent
elif org_field == "post_organisation.organisation_id":
s3db.cms_post_organisation.organisation_id.label = ""
org_group_field = settings.get_cms_organisation_group()
if org_group_field == "created_by$org_group_id":
current.auth.settings.table_user.org_group_id.represent = \
s3db.org_organisation_group_represent
elif org_group_field == "post_organisation_group.group_id":
s3db.cms_post_organisation_group.group_id.label = ""
table = s3db.cms_post
table.series_id.requires = table.series_id.requires.other
contact_field = settings.get_cms_person()
if contact_field == "created_by":
table.created_by.represent = s3_auth_user_represent_name
elif contact_field == "person_id":
field = table.person_id
field.readable = True
field.writable = True
field.comment = None
# Default now
#field.requires = IS_ADD_PERSON_WIDGET2()
field.widget = S3AddPersonWidget2(controller="pr")
field = table.location_id
field.label = ""
field.represent = s3db.gis_LocationRepresent(sep=" | ")
# Required
field.requires = IS_LOCATION()
list_fields = ["series_id",
"location_id",
"date",
]
lappend = list_fields.append
if settings.get_cms_show_titles():
lappend("title")
lappend("body")
if contact_field:
lappend(contact_field)
if org_field:
lappend(org_field)
if org_group_field:
lappend(org_group_field)
if settings.get_cms_show_attachments():
lappend("document.file")
if settings.get_cms_show_links():
lappend("document.url")
if settings.get_cms_show_events():
lappend("event_post.event_id")
if settings.get_cms_location_click_filters():
script = \
'''S3.filter_location=function(d){var cb
for(var p in d){cb=$('input[name="multiselect_post-cms_post_location_id-location-filter-L'+p+'"][value="'+d[p]+'"]')
if(!cb.prop('checked')){cb.click()}}}'''
s3.jquery_ready.append(script)
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
for level in levels:
lappend("location_id$%s" % level)
if settings.get_cms_show_tags():
lappend("tag.name")
if s3.debug:
s3.scripts.append("/%s/static/scripts/tag-it.js" % current.request.application)
else:
s3.scripts.append("/%s/static/scripts/tag-it.min.js" % current.request.application)
if current.auth.s3_has_permission("update", current.db.cms_tag_post):
readonly = '''afterTagAdded:function(event,ui){
if(ui.duringInitialization){return}
var post_id=$(this).attr('data-post_id')
var url=S3.Ap.concat('/cms/post/',post_id,'/add_tag/',ui.tagLabel)
$.getS3(url)
},afterTagRemoved:function(event,ui){
var post_id=$(this).attr('data-post_id')
var url=S3.Ap.concat('/cms/post/',post_id,'/remove_tag/',ui.tagLabel)
$.getS3(url)
},'''
else:
readonly = '''readOnly:true'''
script = \
'''S3.tagit=function(){$('.s3-tags').tagit({autocomplete:{source:'%s'},%s})}
S3.tagit()
S3.redraw_fns.push('tagit')''' % (URL(c="cms", f="tag",
args="search_ac.json"),
readonly)
s3.jquery_ready.append(script)
s3db.configure("cms_post",
list_fields = list_fields,
)
return table
# =============================================================================
def cms_post_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for CMS Posts on the
Home & News Feed pages.
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["cms_post.id"]
item_class = "thumbnail"
db = current.db
s3db = current.s3db
settings = current.deployment_settings
NONE = current.messages["NONE"]
org_field = settings.get_cms_organisation()
# Convert to the right format for this context
if org_field == "created_by$organisation_id":
org_field = "auth_user.organisation_id"
elif org_field == "post_organisation.organisation_id":
org_field = "cms_post_organisation.organisation_id"
org_group_field = settings.get_cms_organisation_group()
# Convert to the right format for this context
if org_group_field == "created_by$org_group_id":
org_group_field = "auth_user.org_group_id"
elif org_group_field == "post_organisation_group.group_id":
org_group_field = "cms_post_organisation_group.group_id"
raw = record._row
body = record["cms_post.body"]
series_id = raw["cms_post.series_id"]
title = record["cms_post.title"]
if title and title != NONE:
subtitle = [DIV(title,
_class="card-subtitle"
)
]
else:
subtitle = []
for event_resource in ["event", "incident"]:
label = record["event_post.%s_id" % event_resource]
if label and label != NONE:
link=URL(c="event", f=event_resource,
args=[raw["event_post.%s_id" % event_resource],
"profile"]
)
subtitle.append(DIV(A(ICON(event_resource),
label,
_href=link,
_target="_blank",
),
_class="card-subtitle"
))
if subtitle:
subtitle.append(body)
body = TAG[""](*subtitle)
date = record["cms_post.date"]
date = SPAN(date,
_class="date-title",
)
location_id = raw["cms_post.location_id"]
if location_id:
location = record["cms_post.location_id"]
if settings.get_cms_location_click_filters():
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
data = {}
for level in levels:
data[level[1:]] = raw["gis_location.%s" % level]
onclick = '''S3.filter_location(%s)''' % json.dumps(data, separators=SEPARATORS)
location = SPAN(A(location,
_href="#",
_onclick=onclick,
),
_class="location-title",
)
else:
location_url = URL(c="gis", f="location", args=[location_id, "profile"])
location = SPAN(A(location,
_href=location_url,
),
_class="location-title",
)
else:
location = ""
person = ""
contact_field = settings.get_cms_person()
if contact_field == "created_by":
author_id = raw["cms_post.created_by"]
person = record["cms_post.created_by"]
# @ToDo: Bulk lookup
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ltable.user_id == author_id) & \
(ltable.pe_id == ptable.pe_id)
row = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if row:
person_id = row.id
else:
person_id = None
elif contact_field == "person_id":
person_id = raw["cms_post.person_id"]
if person_id:
person = record["cms_post.person_id"]
else:
person_id = None
if person:
if person_id:
# @ToDo: deployment_setting for controller to use?
person_url = URL(c="pr", f="person", args=[person_id])
else:
person_url = "#"
person = A(person,
_href=person_url,
)
avatar = ""
organisation = ""
if org_field:
organisation_id = raw[org_field]
if organisation_id:
organisation = record[org_field]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
organisation = A(organisation,
_href=org_url,
_class="card-organisation",
)
# Avatar
# Try Organisation Logo
otable = db.org_organisation
row = db(otable.id == organisation_id).select(otable.logo,
limitby=(0, 1)
).first()
if row and row.logo:
logo = URL(c="default", f="download", args=[row.logo])
avatar = IMG(_src=logo,
_height=50,
_width=50,
_style="padding-right:5px",
_class="media-object")
else:
avatar = organisation
avatar = A(avatar,
_href=org_url,
_class="pull-left",
)
org_group = ""
if org_group_field:
org_group_id = raw[org_group_field]
if org_group_id:
org_group = record[org_group_field]
org_group_url = URL(c="org", f="group", args=[org_group_id, "profile"])
org_group = A(org_group,
_href=org_group_url,
_class="card-org-group",
)
if not avatar and person_id:
# Personal Avatar
avatar = s3_avatar_represent(person_id,
tablename="pr_person",
_class="media-object")
avatar = A(avatar,
_href=person_url,
_class="pull-left",
)
if person and organisation:
card_person = DIV(person,
" - ",
organisation,
_class="card-person",
)
elif person and org_group:
card_person = DIV(person,
" - ",
org_group,
_class="card-person",
)
elif person:
card_person = DIV(person,
_class="card-person",
)
#elif organisation:
# card_person = DIV(organisation,
# _class="card-person",
# )
elif org_group:
card_person = DIV(org_group,
_class="card-person",
)
else:
card_person = DIV(_class="card-person",
)
permit = current.auth.s3_has_permission
table = db.cms_post
updateable = permit("update", table, record_id=record_id)
if settings.get_cms_show_tags():
tags = raw["cms_tag.name"]
if tags or updateable:
tag_list = UL(_class="s3-tags",
)
tag_list["_data-post_id"] = record_id
else:
tag_list = ""
if tags:
if not isinstance(tags, list):
tags = [tags]#.split(", ")
for tag in tags:
tag_item = LI(tag)
tag_list.append(tag_item)
tags = tag_list
else:
tags = ""
T = current.T
if series_id:
series = record["cms_post.series_id"]
translate = settings.get_L10n_translate_cms_series()
if translate:
series_title = T(series)
else:
series_title = series
else:
series_title = series = ""
request = current.request
# Tool box
if updateable:
if request.function == "newsfeed":
fn = "newsfeed"
else:
fn = "post"
edit_btn = A(ICON("edit"),
_href=URL(c="cms", f=fn,
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}
),
_class="s3_modal",
_title=T("Edit %(type)s") % dict(type=series_title),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class="dl-item-delete",
)
else:
delete_btn = ""
user = current.auth.user
if user and settings.get_cms_bookmarks():
ltable = s3db.cms_post_user
query = (ltable.post_id == record_id) & \
(ltable.user_id == user.id)
exists = db(query).select(ltable.id,
limitby=(0, 1)
).first()
if exists:
bookmark_btn = A(ICON("bookmark"),
_onclick="$.getS3('%s',function(){$('#%s').datalist('ajaxReloadItem',%s)})" %
(URL(c="cms", f="post",
args=[record_id, "remove_bookmark"]),
list_id,
record_id),
_title=T("Remove Bookmark"),
)
else:
bookmark_btn = A(ICON("bookmark-empty"),
_onclick="$.getS3('%s',function(){$('#%s').datalist('ajaxReloadItem',%s)})" %
(URL(c="cms", f="post",
args=[record_id, "add_bookmark"]),
list_id,
record_id),
_title=T("Add Bookmark"),
)
else:
bookmark_btn = ""
toolbox = DIV(bookmark_btn,
edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Dropdown of available documents
documents = raw["doc_document.file"]
if documents:
if not isinstance(documents, list):
documents = [documents]
doc_list_id = "attachments-%s" % item_id
doc_list = UL(_class="f-dropdown dropdown-menu",
_role="menu",
_id=doc_list_id,
data={"dropdown-content": ""},
)
retrieve = db.doc_document.file.retrieve
for doc in documents:
try:
doc_name = retrieve(doc)[0]
except (IOError, TypeError):
doc_name = NONE
doc_url = URL(c="default", f="download",
args=[doc])
doc_item = LI(A(ICON("file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(ICON("attachment"),
SPAN(_class="caret"),
_class="btn dropdown-toggle dropdown",
_href="#",
data={"toggle": "dropdown",
"dropdown": doc_list_id,
},
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
links = raw["doc_document.url"]
if links:
if not isinstance(links, list):
links = [links]
link_list = DIV(_class="media card-links")
for link in links:
link_item = A(ICON("link"),
" ",
link,
_href=link,
_target="_blank",
_class="card-link",
)
link_list.append(link_item)
else:
link_list = ""
if "profile" in request.args:
# Single resource list
# - don't show series_title
if settings.get_cms_show_titles():
title = raw["cms_post.title"] or ""
else:
title = ""
card_label = SPAN(" %s" % title,
_class="card-title")
else:
# Mixed resource lists (Home, News Feed)
icon = series.lower().replace(" ", "_")
series_title = SPAN(" %s" % series_title,
_class="card-title")
if settings.get_cms_show_titles() and raw["cms_post.title"]:
title = SPAN(raw["cms_post.title"],
_class="card-title2")
card_label = TAG[""](ICON(icon),
series_title,
title)
else:
card_label = TAG[""](ICON(icon),
series_title)
# Type cards
if series == "Alert":
# Apply additional highlighting for Alerts
item_class = "%s disaster" % item_class
# Render the item
if series == "Event" and "newsfeed" not in request.args: # and request.function != "newsfeed"
# Events on Homepage have a different header
date.add_class("event")
header = DIV(date,
location,
toolbox,
_class="card-header",
)
else:
header = DIV(card_label,
location,
date,
toolbox,
_class="card-header",
)
item = DIV(header,
DIV(avatar,
DIV(DIV(body,
card_person,
_class="media",
),
_class="media-body",
),
_class="media",
),
tags,
docs,
link_list,
_class=item_class,
_id=item_id,
)
return item
# END =========================================================================
| mit |
ansible/ansible-modules-extras | windows/win_iis_webapppool.py | 11 | 3662 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Henrik Wallström <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: win_iis_webapppool
version_added: "2.0"
short_description: Configures an IIS Web Application Pool.
description:
- Creates, removes and configures an IIS Web Application Pool
options:
name:
description:
- Name of the application pool
required: true
default: null
aliases: []
state:
description:
- State of the application pool
choices:
- absent
- stopped
- started
- restarted
required: false
default: null
aliases: []
attributes:
description:
- Application Pool attributes from a string where attributes are separated by a pipe and attribute name/values by a colon, e.g. "foo:1|bar:2"
required: false
default: null
aliases: []
author: Henrik Wallström
'''
EXAMPLES = '''
# This returns information about an existing application pool
$ ansible -i inventory -m win_iis_webapppool -a "name='DefaultAppPool'" windows
host | success >> {
"attributes": {},
"changed": false,
"info": {
"attributes": {
"CLRConfigFile": "",
"applicationPoolSid": "S-1-5-82-3006700770-424185619-1745488364-794895919-4004696415",
"autoStart": true,
"enable32BitAppOnWin64": false,
"enableConfigurationOverride": true,
"managedPipelineMode": 0,
"managedRuntimeLoader": "webengine4.dll",
"managedRuntimeVersion": "v4.0",
"name": "DefaultAppPool",
"passAnonymousToken": true,
"queueLength": 1000,
"startMode": 0,
"state": 1
},
"name": "DefaultAppPool",
"state": "Started"
}
}
# This creates a new application pool in 'Started' state
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=started" windows
# This stops an application pool
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=stopped" windows
# This restarts an application pool
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' state=restart" windows
# This change application pool attributes without touching state
$ ansible -i inventory -m win_iis_webapppool -a "name='AppPool' attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows
# This creates an application pool and sets attributes
$ ansible -i inventory -m win_iis_webapppool -a "name='AnotherAppPool' state=started attributes='managedRuntimeVersion:v4.0|autoStart:false'" windows
# Playbook example
---
- name: App Pool with .NET 4.0
win_iis_webapppool:
name: 'AppPool'
state: started
attributes: managedRuntimeVersion:v4.0
register: webapppool
'''
| gpl-3.0 |
Tennyson53/SUR | magnum/tests/unit/common/cert_manager/test_cert_manager.py | 6 | 1550 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import fixture
from magnum.common import cert_manager
from magnum.common.cert_manager import barbican_cert_manager as bcm
from magnum.common.cert_manager import get_backend
from magnum.common.cert_manager import local_cert_manager as lcm
from magnum.tests import base
class TestCertManager(base.BaseTestCase):
def setUp(self):
cert_manager._CERT_MANAGER_PLUGIN = None
super(TestCertManager, self).setUp()
def test_barbican_cert_manager(self):
fixture.Config().config(group='certificates',
cert_manager_type='barbican')
self.assertEqual(get_backend().CertManager,
bcm.CertManager)
def test_local_cert_manager(self):
fixture.Config().config(group='certificates',
cert_manager_type='local')
self.assertEqual(get_backend().CertManager,
lcm.CertManager)
| apache-2.0 |
0x0all/SASM | Windows/MinGW64/opt/lib/python2.7/encodings/cp1140.py | 593 | 13361 | """ Python Character Mapping Codec cp1140 generated from 'python-mappings/CP1140.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1140',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
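# Usage sketch (illustrative; assumes the codec is registered under the name 'cp1140'):
#   u'\u20ac'.encode('cp1140') # -> '\x9f' (EURO SIGN sits at 0x9F in the table below)
#   '\x9f'.decode('cp1140') # -> u'\u20ac'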
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xa2' # 0x4A -> CENT SIGN
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'|' # 0x4F -> VERTICAL LINE
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'!' # 0x5A -> EXCLAMATION MARK
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'\xac' # 0x5F -> NOT SIGN
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\u20ac' # 0x9F -> EURO SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'^' # 0xB0 -> CIRCUMFLEX ACCENT
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'[' # 0xBA -> LEFT SQUARE BRACKET
u']' # 0xBB -> RIGHT SQUARE BRACKET
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
mj10777/QGIS | cmake/FindQsci.py | 77 | 2612 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012, Larry Shaffer <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Larry Shaffer <[email protected]> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Larry Shaffer <[email protected]> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Larry Shaffer <[email protected]> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Find QScintilla2 PyQt4/PyQt5 module version.
.. note:: Redistribution and use is allowed according to the terms of the BSD
license. For details see the accompanying COPYING-CMAKE-SCRIPTS file.
"""
__author__ = 'Larry Shaffer ([email protected])'
__date__ = '22/10/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
import sys
VER = ""
if len(sys.argv) > 1:
if sys.argv[1] == "4":
from PyQt4.Qsci import QSCINTILLA_VERSION_STR
VER = QSCINTILLA_VERSION_STR
else:
from PyQt5.Qsci import QSCINTILLA_VERSION_STR
VER = QSCINTILLA_VERSION_STR
else:
try:
from PyQt4.Qsci import QSCINTILLA_VERSION_STR
VER = QSCINTILLA_VERSION_STR
except ImportError:
try:
from PyQt5.Qsci import QSCINTILLA_VERSION_STR
VER = QSCINTILLA_VERSION_STR
except ImportError:
pass
print("qsci_version_str:%s" % VER)
| gpl-2.0 |
ludwiktrammer/odoo | addons/account/report/account_balance.py | 22 | 3313 | # -*- coding: utf-8 -*-
import time
from openerp import api, models
class ReportTrialBalance(models.AbstractModel):
_name = 'report.account.report_trialbalance'
def _get_accounts(self, accounts, display_account):
""" compute the balance, debit and credit for the provided accounts
:Arguments:
`accounts`: list of account records,
`display_account`: used to display either all accounts or only those accounts whose balance is not zero
:Returns a list of dictionaries of accounts with the following keys and values
`name`: Account name,
`code`: Account code,
`credit`: total amount of credit,
`debit`: total amount of debit,
`balance`: total amount of balance,
"""
account_result = {}
# Prepare sql query based on selected parameters from the wizard
tables, where_clause, where_params = self.env['account.move.line']._query_get()
tables = tables.replace('"','')
if not tables:
tables = 'account_move_line'
wheres = [""]
if where_clause.strip():
wheres.append(where_clause.strip())
filters = " AND ".join(wheres)
# compute the balance, debit and credit for the provided accounts
request = ("SELECT account_id AS id, SUM(debit) AS debit, SUM(credit) AS credit, (SUM(debit) - SUM(credit)) AS balance" +\
" FROM " + tables + " WHERE account_id IN %s " + filters + " GROUP BY account_id")
params = (tuple(accounts.ids),) + tuple(where_params)
self.env.cr.execute(request, params)
for row in self.env.cr.dictfetchall():
account_result[row.pop('id')] = row
account_res = []
for account in accounts:
res = dict((fn, 0.0) for fn in ['credit', 'debit', 'balance'])
currency = account.currency_id and account.currency_id or account.company_id.currency_id
res['code'] = account.code
res['name'] = account.name
if account.id in account_result.keys():
res['debit'] = account_result[account.id].get('debit')
res['credit'] = account_result[account.id].get('credit')
res['balance'] = account_result[account.id].get('balance')
if display_account == 'all':
account_res.append(res)
if display_account in ['movement', 'not_zero'] and not currency.is_zero(res['balance']):
account_res.append(res)
return account_res
@api.multi
def render_html(self, data):
self.model = self.env.context.get('active_model')
docs = self.env[self.model].browse(self.env.context.get('active_id'))
display_account = data['form'].get('display_account')
accounts = self.env['account.account'].search([])
account_res = self.with_context(data['form'].get('used_context'))._get_accounts(accounts, display_account)
docargs = {
'doc_ids': self.ids,
'doc_model': self.model,
'data': data['form'],
'docs': docs,
'time': time,
'Accounts': account_res,
}
return self.env['report'].render('account.report_trialbalance', docargs)
| agpl-3.0 |
matrix-org/synapse | tests/replication/test_sharded_event_persister.py | 1 | 12377 | # Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from unittest.mock import patch
from synapse.api.room_versions import RoomVersion
from synapse.rest import admin
from synapse.rest.client.v1 import login, room
from synapse.rest.client.v2_alpha import sync
from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.server import make_request
from tests.utils import USE_POSTGRES_FOR_TESTS
logger = logging.getLogger(__name__)
class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
"""Checks event persisting sharding works"""
# Event persister sharding requires postgres (due to needing
# `MultiWriterIdGenerator`).
if not USE_POSTGRES_FOR_TESTS:
skip = "Requires Postgres"
servlets = [
admin.register_servlets_for_client_rest_resource,
room.register_servlets,
login.register_servlets,
sync.register_servlets,
]
def prepare(self, reactor, clock, hs):
# Register a user who sends a message that we'll get notified about
self.other_user_id = self.register_user("otheruser", "pass")
self.other_access_token = self.login("otheruser", "pass")
self.room_creator = self.hs.get_room_creation_handler()
self.store = hs.get_datastore()
def default_config(self):
conf = super().default_config()
conf["redis"] = {"enabled": "true"}
conf["stream_writers"] = {"events": ["worker1", "worker2"]}
conf["instance_map"] = {
"worker1": {"host": "testserv", "port": 1001},
"worker2": {"host": "testserv", "port": 1002},
}
return conf
def _create_room(self, room_id: str, user_id: str, tok: str):
"""Create a room with given room_id"""
# We control the room ID generation by patching out the
# `_generate_room_id` method
async def generate_room(
creator_id: str, is_public: bool, room_version: RoomVersion
):
await self.store.store_room(
room_id=room_id,
room_creator_user_id=creator_id,
is_public=is_public,
room_version=room_version,
)
return room_id
with patch(
"synapse.handlers.room.RoomCreationHandler._generate_room_id"
) as mock:
mock.side_effect = generate_room
self.helper.create_room_as(user_id, tok=tok)
def test_basic(self):
"""Simple test to ensure that multiple rooms can be created and joined,
and that different rooms get handled by different instances.
"""
self.make_worker_hs(
"synapse.app.generic_worker",
{"worker_name": "worker1"},
)
self.make_worker_hs(
"synapse.app.generic_worker",
{"worker_name": "worker2"},
)
persisted_on_1 = False
persisted_on_2 = False
store = self.hs.get_datastore()
user_id = self.register_user("user", "pass")
access_token = self.login("user", "pass")
# Keep making new rooms until we see rooms being persisted on both
# workers.
for _ in range(10):
# Create a room
room = self.helper.create_room_as(user_id, tok=access_token)
# The other user joins
self.helper.join(
room=room, user=self.other_user_id, tok=self.other_access_token
)
# The other user sends some messages
response = self.helper.send(room, body="Hi!", tok=self.other_access_token)
event_id = response["event_id"]
# The event position includes which instance persisted the event.
pos = self.get_success(store.get_position_for_event(event_id))
persisted_on_1 |= pos.instance_name == "worker1"
persisted_on_2 |= pos.instance_name == "worker2"
if persisted_on_1 and persisted_on_2:
break
self.assertTrue(persisted_on_1)
self.assertTrue(persisted_on_2)
def test_vector_clock_token(self):
"""Tests that using a stream token with a vector clock component works
correctly with basic /sync and /messages usage.
"""
self.make_worker_hs(
"synapse.app.generic_worker",
{"worker_name": "worker1"},
)
worker_hs2 = self.make_worker_hs(
"synapse.app.generic_worker",
{"worker_name": "worker2"},
)
sync_hs = self.make_worker_hs(
"synapse.app.generic_worker",
{"worker_name": "sync"},
)
sync_hs_site = self._hs_to_site[sync_hs]
# Specially selected room IDs that get persisted on different workers.
room_id1 = "!foo:test"
room_id2 = "!baz:test"
self.assertEqual(
self.hs.config.worker.events_shard_config.get_instance(room_id1), "worker1"
)
self.assertEqual(
self.hs.config.worker.events_shard_config.get_instance(room_id2), "worker2"
)
user_id = self.register_user("user", "pass")
access_token = self.login("user", "pass")
store = self.hs.get_datastore()
# Create two room on the different workers.
self._create_room(room_id1, user_id, access_token)
self._create_room(room_id2, user_id, access_token)
# The other user joins
self.helper.join(
room=room_id1, user=self.other_user_id, tok=self.other_access_token
)
self.helper.join(
room=room_id2, user=self.other_user_id, tok=self.other_access_token
)
# Do an initial sync so that we're up to date.
channel = make_request(
self.reactor, sync_hs_site, "GET", "/sync", access_token=access_token
)
next_batch = channel.json_body["next_batch"]
# We now gut wrench into the events stream MultiWriterIdGenerator on
# worker2 to mimic it getting stuck persisting an event. This ensures
# that when we send an event on worker1 we end up in a state where
# worker2 events stream position lags that on worker1, resulting in a
# RoomStreamToken with a non-empty instance map component.
#
# Worker2's event stream position will not advance until we call
# __aexit__ again.
actx = worker_hs2.get_datastore()._stream_id_gen.get_next()
self.get_success(actx.__aenter__())
response = self.helper.send(room_id1, body="Hi!", tok=self.other_access_token)
first_event_in_room1 = response["event_id"]
# Assert that the current stream token has an instance map component, as
# we are trying to test vector clock tokens.
room_stream_token = store.get_room_max_token()
self.assertNotEqual(len(room_stream_token.instance_map), 0)
# Check that syncing still gets the new event, despite the gap in the
# stream IDs.
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/sync?since={}".format(next_batch),
access_token=access_token,
)
# We should only see the new event and nothing else
self.assertIn(room_id1, channel.json_body["rooms"]["join"])
self.assertNotIn(room_id2, channel.json_body["rooms"]["join"])
events = channel.json_body["rooms"]["join"][room_id1]["timeline"]["events"]
self.assertListEqual(
[first_event_in_room1], [event["event_id"] for event in events]
)
# Get the next batch and makes sure its a vector clock style token.
vector_clock_token = channel.json_body["next_batch"]
self.assertTrue(vector_clock_token.startswith("m"))
# Now that we've got a vector clock token we finish the fake persisting
# an event we started above.
self.get_success(actx.__aexit__(None, None, None))
# Now try and send an event to the other room so that we can test that
# the vector clock style token works as a `since` token.
response = self.helper.send(room_id2, body="Hi!", tok=self.other_access_token)
first_event_in_room2 = response["event_id"]
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/sync?since={}".format(vector_clock_token),
access_token=access_token,
)
self.assertNotIn(room_id1, channel.json_body["rooms"]["join"])
self.assertIn(room_id2, channel.json_body["rooms"]["join"])
events = channel.json_body["rooms"]["join"][room_id2]["timeline"]["events"]
self.assertListEqual(
[first_event_in_room2], [event["event_id"] for event in events]
)
next_batch = channel.json_body["next_batch"]
# We also want to test that the vector clock style token works with
# pagination. We do this by sending a couple of new events into the room
# and syncing again to get a prev_batch token for each room, then
# paginating from there back to the vector clock token.
self.helper.send(room_id1, body="Hi again!", tok=self.other_access_token)
self.helper.send(room_id2, body="Hi again!", tok=self.other_access_token)
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/sync?since={}".format(next_batch),
access_token=access_token,
)
prev_batch1 = channel.json_body["rooms"]["join"][room_id1]["timeline"][
"prev_batch"
]
prev_batch2 = channel.json_body["rooms"]["join"][room_id2]["timeline"][
"prev_batch"
]
# Paginating back in the first room should not produce any results, as
# no events have happened in it. This tests that we are correctly
# filtering results based on the vector clock portion.
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/rooms/{}/messages?from={}&to={}&dir=b".format(
room_id1, prev_batch1, vector_clock_token
),
access_token=access_token,
)
self.assertListEqual([], channel.json_body["chunk"])
# Paginating back on the second room should produce the first event
# again. This tests that pagination isn't completely broken.
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/rooms/{}/messages?from={}&to={}&dir=b".format(
room_id2, prev_batch2, vector_clock_token
),
access_token=access_token,
)
self.assertEqual(len(channel.json_body["chunk"]), 1)
self.assertEqual(
channel.json_body["chunk"][0]["event_id"], first_event_in_room2
)
# Paginating forwards should give the same results
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/rooms/{}/messages?from={}&to={}&dir=f".format(
room_id1, vector_clock_token, prev_batch1
),
access_token=access_token,
)
self.assertListEqual([], channel.json_body["chunk"])
channel = make_request(
self.reactor,
sync_hs_site,
"GET",
"/rooms/{}/messages?from={}&to={}&dir=f".format(
room_id2,
vector_clock_token,
prev_batch2,
),
access_token=access_token,
)
self.assertEqual(len(channel.json_body["chunk"]), 1)
self.assertEqual(
channel.json_body["chunk"][0]["event_id"], first_event_in_room2
)
| apache-2.0 |
zsoltdudas/lis-tempest | tempest/api/compute/admin/test_fixed_ips.py | 13 | 2319 | # Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class FixedIPsTestJson(base.BaseV2ComputeAdminTest):
@classmethod
def skip_checks(cls):
super(FixedIPsTestJson, cls).skip_checks()
if CONF.service_available.neutron:
msg = ("%s skipped as neutron is available" % cls.__name__)
raise cls.skipException(msg)
@classmethod
def setup_clients(cls):
super(FixedIPsTestJson, cls).setup_clients()
cls.client = cls.os_adm.fixed_ips_client
@classmethod
def resource_setup(cls):
super(FixedIPsTestJson, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
server = cls.servers_client.show_server(server['id'])['server']
for ip_set in server['addresses']:
for ip in server['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
cls.ip = ip['addr']
break
if cls.ip:
break
@test.idempotent_id('16b7d848-2f7c-4709-85a3-2dfb4576cc52')
@test.services('network')
def test_list_fixed_ip_details(self):
fixed_ip = self.client.show_fixed_ip(self.ip)
self.assertEqual(fixed_ip['fixed_ip']['address'], self.ip)
@test.idempotent_id('5485077b-7e46-4cec-b402-91dc3173433b')
@test.services('network')
def test_set_reserve(self):
self.client.reserve_fixed_ip(self.ip, reserve="None")
@test.idempotent_id('7476e322-b9ff-4710-bf82-49d51bac6e2e')
@test.services('network')
def test_set_unreserve(self):
self.client.reserve_fixed_ip(self.ip, unreserve="None")
| apache-2.0 |
khancyr/ardupilot | libraries/AP_HAL_ChibiOS/hwdef/scripts/convert_uart_order.py | 21 | 1096 | #!/usr/bin/env python
'''
convert UART_ORDER in a hwdef.dat into a SERIAL_ORDER
'''
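# Illustrative example (hypothetical hwdef.dat line, not from any real board):
#   UART_ORDER OTG1 USART2 USART3 UART4
# is rewritten, using the index map below, to:
#   SERIAL_ORDER OTG1 USART3 UART4 USART2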
import sys, shlex
def convert_file(fname):
lines = open(fname, 'r').readlines()
for i in range(len(lines)):
if lines[i].startswith('SERIAL_ORDER'):
print("Already has SERIAL_ORDER: %s" % fname)
return
for i in range(len(lines)):
line = lines[i]
if not line.startswith('UART_ORDER'):
continue
a = shlex.split(line, posix=False)
if a[0] != 'UART_ORDER':
continue
uart_order = a[1:]
if not fname.endswith('-bl.dat'):
while len(uart_order) < 4:
uart_order += ['EMPTY']
a += ['EMPTY']
map = [ 0, 2, 3, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12 ]
for j in range(len(uart_order)):
a[j+1] = uart_order[map[j]]
a[0] = 'SERIAL_ORDER'
print("%s new order " % fname, a)
lines[i] = ' '.join(a) + '\n'
open(fname, 'w').write(''.join(lines))
files=sys.argv[1:]
for fname in files:
convert_file(fname)
| gpl-3.0 |
sourcepole/qgis | python/plugins/osm/OsmPlugin.py | 2 | 13996 | """@package OsmPlugin
This is the main module of the OSM Plugin.
It shows/hides all tool buttons, widgets and dialogs.
After closing dialogs it performs all actions related to their return codes.
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtNetwork import *
from qgis.core import *
from OsmLoadDlg import OsmLoadDlg
from OsmSaveDlg import OsmSaveDlg
from OsmDownloadDlg import OsmDownloadDlg
from OsmUploadDlg import OsmUploadDlg
from OsmImportDlg import OsmImportDlg
from OsmFeatureDW import *
from OsmUndoRedoDW import *
# initialize Qt resources from file resources_rc.py
import resources_rc
def versionNumber():
"""Returns current version number of OpenStreetMap plugin.
@return current version number of the plugin
"""
return "0.5"
class OsmPlugin:
"""OsmPlugin is the main class OSM Plugin module.
It shows/hides all tool buttons, widgets and dialogs and after closing dialogs
it does all actions related with their return codes.
"""
def __init__(self, iface):
"""The constructor.
@param iface QgisInterface object
"""
self.iface=iface
self.canvas=self.iface.mapCanvas()
self.http=QHttp()
self.outFile=None
self.httpGetId=0
self.httpRequestAborted=False
self.fname=""
def initGui(self):
"""Function initalizes GUI of the OSM Plugin.
"""
self.dockWidgetVisible = False
# create action for loading OSM file
self.actionLoad=QAction(QIcon(":/plugins/osm_plugin/images/osm_load.png")
,"Load OSM from file", self.iface.mainWindow())
self.actionLoad.setWhatsThis("Load OpenStreetMap from file")
# create action for import of a layer into OSM
self.actionImport=QAction(QIcon(":/plugins/osm_plugin/images/osm_import.png")
,"Import data from a layer", self.iface.mainWindow())
self.actionImport.setWhatsThis("Import data from a layer to OpenStreetMap")
# create action for saving OSM file
self.actionSave=QAction(QIcon(":/plugins/osm_plugin/images/osm_save.png")
,"Save OSM to file", self.iface.mainWindow())
self.actionSave.setWhatsThis("Save OpenStreetMap to file")
# create action for OSM data downloading
self.actionDownload=QAction(QIcon(":/plugins/osm_plugin/images/osm_download.png")
,"Download OSM data", self.iface.mainWindow())
self.actionDownload.setWhatsThis("Download OpenStreetMap data")
# create action for OSM data downloading
self.actionUpload=QAction(QIcon(":/plugins/osm_plugin/images/osm_upload.png")
,"Upload OSM data", self.iface.mainWindow())
self.actionUpload.setWhatsThis("Upload OpenStreetMap data")
# create action for OSM dockable window
self.actionDockWidget=QAction(QIcon(":/plugins/osm_plugin/images/osm_featureManager.png")
,"Show/Hide OSM Feature Manager",self.iface.mainWindow())
self.actionDockWidget.setWhatsThis("Show/Hide OpenStreetMap Feature Manager")
self.actionDockWidget.setCheckable(True)
# connect new action to plugin function - when action is triggered
QObject.connect(self.actionLoad, SIGNAL("triggered()"), self.loadOsmFromFile)
QObject.connect(self.actionSave, SIGNAL("triggered()"), self.saveOsmToFile)
QObject.connect(self.actionDownload, SIGNAL("triggered()"), self.downloadOsmData)
QObject.connect(self.actionUpload, SIGNAL("triggered()"), self.uploadOsmData)
QObject.connect(self.actionDockWidget, SIGNAL("triggered()"), self.showHideDockWidget)
QObject.connect(self.actionImport, SIGNAL("triggered()"), self.importData)
# create a toolbar
self.toolBar=self.iface.addToolBar("OpenStreetMap")
self.toolBar.setObjectName("OpenStreetMap")
self.toolBar.addAction(self.actionLoad)
self.toolBar.addAction(self.actionDockWidget)
self.toolBar.addAction(self.actionDownload)
self.toolBar.addAction(self.actionUpload)
self.toolBar.addAction(self.actionImport)
self.toolBar.addAction(self.actionSave)
# populate plugins menu
self.iface.addPluginToMenu("&OpenStreetMap", self.actionLoad)
self.iface.addPluginToMenu("&OpenStreetMap", self.actionDockWidget)
self.iface.addPluginToMenu("&OpenStreetMap", self.actionDownload)
self.iface.addPluginToMenu("&OpenStreetMap", self.actionUpload)
self.iface.addPluginToMenu("&OpenStreetMap", self.actionImport)
self.iface.addPluginToMenu("&OpenStreetMap", self.actionSave)
# create manager of sqlite database(-s)
self.dbm=OsmDatabaseManager(self)
self.undoredo=None
self.dockWidget=None
# create widget for undo/redo actions
self.undoredo=OsmUndoRedoDW(self)
self.iface.addDockWidget(Qt.LeftDockWidgetArea,self.undoredo)
self.undoredo.hide()
QObject.connect(self.undoredo,SIGNAL("visibilityChanged(bool)"),self.__urVisibilityChanged)
self.undoredo.setContentEnabled(False)
# create widget for osm feature info
self.dockWidget=OsmFeatureDW(self)
self.iface.addDockWidget(Qt.RightDockWidgetArea, self.dockWidget)
QObject.connect(self.dockWidget,SIGNAL("visibilityChanged(bool)"),self.__ofVisibilityChanged)
self.dockWidget.setContentEnabled(False)
def unload(self):
"""Function unloads the OSM Plugin.
"""
self.canvas.unsetMapTool(self.dockWidget.mapTool)
del self.dockWidget.mapTool
self.dockWidget.mapTool=None
# remove the plugin menu items
self.iface.removePluginMenu("&OpenStreetMap",self.actionLoad)
self.iface.removePluginMenu("&OpenStreetMap",self.actionSave)
self.iface.removePluginMenu("&OpenStreetMap",self.actionDownload)
self.iface.removePluginMenu("&OpenStreetMap",self.actionUpload)
self.iface.removePluginMenu("&OpenStreetMap",self.actionImport)
self.iface.removePluginMenu("&OpenStreetMap",self.actionDockWidget)
self.dockWidget.close()
if self.dockWidget.rubBand:
self.dockWidget.rubBand.reset(False)
if self.dockWidget.rubBandPol:
self.dockWidget.rubBandPol.reset(True)
self.undoredo.clear()
self.undoredo.close()
self.iface.removeDockWidget(self.dockWidget)
self.iface.removeDockWidget(self.undoredo)
del self.dockWidget
del self.undoredo
self.dockWidget=None
self.undoredo=None
# remove toolbar
del self.toolBar
# w/o osm plugin we don't need osm layers
self.dbm.removeAllOsmLayers()
def loadOsmFromFile(self):
"""Function shows up the "Load OSM from file" dialog.
After closing it, function calls the appropriate actions
according to dialog's return code.
"""
# sanity check whether we're able to load osm data
if 'osm' not in QgsProviderRegistry.instance().providerList():
QMessageBox.critical(None, "Sorry", "You don't have OSM provider installed!")
return
# show modal dialog with OSM file selection
self.dlgLoad=OsmLoadDlg(self)
# continue only if OK button was clicked
if self.dlgLoad.exec_()==0:
return
self.fname=self.dlgLoad.OSMFileEdit.text()
self.dbFileName=self.fname+".db"
self.dbm.addDatabase(self.dbFileName,self.dlgLoad.pointLayer,self.dlgLoad.lineLayer,self.dlgLoad.polygonLayer)
self.undoredo.clear()
self.dockWidget.setContentEnabled(True)
self.undoredo.setContentEnabled(True)
self.dataLoaded=True
def saveOsmToFile(self):
"""Function shows up the "Save OSM to file" dialog.
After closing it, function calls the appropriate actions
according to dialog's return code.
"""
if 'osm' not in QgsProviderRegistry.instance().providerList():
QMessageBox.critical(None, "Sorry", "You don't have OSM provider installed!")
return
if not self.dbm.currentKey:
QMessageBox.information(QWidget(), QString("OSM Save to file")
,"No OSM data are loaded/downloaded or no OSM layer is selected in Layers panel. \
Please change this situation first, because OSM Plugin doesn't know what to save.")
return
# show modal dialog with OSM file selection
self.dlgSave=OsmSaveDlg(self)
# continue only if OK button was clicked
if self.dlgSave.exec_()==0:
return
def downloadOsmData(self):
"""Function shows up the "Download OSM data" dialog.
After closing it, function calls the appropriate actions
according to dialog's return code.
"""
if 'osm' not in QgsProviderRegistry.instance().providerList():
QMessageBox.critical(None, "Sorry", "You don't have OSM provider installed!")
return
self.dlgDownload=OsmDownloadDlg(self)
self.dlgDownload.exec_()
if not self.dlgDownload.httpSuccess:
return
if not self.dlgDownload.autoLoadCheckBox.isChecked():
return
# create loading dialog, submit it
self.dlgLoad=OsmLoadDlg(self)
self.dlgLoad.setModal(True)
self.dlgLoad.show()
self.dlgLoad.close()
self.dlgLoad.OSMFileEdit.setText(self.dlgDownload.destdirLineEdit.text())
self.dlgLoad.styleCombo.setCurrentIndex(self.dlgDownload.styleCombo.currentIndex())
if self.dlgDownload.chkCustomRenderer.isChecked():
self.dlgLoad.chkCustomRenderer.setChecked(True)
else:
self.dlgLoad.chkCustomRenderer.setChecked(False)
for row in xrange(self.dlgLoad.lstTags.count()):
self.dlgLoad.lstTags.item(row).setCheckState(Qt.Checked)
if self.dlgDownload.chkReplaceData.isChecked():
self.dlgLoad.chkReplaceData.setChecked(True)
else:
self.dlgLoad.chkReplaceData.setChecked(False)
self.dlgLoad.onOK()
self.fname=self.dlgLoad.OSMFileEdit.text()
self.dbFileName=self.fname+".db"
self.dbm.addDatabase(self.dbFileName,self.dlgLoad.pointLayer,self.dlgLoad.lineLayer,self.dlgLoad.polygonLayer)
def uploadOsmData(self):
"""Function shows up the "Upload OSM data" dialog.
After closing it, function calls the appropriate actions
according to dialog's return code.
"""
if 'osm' not in QgsProviderRegistry.instance().providerList():
QMessageBox.critical(None, "Sorry", "You don't have OSM provider installed!")
return
# first check if there are some data; if not, upload doesn't make sense
if not self.dbm.currentKey:
QMessageBox.information(QWidget(), QString("OSM Upload")
,"No OSM data are loaded/downloaded or no OSM layer is selected in Layers panel. \
Please change this situation first, because OSM Plugin doesn't know what to upload.")
return
self.dlgUpload=OsmUploadDlg(self)
self.dlgUpload.exec_()
def importData(self):
"""Function shows up the "Import OSM data" dialog.
After closing it, function calls the appropriate actions
according to dialog's return code.
"""
if 'osm' not in QgsProviderRegistry.instance().providerList():
QMessageBox.critical(None, "Sorry", "You don't have OSM provider installed!")
return
if self.dbm.currentKey is None:
QMessageBox.information(self.iface.mainWindow(), "OSM Import"
,"No OSM data are loaded/downloaded or no OSM layer is selected in Layers panel. \
Please change this situation first, because OSM Plugin doesn't know what layer will be destination of the import.")
return
dlg=OsmImportDlg(self)
if dlg.cboLayer.count()==0:
QMessageBox.information(self.iface.mainWindow(), "OSM Import", "There are currently no available vector layers.")
return
dlg.exec_()
def showHideDockWidget(self):
"""Function shows/hides main dockable widget of the plugin ("OSM Feature" widget)
"""
if self.dockWidget.isVisible():
self.dockWidget.hide()
else:
self.dockWidget.show()
def __urVisibilityChanged(self):
"""Function is called after visibilityChanged(...) signal is emitted on OSM Edit History widget.
Function changes the state of the related checkbox according to
whether the widget is currently visible or not.
"""
if self.undoredo.isVisible():
self.dockWidget.urDetailsButton.setChecked(True)
else:
self.dockWidget.urDetailsButton.setChecked(False)
def __ofVisibilityChanged(self):
"""Function is called after visibilityChanged(...) signal is emitted on OSM Feature widget.
Function changes the state of the appropriate tool button according to
whether the widget is currently visible or not.
"""
if self.dockWidget.isVisible():
self.actionDockWidget.setChecked(True)
else:
self.actionDockWidget.setChecked(False)
| gpl-2.0 |
joonas-fi/sumatrapdf | ext/freetype2/src/tools/chktrcmp.py | 192 | 3823 | #!/usr/bin/env python
#
# Check trace components in FreeType 2 source.
# Author: suzuki toshiya, 2009, 2013
#
# This code is explicitly placed into the public domain.
import sys
import os
import re
SRC_FILE_LIST = []
USED_COMPONENT = {}
KNOWN_COMPONENT = {}
SRC_FILE_DIRS = [ "src" ]
TRACE_DEF_FILES = [ "include/internal/fttrace.h" ]
# --------------------------------------------------------------
# Parse command line options
#
for i in range( 1, len( sys.argv ) ):
if sys.argv[i].startswith( "--help" ):
print "Usage: %s [option]" % sys.argv[0]
print "Search used-but-defined and defined-but-not-used trace_XXX macros"
print ""
print " --help:"
print " Show this help"
print ""
print " --src-dirs=dir1:dir2:..."
print " Specify the directories of C source files to be checked"
print " Default is %s" % ":".join( SRC_FILE_DIRS )
print ""
print " --def-files=file1:file2:..."
print " Specify the header files including FT_TRACE_DEF()"
print " Default is %s" % ":".join( TRACE_DEF_FILES )
print ""
exit(0)
if sys.argv[i].startswith( "--src-dirs=" ):
SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" )
elif sys.argv[i].startswith( "--def-files=" ):
TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" )
# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#
c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE )
trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' )
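# A source line matched by trace_use_pat looks like this (illustrative):
#   #define FT_COMPONENT  trace_memory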
for d in SRC_FILE_DIRS:
for ( p, dlst, flst ) in os.walk( d ):
for f in flst:
if c_pathname_pat.match( f ) != None:
src_pathname = os.path.join( p, f )
line_num = 0
for src_line in open( src_pathname, 'r' ):
line_num = line_num + 1
src_line = src_line.strip()
if trace_use_pat.match( src_line ) != None:
component_name = trace_use_pat.sub( '', src_line )
if component_name in USED_COMPONENT:
USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) )
else:
USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ]
# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#
trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
trace_def_pat_cls = re.compile( '[ \t\)].*$' )
for f in TRACE_DEF_FILES:
line_num = 0
for hdr_line in open( f, 'r' ):
line_num = line_num + 1
hdr_line = hdr_line.strip()
if trace_def_pat_opn.match( hdr_line ) != None:
component_name = trace_def_pat_opn.sub( '', hdr_line )
component_name = trace_def_pat_cls.sub( '', component_name )
if component_name in KNOWN_COMPONENT:
print "trace component %s is defined twice, see %s and fttrace.h:%d" % \
( component_name, KNOWN_COMPONENT[component_name], line_num )
else:
KNOWN_COMPONENT[component_name] = "%s:%d" % \
( os.path.basename( f ), line_num )
# --------------------------------------------------------------
# Compare the used and defined trace macros.
#
print "# Trace component used in the implementations but not defined in fttrace.h."
cmpnt = USED_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
if c not in KNOWN_COMPONENT:
print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )
print "# Trace component is defined but not used in the implementations."
cmpnt = KNOWN_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
if c not in USED_COMPONENT:
if c != "any":
print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
| gpl-3.0 |
haripradhan/MissionPlanner | Lib/xml/etree/SimpleXMLTreeBuilder.py | 42 | 4805 | #
# ElementTree
# $Id: SimpleXMLTreeBuilder.py 3225 2007-08-27 21:32:08Z fredrik $
#
# A simple XML tree builder, based on Python's xmllib
#
# Note that due to bugs in xmllib, this builder does not fully support
# namespaces (unqualified attributes are put in the default namespace,
# instead of being left as is). Run this module as a script to find
# out if this affects your Python version.
#
# history:
# 2001-10-20 fl created
# 2002-05-01 fl added namespace support for xmllib
# 2002-08-17 fl added xmllib sanity test
#
# Copyright (c) 1999-2004 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Tools to build element trees from XML files, using <b>xmllib</b>.
# This module can be used instead of the standard tree builder, for
# Python versions where "expat" is not available (such as 1.5.2).
# <p>
# Note that due to bugs in <b>xmllib</b>, the namespace support is
# not reliable (you can run the module as a script to find out exactly
# how unreliable it is on your Python version).
##
import xmllib, string
import ElementTree
##
# ElementTree builder for XML source data.
#
# @see elementtree.ElementTree
class TreeBuilder(xmllib.XMLParser):
def __init__(self, html=0, target=None, encoding=None):
self.__builder = ElementTree.TreeBuilder()
if html:
import htmlentitydefs
self.entitydefs.update(htmlentitydefs.entitydefs)
xmllib.XMLParser.__init__(self)
##
# Feeds data to the parser.
#
# @param data Encoded data.
def feed(self, data):
xmllib.XMLParser.feed(self, data)
##
# Finishes feeding data to the parser.
#
# @return An element structure.
# @defreturn Element
def close(self):
xmllib.XMLParser.close(self)
return self.__builder.close()
def handle_data(self, data):
self.__builder.data(data)
handle_cdata = handle_data
def unknown_starttag(self, tag, attrs):
attrib = {}
for key, value in attrs.items():
attrib[fixname(key)] = value
self.__builder.start(fixname(tag), attrib)
def unknown_endtag(self, tag):
self.__builder.end(fixname(tag))
def fixname(name, split=string.split):
# xmllib in 2.0 and later provides limited (and slightly broken)
# support for XML namespaces.
if " " not in name:
return name
return "{%s}%s" % tuple(split(name, " ", 1))
if __name__ == "__main__":
import sys
# sanity check: look for known namespace bugs in xmllib
p = TreeBuilder()
text = """\
<root xmlns='default'>
<tag attribute='value' />
</root>
"""
p.feed(text)
tree = p.close()
status = []
# check for bugs in the xmllib implementation
tag = tree.find("{default}tag")
if tag is None:
status.append("namespaces not supported")
if tag is not None and tag.get("{default}attribute"):
status.append("default namespace applied to unqualified attribute")
# report bugs
if status:
print "xmllib doesn't work properly in this Python version:"
for bug in status:
print "-", bug
else:
print "congratulations; no problems found in xmllib"
| gpl-3.0 |
wonder-sk/QGIS | python/ext-libs/requests/packages/chardet/escsm.py | 2930 | 7839 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
HZ_cls = (
1,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,4,0,5,2,0, # 78 - 7f
1,1,1,1,1,1,1,1, # 80 - 87
1,1,1,1,1,1,1,1, # 88 - 8f
1,1,1,1,1,1,1,1, # 90 - 97
1,1,1,1,1,1,1,1, # 98 - 9f
1,1,1,1,1,1,1,1, # a0 - a7
1,1,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,1,1,1,1,1,1, # c0 - c7
1,1,1,1,1,1,1,1, # c8 - cf
1,1,1,1,1,1,1,1, # d0 - d7
1,1,1,1,1,1,1,1, # d8 - df
1,1,1,1,1,1,1,1, # e0 - e7
1,1,1,1,1,1,1,1, # e8 - ef
1,1,1,1,1,1,1,1, # f0 - f7
1,1,1,1,1,1,1,1, # f8 - ff
)
HZ_st = (
eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17
5,eError, 6,eError, 5, 5, 4,eError,# 18-1f
4,eError, 4, 4, 4,eError, 4,eError,# 20-27
4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f
)
HZCharLenTable = (0, 0, 0, 0, 0, 0)
HZSMModel = {'classTable': HZ_cls,
'classFactor': 6,
'stateTable': HZ_st,
'charLenTable': HZCharLenTable,
'name': "HZ-GB-2312"}
ISO2022CN_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,4,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022CN_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27
5, 6,eError,eError,eError,eError,eError,eError,# 28-2f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37
eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f
)
ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022CNSMModel = {'classTable': ISO2022CN_cls,
'classFactor': 9,
'stateTable': ISO2022CN_st,
'charLenTable': ISO2022CNCharLenTable,
'name': "ISO-2022-CN"}
ISO2022JP_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,2,2, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,7,0,0,0, # 20 - 27
3,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
6,0,4,0,8,0,0,0, # 40 - 47
0,9,5,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022JP_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f
eError, 5,eError,eError,eError, 4,eError,eError,# 20-27
eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f
eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47
)
ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
ISO2022JPSMModel = {'classTable': ISO2022JP_cls,
'classFactor': 10,
'stateTable': ISO2022JP_st,
'charLenTable': ISO2022JPCharLenTable,
'name': "ISO-2022-JP"}
ISO2022KR_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,3,0,0,0, # 20 - 27
0,4,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,5,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
ISO2022KR_st = (
eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17
eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f
eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27
)
ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0)
ISO2022KRSMModel = {'classTable': ISO2022KR_cls,
'classFactor': 6,
'stateTable': ISO2022KR_st,
'charLenTable': ISO2022KRCharLenTable,
'name': "ISO-2022-KR"}
# flake8: noqa
| gpl-2.0 |
Metaswitch/horizon | openstack_dashboard/api/ceilometer.py | 18 | 49104 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
from ceilometerclient import client as ceilometer_client
from django.conf import settings
from django.utils import datastructures
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
def get_flavor_names(request):
# TODO(lsmola) The flavors can be set per project,
# so it should show only valid ones.
try:
flavors = nova.flavor_list(request, None)
return [f.name for f in flavors]
except Exception:
return ['m1.tiny', 'm1.small', 'm1.medium',
'm1.large', 'm1.xlarge']
def is_iterable(var):
"""Return True if the given is list or tuple."""
return (isinstance(var, (list, tuple)) or
issubclass(var.__class__, (list, tuple)))
def make_query(user_id=None, tenant_id=None, resource_id=None,
user_ids=None, tenant_ids=None, resource_ids=None):
"""Returns query built from given parameters.
This query can be then used for querying resources, meters and
statistics.
:Parameters:
- `user_id`: user_id, has a priority over list of ids
- `tenant_id`: tenant_id, has a priority over list of ids
- `resource_id`: resource_id, has a priority over list of ids
- `user_ids`: list of user_ids
- `tenant_ids`: list of tenant_ids
- `resource_ids`: list of resource_ids
"""
user_ids = user_ids or []
tenant_ids = tenant_ids or []
resource_ids = resource_ids or []
query = []
if user_id:
user_ids = [user_id]
for u_id in user_ids:
query.append({"field": "user_id", "op": "eq", "value": u_id})
if tenant_id:
tenant_ids = [tenant_id]
for t_id in tenant_ids:
query.append({"field": "project_id", "op": "eq", "value": t_id})
if resource_id:
resource_ids = [resource_id]
for r_id in resource_ids:
query.append({"field": "resource_id", "op": "eq", "value": r_id})
return query
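def _example_make_query():
    # Hedged usage sketch, not part of the upstream API: shows the shape of
    # the query list that make_query() produces and that the ceilometerclient
    # wrappers below accept as their `q` argument. The ids are placeholders.
    query = make_query(tenant_id="tenant-1", resource_ids=["res-1", "res-2"])
    assert query == [
        {"field": "project_id", "op": "eq", "value": "tenant-1"},
        {"field": "resource_id", "op": "eq", "value": "res-1"},
        {"field": "resource_id", "op": "eq", "value": "res-2"},
    ]
    return query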
class Meter(base.APIResourceWrapper):
"""Represents one Ceilometer meter."""
_attrs = ['name', 'type', 'unit', 'resource_id', 'user_id', 'project_id']
def __init__(self, apiresource):
super(Meter, self).__init__(apiresource)
self._label = self.name
self._description = ""
def augment(self, label=None, description=None):
if label:
self._label = label
if description:
self._description = description
@property
def description(self):
return self._description
@property
def label(self):
return self._label
class Resource(base.APIResourceWrapper):
"""Represents one Ceilometer resource."""
_attrs = ['resource_id', 'source', 'user_id', 'project_id', 'metadata',
'links']
def __init__(self, apiresource, ceilometer_usage=None):
super(Resource, self).__init__(apiresource)
# Save empty strings to IDs rather than None, so it gets
# serialized correctly. We don't want 'None' strings.
self.project_id = self.project_id or ""
self.user_id = self.user_id or ""
self.resource_id = self.resource_id or ""
self._id = "%s__%s__%s" % (self.project_id,
self.user_id,
self.resource_id)
# Meters with statistics data
self._meters = {}
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if ceilometer_usage and self.project_id:
self._tenant = ceilometer_usage.get_tenant(self.project_id)
else:
self._tenant = None
if ceilometer_usage and self.user_id:
self._user = ceilometer_usage.get_user(self.user_id)
else:
self._user = None
self._query = make_query(tenant_id=self.project_id,
user_id=self.user_id,
resource_id=self.resource_id)
@property
def name(self):
name = self.metadata.get("name", None)
display_name = self.metadata.get("display_name", None)
return name or display_name or ""
@property
def id(self):
return self._id
@property
def tenant(self):
return self._tenant
@property
def user(self):
return self._user
@property
def resource(self):
return self.resource_id
@property
def query(self):
return self._query
@property
def meters(self):
return self._meters
def get_meter(self, meter_name):
return self._meters.get(meter_name, None)
def set_meter(self, meter_name, value):
self._meters[meter_name] = value
class ResourceAggregate(Resource):
"""Represents aggregate of more resources together.
Aggregate of resources can be obtained by specifying
multiple ids in one parameter or by not specifying
one parameter.
It can also be specified by query directly.
Example:
We can obtain an aggregate of resources by specifying
multiple resource_ids in resource_id parameter in init.
Or we can specify only tenant_id, which will return
all resources of that tenant.
"""
def __init__(self, tenant_id=None, user_id=None, resource_id=None,
tenant_ids=None, user_ids=None, resource_ids=None,
ceilometer_usage=None, query=None, identifier=None):
self._id = identifier
self.tenant_id = None
self.user_id = None
self.resource_id = None
# Meters with statistics data
self._meters = {}
if query:
self._query = query
else:
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if ceilometer_usage and tenant_id:
self.tenant_id = tenant_id
self._tenant = ceilometer_usage.get_tenant(tenant_id)
else:
self._tenant = None
if ceilometer_usage and user_id:
self.user_id = user_id
self._user = ceilometer_usage.get_user(user_id)
else:
self._user = None
if resource_id:
self.resource_id = resource_id
self._query = make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id,
tenant_ids=tenant_ids,
user_ids=user_ids,
resource_ids=resource_ids)
@property
def id(self):
return self._id
class Sample(base.APIResourceWrapper):
"""Represents one Ceilometer sample."""
_attrs = ['counter_name', 'user_id', 'resource_id', 'timestamp',
'resource_metadata', 'source', 'counter_unit', 'counter_volume',
'project_id', 'counter_type', 'resource_metadata']
@property
def instance(self):
display_name = self.resource_metadata.get('display_name', None)
instance_id = self.resource_metadata.get('instance_id', None)
return display_name or instance_id
@property
def name(self):
name = self.resource_metadata.get("name", None)
display_name = self.resource_metadata.get("display_name", None)
return name or display_name or ""
class Statistic(base.APIResourceWrapper):
"""Represents one Ceilometer statistic."""
_attrs = ['period', 'period_start', 'period_end',
'count', 'min', 'max', 'sum', 'avg',
'duration', 'duration_start', 'duration_end']
@memoized
def ceilometerclient(request):
"""Initialization of Ceilometer client."""
endpoint = base.url_for(request, 'metering')
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
return ceilometer_client.Client('2', endpoint,
token=(lambda: request.user.token.id),
insecure=insecure,
cacert=cacert)
def resource_list(request, query=None, ceilometer_usage_object=None):
"""List the resources."""
resources = ceilometerclient(request).resources.list(q=query)
return [Resource(r, ceilometer_usage_object) for r in resources]
def sample_list(request, meter_name, query=None, limit=None):
"""List the samples for this meters."""
samples = ceilometerclient(request).samples.list(meter_name=meter_name,
q=query, limit=limit)
return [Sample(s) for s in samples]
def meter_list(request, query=None):
"""List the user's meters."""
meters = ceilometerclient(request).meters.list(query)
return [Meter(m) for m in meters]
def statistic_list(request, meter_name, query=None, period=None):
"""List of statistics."""
statistics = ceilometerclient(request).\
statistics.list(meter_name=meter_name, q=query, period=period)
return [Statistic(s) for s in statistics]
class ThreadedUpdateResourceWithStatistics(threading.Thread):
"""Multithread wrapper for update_with_statistics method of
resource_usage.
A join logic is placed in process_list class method. All resources
will have its statistics attribute filled in separate threads.
The resource_usage object is shared between threads. Each thread is
updating one Resource.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `resources`: List of Resource or ResourceAggregate object,
that will be filled by statistic data.
- `resource_usage`: Wrapping resource usage object, that holds
all statistics data.
- `meter_names`: List of meter names of the statistics we want.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will be
returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the attribute name of the stats.
E.g. (avg, max, min...) If None is given, whole
statistic object is returned,
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
# TODO(lsmola) Can be removed once Ceilometer supports sample-api
# and group-by, so all of this optimization will not be necessary.
# It is planned somewhere to I.
def __init__(self, resource_usage, resource, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
super(ThreadedUpdateResourceWithStatistics, self).__init__()
self.resource_usage = resource_usage
self.resource = resource
self.meter_names = meter_names
self.period = period
self.stats_attr = stats_attr
self.additional_query = additional_query
def run(self):
# Run the job
self.resource_usage.update_with_statistics(
self.resource,
meter_names=self.meter_names, period=self.period,
stats_attr=self.stats_attr, additional_query=self.additional_query)
@classmethod
def process_list(cls, resource_usage, resources, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
threads = []
for resource in resources:
# add statistics data into resource
thread = cls(resource_usage, resource, meter_names=meter_names,
period=period, stats_attr=stats_attr,
additional_query=additional_query)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
class CeilometerUsage(object):
"""Represents wrapper of any Ceilometer queries.
One instance of this class should be shared between resources
as this class provides a place where users and tenants are
cached. So there are no duplicate queries to API.
This class also wraps Ceilometer API calls and provides parallel
HTTP calls to API.
    This class should also serve as a reasonable abstraction layer that can
    absorb future optimizations of the Ceilometer service without changing
    the interface.
"""
def __init__(self, request):
self._request = request
# Cached users and tenants.
self._users = {}
self._tenants = {}
def get_user(self, user_id):
"""Returns user fetched from API.
Caching the result, so it doesn't contact API twice with the
same query.
"""
user = self._users.get(user_id, None)
if not user:
user = keystone.user_get(self._request, user_id)
# caching the user, for later use
self._users[user_id] = user
return user
def preload_all_users(self):
"""Preloads all users into dictionary.
It's more effective to preload all users, rather than fetching many
users by separate API get calls.
"""
users = keystone.user_list(self._request)
        # Cache all users under the right indexes; this is more effective
        # than obtaining a large number of users one by one via
        # keystone.user_get
for u in users:
self._users[u.id] = u
def get_tenant(self, tenant_id):
"""Returns tenant fetched from API.
Caching the result, so it doesn't contact API twice with the
same query.
"""
tenant = self._tenants.get(tenant_id, None)
if not tenant:
tenant = keystone.tenant_get(self._request, tenant_id)
# caching the tenant for later use
self._tenants[tenant_id] = tenant
return tenant
def preload_all_tenants(self):
"""Preloads all tenants into dictionary.
It's more effective to preload all tenants, rather than fetching each
tenant by separate API get calls.
"""
tenants, more = keystone.tenant_list(self._request)
        # Cache all tenants under the right indexes; this is more effective
        # than obtaining a large number of tenants one by one via
        # keystone.tenant_get
for t in tenants:
self._tenants[t.id] = t
def global_data_get(self, used_cls=None, query=None,
with_statistics=False, additional_query=None,
with_users_and_tenants=True):
"""Obtaining a resources for table view.
It obtains resources with statistics data according to declaration
in used_cls class.
:Parameters:
          - `used_cls`: Class wrapper for usage data. It acts as wrapper for
settings needed. See the call of this method for
details.
- `query`: Explicit query definition for fetching the resources. If
no query is provided, it takes a default_query from
used_cls. If no default query is provided, it fetches
all the resources and filters them by meters defined
in used_cls.
          - `with_statistics`: Define whether statistics data from the meters
defined in used_cls should be fetched.
Can be used to first obtain only the pure
resources, then with the statistics data by
AJAX.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
default_query = used_cls.default_query
query = query or default_query
filter_func = None
def filter_resources(resource):
"""Method for filtering resources by their links.rel attr.
The links.rel attributes contain all meters the resource has.
"""
for link in resource.links:
if link['rel'] in used_cls.meters:
return True
return False
if not query:
# Not all resource types can be obtained by query, if there is not
# a query, we are filtering all resources by this function.
filter_func = filter_resources
if with_statistics:
# Will add statistic data into resources.
resources = self.resources_with_statistics(
query,
used_cls.meters,
filter_func=filter_func,
stats_attr=used_cls.stats_attr,
additional_query=additional_query,
with_users_and_tenants=with_users_and_tenants)
else:
# Will load only resources without statistical data.
resources = self.resources(
query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
return [used_cls(resource) for resource in resources]
def query_from_object_id(self, object_id):
"""Obtaining a query from resource id.
        The query can then be used to identify a resource in resources or
        meters API calls. The ID is built in the Resource initializer, or
        returned by Datatable into its UpdateRow functionality.
"""
try:
tenant_id, user_id, resource_id = object_id.split("__")
except ValueError:
return []
return make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id)
def update_with_statistics(self, resource, meter_names=None, period=None,
stats_attr=None, additional_query=None):
"""Adding statistical data into one Resource or ResourceAggregate.
        It adds each statistic of each of the meter_names into the resource
        attributes. The attribute name is the meter name with '.' replaced
        by '_'.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
                      statistic is returned. If given, a faceted result will be
                      returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
if not meter_names:
raise ValueError("meter_names and resources must be defined to be "
"able to obtain the statistics.")
# query for identifying one resource in meters
query = resource.query
if additional_query:
if not is_iterable(additional_query):
raise ValueError("Additional query must be list of"
" conditions. See the docs for format.")
query = query + additional_query
# TODO(lsmola) thread for each meter will be probably overkill
# but I should test lets say thread pool with 100 of threads
# and apply it only to this code.
# Though I do expect Ceilometer will support bulk requests,
# so all of this optimization will not be necessary.
for meter in meter_names:
statistics = statistic_list(self._request, meter,
query=query, period=period)
meter = meter.replace(".", "_")
if statistics:
if stats_attr:
# I want to load only a specific attribute
resource.set_meter(
meter,
getattr(statistics[0], stats_attr, None))
else:
# I want a dictionary of all statistics
resource.set_meter(meter, statistics)
else:
resource.set_meter(meter, None)
return resource
def resources(self, query=None, filter_func=None,
with_users_and_tenants=False):
"""Obtaining resources with the query or filter_func.
Obtains resources and also fetch tenants and users associated
with those resources if with_users_and_tenants flag is true.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
if with_users_and_tenants:
ceilometer_usage_object = self
else:
ceilometer_usage_object = None
resources = resource_list(
self._request,
query=query, ceilometer_usage_object=ceilometer_usage_object)
if filter_func:
resources = [resource for resource in resources if
filter_func(resource)]
return resources
def resources_with_statistics(self, query=None, meter_names=None,
period=None, filter_func=None,
stats_attr=None, additional_query=None,
with_users_and_tenants=False):
"""Obtaining resources with statistics data inside.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
resources = self.resources(
query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
ThreadedUpdateResourceWithStatistics.process_list(
self, resources,
meter_names=meter_names, period=period, stats_attr=stats_attr,
additional_query=additional_query)
return resources
def resource_aggregates(self, queries=None):
"""Obtaining resource aggregates with queries.
        Representing a resource aggregate by a query is the most general way
        to obtain resource aggregates.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
"""
resource_aggregates = []
for identifier, query in queries.items():
resource_aggregates.append(ResourceAggregate(query=query,
ceilometer_usage=None,
identifier=identifier))
return resource_aggregates
def resource_aggregates_with_statistics(self, queries=None,
meter_names=None, period=None,
filter_func=None, stats_attr=None,
additional_query=None):
"""Obtaining resource aggregates with statistics data inside.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
resource_aggregates = self.resource_aggregates(queries)
ThreadedUpdateResourceWithStatistics.process_list(
self,
resource_aggregates, meter_names=meter_names, period=period,
stats_attr=stats_attr, additional_query=additional_query)
return resource_aggregates
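def _example_ceilometer_usage(request):
    # Hedged usage sketch, not part of the upstream API: the typical flow of
    # wrapping a request in CeilometerUsage and fetching averaged statistics
    # for one hypothetical tenant. The meter name and period are placeholders.
    usage = CeilometerUsage(request)
    query = make_query(tenant_id="tenant-1")
    resources = usage.resources_with_statistics(
        query=query, meter_names=["cpu_util"], period=3600, stats_attr="avg")
    return [(r.id, r.get_meter("cpu_util")) for r in resources]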
def diff_lists(a, b):
if not a:
return []
elif not b:
return a
else:
return list(set(a) - set(b))
class Meters(object):
"""Class for listing of available meters.
    It lists the meters defined in this class that are available
    in the Ceilometer meter_list.
    It stores information that is not available in Ceilometer, i.e.
    label and description.
"""
def __init__(self, request=None, ceilometer_meter_list=None):
# Storing the request.
self._request = request
# Storing the Ceilometer meter list
if ceilometer_meter_list:
self._ceilometer_meter_list = ceilometer_meter_list
else:
try:
self._ceilometer_meter_list = meter_list(request)
except Exception:
self._ceilometer_meter_list = []
exceptions.handle(self._request,
_('Unable to retrieve Ceilometer meter '
'list.'))
# Storing the meters info categorized by their services.
self._nova_meters_info = self._get_nova_meters_info()
self._neutron_meters_info = self._get_neutron_meters_info()
self._glance_meters_info = self._get_glance_meters_info()
self._cinder_meters_info = self._get_cinder_meters_info()
self._swift_meters_info = self._get_swift_meters_info()
self._kwapi_meters_info = self._get_kwapi_meters_info()
self._ipmi_meters_info = self._get_ipmi_meters_info()
# Storing the meters info of all services together.
all_services_meters = (self._nova_meters_info,
self._neutron_meters_info,
self._glance_meters_info,
self._cinder_meters_info,
self._swift_meters_info,
self._kwapi_meters_info,
self._ipmi_meters_info)
self._all_meters_info = {}
for service_meters in all_services_meters:
self._all_meters_info.update(dict([(meter_name, meter_info)
for meter_name, meter_info
in service_meters.items()]))
# Here will be the cached Meter objects, that will be reused for
# repeated listing.
self._cached_meters = {}
def list_all(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=only_meters,
except_meters=except_meters)
def list_nova(self, except_meters=None):
"""Returns a list of meters tied to nova.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._nova_meters_info.keys(),
except_meters=except_meters)
def list_neutron(self, except_meters=None):
"""Returns a list of meters tied to neutron.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._neutron_meters_info.keys(),
except_meters=except_meters)
def list_glance(self, except_meters=None):
"""Returns a list of meters tied to glance.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._glance_meters_info.keys(),
except_meters=except_meters)
def list_cinder(self, except_meters=None):
"""Returns a list of meters tied to cinder.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._cinder_meters_info.keys(),
except_meters=except_meters)
def list_swift(self, except_meters=None):
"""Returns a list of meters tied to swift.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._swift_meters_info.keys(),
except_meters=except_meters)
def list_kwapi(self, except_meters=None):
"""Returns a list of meters tied to kwapi.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._kwapi_meters_info.keys(),
except_meters=except_meters)
def list_ipmi(self, except_meters=None):
"""Returns a list of meters tied to ipmi
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._ipmi_meters_info.keys(),
except_meters=except_meters)
def _list(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
# Get all wanted meter names.
if only_meters:
meter_names = only_meters
else:
meter_names = [meter_name for meter_name
in self._all_meters_info.keys()]
meter_names = diff_lists(meter_names, except_meters)
# Collect meters for wanted meter names.
return self._get_meters(meter_names)
def _get_meters(self, meter_names):
"""Obtain meters based on meter_names.
The meters that do not exist in Ceilometer meter list are left out.
:Parameters:
- `meter_names`: A list of meter names we want to fetch.
"""
meters = []
for meter_name in meter_names:
meter = self._get_meter(meter_name)
if meter:
meters.append(meter)
return meters
def _get_meter(self, meter_name):
"""Obtains a meter.
Obtains meter either from cache or from Ceilometer meter list
joined with statically defined meter info like label and description.
:Parameters:
- `meter_name`: A meter name we want to fetch.
"""
meter = self._cached_meters.get(meter_name, None)
if not meter:
meter_candidates = [m for m in self._ceilometer_meter_list
if m.name == meter_name]
if meter_candidates:
meter_info = self._all_meters_info.get(meter_name, None)
if meter_info:
label = meter_info["label"]
description = meter_info["description"]
else:
label = ""
description = ""
meter = meter_candidates[0]
meter.augment(label=label, description=description)
self._cached_meters[meter_name] = meter
return meter
def _get_nova_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
meters_info = datastructures.SortedDict([
("instance", {
'label': '',
'description': _("Existence of instance"),
}),
("instance:<type>", {
'label': '',
'description': _("Existence of instance <type> "
"(openstack types)"),
}),
("memory", {
'label': '',
'description': _("Volume of RAM"),
}),
("memory.usage", {
'label': '',
'description': _("Volume of RAM used"),
}),
("cpu", {
'label': '',
'description': _("CPU time used"),
}),
("cpu_util", {
'label': '',
'description': _("Average CPU utilization"),
}),
("vcpus", {
'label': '',
'description': _("Number of VCPUs"),
}),
("disk.read.requests", {
'label': '',
'description': _("Number of read requests"),
}),
("disk.write.requests", {
'label': '',
'description': _("Number of write requests"),
}),
("disk.read.bytes", {
'label': '',
'description': _("Volume of reads"),
}),
("disk.write.bytes", {
'label': '',
'description': _("Volume of writes"),
}),
("disk.read.requests.rate", {
'label': '',
'description': _("Average rate of read requests"),
}),
("disk.write.requests.rate", {
'label': '',
'description': _("Average rate of write requests"),
}),
("disk.read.bytes.rate", {
'label': '',
'description': _("Average rate of reads"),
}),
("disk.write.bytes.rate", {
'label': '',
'description': _("Average volume of writes"),
}),
("disk.root.size", {
'label': '',
'description': _("Size of root disk"),
}),
("disk.ephemeral.size", {
'label': '',
'description': _("Size of ephemeral disk"),
}),
("network.incoming.bytes", {
'label': '',
'description': _("Number of incoming bytes "
"on the network for a VM interface"),
}),
("network.outgoing.bytes", {
'label': '',
'description': _("Number of outgoing bytes "
"on the network for a VM interface"),
}),
("network.incoming.packets", {
'label': '',
'description': _("Number of incoming "
"packets for a VM interface"),
}),
("network.outgoing.packets", {
'label': '',
'description': _("Number of outgoing "
"packets for a VM interface"),
}),
("network.incoming.bytes.rate", {
'label': '',
'description': _("Average rate per sec of incoming "
"bytes on a VM network interface"),
}),
("network.outgoing.bytes.rate", {
'label': '',
'description': _("Average rate per sec of outgoing "
"bytes on a VM network interface"),
}),
("network.incoming.packets.rate", {
'label': '',
'description': _("Average rate per sec of incoming "
"packets on a VM network interface"),
}),
("network.outgoing.packets.rate", {
'label': '',
'description': _("Average rate per sec of outgoing "
"packets on a VM network interface"),
}),
])
# Adding flavor based meters into meters_info dict
# TODO(lsmola) this kind of meter will be probably deprecated
# https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
for flavor in get_flavor_names(self._request):
name = 'instance:%s' % flavor
meters_info[name] = dict(meters_info["instance:<type>"])
meters_info[name]['description'] = (
_('Duration of instance type %s (openstack flavor)') %
flavor)
# TODO(lsmola) allow to set specific in local_settings. For all meters
# because users can have their own agents and meters.
return meters_info
def _get_neutron_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('network', {
'label': '',
'description': _("Existence of network"),
}),
('network.create', {
'label': '',
'description': _("Creation requests for this network"),
}),
('network.update', {
'label': '',
'description': _("Update requests for this network"),
}),
('subnet', {
'label': '',
'description': _("Existence of subnet"),
}),
('subnet.create', {
'label': '',
'description': _("Creation requests for this subnet"),
}),
('subnet.update', {
'label': '',
'description': _("Update requests for this subnet"),
}),
('port', {
'label': '',
'description': _("Existence of port"),
}),
('port.create', {
'label': '',
'description': _("Creation requests for this port"),
}),
('port.update', {
'label': '',
'description': _("Update requests for this port"),
}),
('router', {
'label': '',
'description': _("Existence of router"),
}),
('router.create', {
'label': '',
'description': _("Creation requests for this router"),
}),
('router.update', {
'label': '',
'description': _("Update requests for this router"),
}),
('ip.floating', {
'label': '',
'description': _("Existence of floating ip"),
}),
('ip.floating.create', {
'label': '',
'description': _("Creation requests for this floating ip"),
}),
('ip.floating.update', {
'label': '',
'description': _("Update requests for this floating ip"),
}),
])
def _get_glance_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('image', {
'label': '',
'description': _("Image existence check"),
}),
('image.size', {
'label': '',
'description': _("Uploaded image size"),
}),
('image.update', {
'label': '',
'description': _("Number of image updates"),
}),
('image.upload', {
'label': '',
'description': _("Number of image uploads"),
}),
('image.delete', {
'label': '',
'description': _("Number of image deletions"),
}),
('image.download', {
'label': '',
'description': _("Image is downloaded"),
}),
('image.serve', {
'label': '',
'description': _("Image is served out"),
}),
])
def _get_cinder_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('volume', {
'label': '',
'description': _("Existence of volume"),
}),
('volume.size', {
'label': '',
'description': _("Size of volume"),
}),
])
def _get_swift_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('storage.objects', {
'label': '',
'description': _("Number of objects"),
}),
('storage.objects.size', {
'label': '',
'description': _("Total size of stored objects"),
}),
('storage.objects.containers', {
'label': '',
'description': _("Number of containers"),
}),
('storage.objects.incoming.bytes', {
'label': '',
'description': _("Number of incoming bytes"),
}),
('storage.objects.outgoing.bytes', {
'label': '',
'description': _("Number of outgoing bytes"),
}),
('storage.api.request', {
'label': '',
'description': _("Number of API requests against swift"),
}),
])
def _get_kwapi_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('energy', {
'label': '',
'description': _("Amount of energy"),
}),
('power', {
'label': '',
'description': _("Power consumption"),
}),
])
def _get_ipmi_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('hardware.ipmi.node.power', {
'label': '',
'description': _("System Current Power"),
}),
('hardware.ipmi.fan', {
'label': '',
'description': _("Fan RPM"),
}),
('hardware.ipmi.temperature', {
'label': '',
'description': _("Sensor Temperature Reading"),
}),
('hardware.ipmi.current', {
'label': '',
'description': _("Sensor Current Reading"),
}),
('hardware.ipmi.voltage', {
'label': '',
'description': _("Sensor Voltage Reading"),
}),
('hardware.ipmi.node.temperature', {
'label': '',
'description': _("System Temperature Reading"),
}),
('hardware.ipmi.node.outlet_temperature', {
'label': '',
'description': _("System Outlet Temperature Reading"),
}),
('hardware.ipmi.node.airflow', {
'label': '',
'description': _("System Airflow Reading"),
}),
('hardware.ipmi.node.cups', {
'label': '',
'description': _("System CUPS Reading"),
}),
('hardware.ipmi.node.cpu_util', {
'label': '',
'description': _("System CPU Utility Reading"),
}),
('hardware.ipmi.node.mem_util', {
'label': '',
'description': _("System Memory Utility Reading"),
}),
('hardware.ipmi.node.io_util', {
'label': '',
'description': _("System IO Utility Reading"),
}),
])
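def _example_meters_listing(request):
    # Hedged usage sketch, not part of the upstream API: list the nova-related
    # meters known to Ceilometer, skipping two hypothetical meter names.
    meters = Meters(request)
    return [(m.name, m.label, m.description)
            for m in meters.list_nova(except_meters=["vcpus", "memory"])]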
| apache-2.0 |
israelbenatar/boto | tests/integration/kinesis/test_kinesis.py | 99 | 4404 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import time
import boto
from tests.compat import unittest
from boto.kinesis.exceptions import ResourceNotFoundException
class TimeoutError(Exception):
pass
class TestKinesis(unittest.TestCase):
def setUp(self):
self.kinesis = boto.connect_kinesis()
def test_kinesis(self):
kinesis = self.kinesis
# Create a new stream
kinesis.create_stream('test', 1)
self.addCleanup(self.kinesis.delete_stream, 'test')
# Wait for the stream to be ready
tries = 0
while tries < 10:
tries += 1
time.sleep(15)
response = kinesis.describe_stream('test')
if response['StreamDescription']['StreamStatus'] == 'ACTIVE':
shard_id = response['StreamDescription']['Shards'][0]['ShardId']
break
else:
raise TimeoutError('Stream is still not active, aborting...')
# Make a tag.
kinesis.add_tags_to_stream(stream_name='test', tags={'foo': 'bar'})
# Check that the correct tag is there.
response = kinesis.list_tags_for_stream(stream_name='test')
self.assertEqual(len(response['Tags']), 1)
self.assertEqual(response['Tags'][0],
{'Key':'foo', 'Value': 'bar'})
# Remove the tag and ensure it is removed.
kinesis.remove_tags_from_stream(stream_name='test', tag_keys=['foo'])
response = kinesis.list_tags_for_stream(stream_name='test')
self.assertEqual(len(response['Tags']), 0)
# Get ready to process some data from the stream
response = kinesis.get_shard_iterator('test', shard_id, 'TRIM_HORIZON')
shard_iterator = response['ShardIterator']
# Write some data to the stream
data = 'Some data ...'
record = {
'Data': data,
'PartitionKey': data,
}
response = kinesis.put_record('test', data, data)
response = kinesis.put_records([record, record.copy()], 'test')
# Wait for the data to show up
tries = 0
num_collected = 0
num_expected_records = 3
collected_records = []
while tries < 100:
tries += 1
time.sleep(1)
response = kinesis.get_records(shard_iterator)
shard_iterator = response['NextShardIterator']
for record in response['Records']:
if 'Data' in record:
collected_records.append(record['Data'])
num_collected += 1
if num_collected >= num_expected_records:
self.assertEqual(num_expected_records, num_collected)
break
else:
raise TimeoutError('No records found, aborting...')
# Read the data, which should be the same as what we wrote
for record in collected_records:
self.assertEqual(data, record)
def test_describe_non_existent_stream(self):
with self.assertRaises(ResourceNotFoundException) as cm:
self.kinesis.describe_stream('this-stream-shouldnt-exist')
# Assert things about the data we passed along.
self.assertEqual(cm.exception.error_code, None)
self.assertTrue('not found' in cm.exception.message)
| mit |
blaggacao/OpenUpgrade | addons/hr_attendance/__init__.py | 434 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_attendance
import wizard
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
75651/kbengine_cloud | kbe/res/scripts/common/Lib/ssl.py | 67 | 34420 | # Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group defines certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
PROTOCOL_TLSv1_1
PROTOCOL_TLSv1_2
The following constants identify various SSL alert message descriptions as per
http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-6
ALERT_DESCRIPTION_CLOSE_NOTIFY
ALERT_DESCRIPTION_UNEXPECTED_MESSAGE
ALERT_DESCRIPTION_BAD_RECORD_MAC
ALERT_DESCRIPTION_RECORD_OVERFLOW
ALERT_DESCRIPTION_DECOMPRESSION_FAILURE
ALERT_DESCRIPTION_HANDSHAKE_FAILURE
ALERT_DESCRIPTION_BAD_CERTIFICATE
ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE
ALERT_DESCRIPTION_CERTIFICATE_REVOKED
ALERT_DESCRIPTION_CERTIFICATE_EXPIRED
ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN
ALERT_DESCRIPTION_ILLEGAL_PARAMETER
ALERT_DESCRIPTION_UNKNOWN_CA
ALERT_DESCRIPTION_ACCESS_DENIED
ALERT_DESCRIPTION_DECODE_ERROR
ALERT_DESCRIPTION_DECRYPT_ERROR
ALERT_DESCRIPTION_PROTOCOL_VERSION
ALERT_DESCRIPTION_INSUFFICIENT_SECURITY
ALERT_DESCRIPTION_INTERNAL_ERROR
ALERT_DESCRIPTION_USER_CANCELLED
ALERT_DESCRIPTION_NO_RENEGOTIATION
ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION
ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
ALERT_DESCRIPTION_UNRECOGNIZED_NAME
ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE
ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE
ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY
"""
import textwrap
import re
import sys
import os
from collections import namedtuple
from enum import Enum as _Enum
import _ssl # if we can't import it, let the error propagate
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import _SSLContext
from _ssl import (
SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError,
SSLSyscallError, SSLEOFError,
)
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import (VERIFY_DEFAULT, VERIFY_CRL_CHECK_LEAF, VERIFY_CRL_CHECK_CHAIN,
VERIFY_X509_STRICT)
from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj
from _ssl import RAND_status, RAND_egd, RAND_add, RAND_bytes, RAND_pseudo_bytes
def _import_symbols(prefix):
for n in dir(_ssl):
if n.startswith(prefix):
globals()[n] = getattr(_ssl, n)
_import_symbols('OP_')
_import_symbols('ALERT_DESCRIPTION_')
_import_symbols('SSL_ERROR_')
from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN
from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
from _ssl import _OPENSSL_API_VERSION
_PROTOCOL_NAMES = {
PROTOCOL_TLSv1: "TLSv1",
PROTOCOL_SSLv23: "SSLv23",
PROTOCOL_SSLv3: "SSLv3",
}
try:
from _ssl import PROTOCOL_SSLv2
_SSLv2_IF_EXISTS = PROTOCOL_SSLv2
except ImportError:
_SSLv2_IF_EXISTS = None
else:
_PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2"
try:
from _ssl import PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2
except ImportError:
pass
else:
_PROTOCOL_NAMES[PROTOCOL_TLSv1_1] = "TLSv1.1"
_PROTOCOL_NAMES[PROTOCOL_TLSv1_2] = "TLSv1.2"
if sys.platform == "win32":
from _ssl import enum_certificates, enum_crls
from socket import socket, AF_INET, SOCK_STREAM, create_connection
from socket import SOL_SOCKET, SO_TYPE
import base64 # for DER-to-PEM translation
import errno
socket_error = OSError # keep that public name in module namespace
if _ssl.HAS_TLS_UNIQUE:
CHANNEL_BINDING_TYPES = ['tls-unique']
else:
CHANNEL_BINDING_TYPES = []
# Disable weak or insecure ciphers by default
# (OpenSSL's default setting is 'DEFAULT:!aNULL:!eNULL')
# Enable a better set of ciphers by default
# This list has been explicitly chosen to:
# * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
# * Prefer ECDHE over DHE for better performance
# * Prefer any AES-GCM over any AES-CBC for better performance and security
# * Then use HIGH cipher suites as a fallback
# * Then use 3DES as a fallback, which is secure but slow
# * Finally use RC4 as a fallback, which is problematic but needed for
#   compatibility sometimes.
# * Disable NULL authentication, NULL encryption, and MD5 MACs for security
# reasons
_DEFAULT_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'
'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'
)
# Restricted and more secure ciphers for the server side
# This list has been explicitly chosen to:
# * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
# * Prefer ECDHE over DHE for better performance
# * Prefer any AES-GCM over any AES-CBC for better performance and security
# * Then use HIGH cipher suites as a fallback
# * Then use 3DES as a fallback, which is secure but slow
# * Disable NULL authentication, NULL encryption, MD5 MACs, DSS, and RC4 for
# security reasons
_RESTRICTED_SERVER_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5:!DSS:!RC4'
)
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
leftmost, *remainder = dn.split(r'.')
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
        # than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
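# A minimal, illustrative sketch of _dnsname_match() behaviour; the helper
# below and the host names it uses are hypothetical and not part of the
# module API.
def _demo_dnsname_match():
    # A name without wildcards only matches itself (case-insensitively).
    assert _dnsname_match('example.com', 'EXAMPLE.com')
    # A left-most '*' matches exactly one non-empty label.
    assert _dnsname_match('*.example.com', 'www.example.com')
    assert not _dnsname_match('*.example.com', 'a.b.example.com')
    # More than max_wildcards wildcards in one fragment is refused.
    try:
        _dnsname_match('a*b*.example.com', 'axxbxx.example.com')
    except CertificateError:
        pass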
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
DefaultVerifyPaths = namedtuple("DefaultVerifyPaths",
"cafile capath openssl_cafile_env openssl_cafile openssl_capath_env "
"openssl_capath")
def get_default_verify_paths():
"""Return paths to default cafile and capath.
"""
parts = _ssl.get_default_verify_paths()
# environment vars shadow paths
cafile = os.environ.get(parts[0], parts[1])
capath = os.environ.get(parts[2], parts[3])
return DefaultVerifyPaths(cafile if os.path.isfile(cafile) else None,
capath if os.path.isdir(capath) else None,
*parts)
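# Quick sketch: inspect where OpenSSL looks for CA material by default.  The
# cafile/capath fields are None when the underlying file/directory is absent.
def _demo_default_verify_paths():
    paths = get_default_verify_paths()
    print(paths.cafile, paths.capath)
    print(paths.openssl_cafile_env, paths.openssl_cafile)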
class _ASN1Object(namedtuple("_ASN1Object", "nid shortname longname oid")):
"""ASN.1 object identifier lookup
"""
__slots__ = ()
def __new__(cls, oid):
return super().__new__(cls, *_txt2obj(oid, name=False))
@classmethod
def fromnid(cls, nid):
"""Create _ASN1Object from OpenSSL numeric ID
"""
return super().__new__(cls, *_nid2obj(nid))
@classmethod
def fromname(cls, name):
"""Create _ASN1Object from short name, long name or OID
"""
return super().__new__(cls, *_txt2obj(name, name=True))
class Purpose(_ASN1Object, _Enum):
"""SSLContext purpose flags with X509v3 Extended Key Usage objects
"""
SERVER_AUTH = '1.3.6.1.5.5.7.3.1'
CLIENT_AUTH = '1.3.6.1.5.5.7.3.2'
class SSLContext(_SSLContext):
"""An SSLContext holds various SSL-related configuration options and
data, such as certificates and possibly a private key."""
__slots__ = ('protocol', '__weakref__')
_windows_cert_stores = ("CA", "ROOT")
def __new__(cls, protocol, *args, **kwargs):
self = _SSLContext.__new__(cls, protocol)
if protocol != _SSLv2_IF_EXISTS:
self.set_ciphers(_DEFAULT_CIPHERS)
return self
def __init__(self, protocol):
self.protocol = protocol
def wrap_socket(self, sock, server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
server_hostname=None):
return SSLSocket(sock=sock, server_side=server_side,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs,
server_hostname=server_hostname,
_context=self)
def set_npn_protocols(self, npn_protocols):
protos = bytearray()
for protocol in npn_protocols:
b = bytes(protocol, 'ascii')
if len(b) == 0 or len(b) > 255:
raise SSLError('NPN protocols must be 1 to 255 in length')
protos.append(len(b))
protos.extend(b)
self._set_npn_protocols(protos)
def _load_windows_store_certs(self, storename, purpose):
certs = bytearray()
for cert, encoding, trust in enum_certificates(storename):
# CA certs are never PKCS#7 encoded
if encoding == "x509_asn":
if trust is True or purpose.oid in trust:
certs.extend(cert)
self.load_verify_locations(cadata=certs)
return certs
def load_default_certs(self, purpose=Purpose.SERVER_AUTH):
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
if sys.platform == "win32":
for storename in self._windows_cert_stores:
self._load_windows_store_certs(storename, purpose)
else:
self.set_default_verify_paths()
def create_default_context(purpose=Purpose.SERVER_AUTH, *, cafile=None,
capath=None, cadata=None):
"""Create a SSLContext object with default settings.
NOTE: The protocol and settings may change anytime without prior
deprecation. The values represent a fair balance between maximum
compatibility and security.
"""
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
context = SSLContext(PROTOCOL_SSLv23)
# SSLv2 considered harmful.
context.options |= OP_NO_SSLv2
# SSLv3 has problematic security and is only required for really old
# clients such as IE6 on Windows XP
context.options |= OP_NO_SSLv3
# disable compression to prevent CRIME attacks (OpenSSL 1.0+)
context.options |= getattr(_ssl, "OP_NO_COMPRESSION", 0)
if purpose == Purpose.SERVER_AUTH:
# verify certs and host name in client mode
context.verify_mode = CERT_REQUIRED
context.check_hostname = True
elif purpose == Purpose.CLIENT_AUTH:
# Prefer the server's ciphers by default so that we get stronger
# encryption
context.options |= getattr(_ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
# Use single use keys in order to improve forward secrecy
context.options |= getattr(_ssl, "OP_SINGLE_DH_USE", 0)
context.options |= getattr(_ssl, "OP_SINGLE_ECDH_USE", 0)
# disallow ciphers with known vulnerabilities
context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)
if cafile or capath or cadata:
context.load_verify_locations(cafile, capath, cadata)
elif context.verify_mode != CERT_NONE:
# no explicit cafile, capath or cadata but the verify mode is
# CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
# root CA certificates for the given purpose. This may fail silently.
context.load_default_certs(purpose)
return context
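# Sketch of typical client-side use of create_default_context(): wrap a TCP
# connection with certificate verification and hostname checking enabled.
# The host name below is hypothetical.
def _demo_client_context(hostname="example.com", port=443):
    context = create_default_context(Purpose.SERVER_AUTH)
    with create_connection((hostname, port)) as sock:
        with context.wrap_socket(sock, server_hostname=hostname) as ssock:
            print(ssock.cipher())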
def _create_stdlib_context(protocol=PROTOCOL_SSLv23, *, cert_reqs=None,
check_hostname=False, purpose=Purpose.SERVER_AUTH,
certfile=None, keyfile=None,
cafile=None, capath=None, cadata=None):
"""Create a SSLContext object for Python stdlib modules
All Python stdlib modules shall use this function to create SSLContext
objects in order to keep common settings in one place. The configuration
    is less restrictive than create_default_context()'s to increase backward
compatibility.
"""
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
context = SSLContext(protocol)
# SSLv2 considered harmful.
context.options |= OP_NO_SSLv2
if cert_reqs is not None:
context.verify_mode = cert_reqs
context.check_hostname = check_hostname
if keyfile and not certfile:
raise ValueError("certfile must be specified")
if certfile or keyfile:
context.load_cert_chain(certfile, keyfile)
# load CA root certs
if cafile or capath or cadata:
context.load_verify_locations(cafile, capath, cadata)
elif context.verify_mode != CERT_NONE:
# no explicit cafile, capath or cadata but the verify mode is
# CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
# root CA certificates for the given purpose. This may fail silently.
context.load_default_certs(purpose)
return context
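# Sketch of how a stdlib client module might call _create_stdlib_context():
# verification stays off by default for backwards compatibility and is only
# enabled when cert_reqs/cafile are supplied.  The CA bundle path below is
# hypothetical.
def _demo_stdlib_context():
    plain = _create_stdlib_context()
    verified = _create_stdlib_context(cert_reqs=CERT_REQUIRED,
                                      cafile="/path/to/ca-bundle.crt")
    return plain, verified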
class SSLSocket(socket):
"""This class implements a subtype of socket.socket that wraps
the underlying OS socket in an SSL context when necessary, and
provides read and write methods over that channel."""
def __init__(self, sock=None, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None,
suppress_ragged_eofs=True, npn_protocols=None, ciphers=None,
server_hostname=None,
_context=None):
if _context:
self._context = _context
else:
if server_side and not certfile:
raise ValueError("certfile must be specified for server-side "
"operations")
if keyfile and not certfile:
raise ValueError("certfile must be specified")
if certfile and not keyfile:
keyfile = certfile
self._context = SSLContext(ssl_version)
self._context.verify_mode = cert_reqs
if ca_certs:
self._context.load_verify_locations(ca_certs)
if certfile:
self._context.load_cert_chain(certfile, keyfile)
if npn_protocols:
self._context.set_npn_protocols(npn_protocols)
if ciphers:
self._context.set_ciphers(ciphers)
self.keyfile = keyfile
self.certfile = certfile
self.cert_reqs = cert_reqs
self.ssl_version = ssl_version
self.ca_certs = ca_certs
self.ciphers = ciphers
# Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
# mixed in.
if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
raise NotImplementedError("only stream sockets are supported")
if server_side and server_hostname:
raise ValueError("server_hostname can only be specified "
"in client mode")
if self._context.check_hostname and not server_hostname:
if HAS_SNI:
raise ValueError("check_hostname requires server_hostname")
else:
raise ValueError("check_hostname requires server_hostname, "
"but it's not supported by your OpenSSL "
"library")
self.server_side = server_side
self.server_hostname = server_hostname
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
if sock is not None:
socket.__init__(self,
family=sock.family,
type=sock.type,
proto=sock.proto,
fileno=sock.fileno())
self.settimeout(sock.gettimeout())
sock.detach()
elif fileno is not None:
socket.__init__(self, fileno=fileno)
else:
socket.__init__(self, family=family, type=type, proto=proto)
# See if we are connected
try:
self.getpeername()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
connected = False
else:
connected = True
self._closed = False
self._sslobj = None
self._connected = connected
if connected:
# create the SSL object
try:
self._sslobj = self._context._wrap_socket(self, server_side,
server_hostname)
if do_handshake_on_connect:
timeout = self.gettimeout()
if timeout == 0.0:
# non-blocking
raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets")
self.do_handshake()
except (OSError, ValueError):
self.close()
raise
@property
def context(self):
return self._context
@context.setter
def context(self, ctx):
self._context = ctx
self._sslobj.context = ctx
def dup(self):
        raise NotImplementedError("Can't dup() %s instances" %
                                  self.__class__.__name__)
def _checkClosed(self, msg=None):
# raise an exception here if you wish to check for spurious closes
pass
def _check_connected(self):
if not self._connected:
# getpeername() will raise ENOTCONN if the socket is really
# not connected; note that we can be connected even without
# _connected being set, e.g. if connect() first returned
# EAGAIN.
self.getpeername()
def read(self, len=0, buffer=None):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
self._checkClosed()
if not self._sslobj:
raise ValueError("Read on closed or unwrapped SSL socket.")
try:
if buffer is not None:
v = self._sslobj.read(len, buffer)
else:
v = self._sslobj.read(len or 1024)
return v
except SSLError as x:
if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
if buffer is not None:
return 0
else:
return b''
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
self._checkClosed()
if not self._sslobj:
raise ValueError("Write on closed or unwrapped SSL socket.")
return self._sslobj.write(data)
def getpeercert(self, binary_form=False):
"""Returns a formatted version of the data in the
certificate provided by the other end of the SSL channel.
Return None if no certificate was provided, {} if a
certificate was provided, but not validated."""
self._checkClosed()
self._check_connected()
return self._sslobj.peer_certificate(binary_form)
def selected_npn_protocol(self):
self._checkClosed()
if not self._sslobj or not _ssl.HAS_NPN:
return None
else:
return self._sslobj.selected_npn_protocol()
def cipher(self):
self._checkClosed()
if not self._sslobj:
return None
else:
return self._sslobj.cipher()
def compression(self):
self._checkClosed()
if not self._sslobj:
return None
else:
return self._sslobj.compression()
def send(self, data, flags=0):
self._checkClosed()
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to send() on %s" %
self.__class__)
try:
v = self._sslobj.write(data)
except SSLError as x:
if x.args[0] == SSL_ERROR_WANT_READ:
return 0
elif x.args[0] == SSL_ERROR_WANT_WRITE:
return 0
else:
raise
else:
return v
else:
return socket.send(self, data, flags)
def sendto(self, data, flags_or_addr, addr=None):
self._checkClosed()
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
elif addr is None:
return socket.sendto(self, data, flags_or_addr)
else:
return socket.sendto(self, data, flags_or_addr, addr)
def sendmsg(self, *args, **kwargs):
# Ensure programs don't send data unencrypted if they try to
# use this method.
raise NotImplementedError("sendmsg not allowed on instances of %s" %
self.__class__)
def sendall(self, data, flags=0):
self._checkClosed()
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
while (count < amount):
v = self.send(data[count:])
count += v
return amount
else:
return socket.sendall(self, data, flags)
def recv(self, buflen=1024, flags=0):
self._checkClosed()
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv() on %s" %
self.__class__)
return self.read(buflen)
else:
return socket.recv(self, buflen, flags)
def recv_into(self, buffer, nbytes=None, flags=0):
self._checkClosed()
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv_into() on %s" %
self.__class__)
return self.read(nbytes, buffer)
else:
return socket.recv_into(self, buffer, nbytes, flags)
def recvfrom(self, buflen=1024, flags=0):
self._checkClosed()
if self._sslobj:
raise ValueError("recvfrom not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom(self, buflen, flags)
def recvfrom_into(self, buffer, nbytes=None, flags=0):
self._checkClosed()
if self._sslobj:
raise ValueError("recvfrom_into not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom_into(self, buffer, nbytes, flags)
def recvmsg(self, *args, **kwargs):
raise NotImplementedError("recvmsg not allowed on instances of %s" %
self.__class__)
def recvmsg_into(self, *args, **kwargs):
raise NotImplementedError("recvmsg_into not allowed on instances of "
"%s" % self.__class__)
def pending(self):
self._checkClosed()
if self._sslobj:
return self._sslobj.pending()
else:
return 0
def shutdown(self, how):
self._checkClosed()
self._sslobj = None
socket.shutdown(self, how)
def unwrap(self):
if self._sslobj:
s = self._sslobj.shutdown()
self._sslobj = None
return s
else:
raise ValueError("No SSL wrapper around " + str(self))
def _real_close(self):
self._sslobj = None
socket._real_close(self)
def do_handshake(self, block=False):
"""Perform a TLS/SSL handshake."""
self._check_connected()
timeout = self.gettimeout()
try:
if timeout == 0.0 and block:
self.settimeout(None)
self._sslobj.do_handshake()
finally:
self.settimeout(timeout)
if self.context.check_hostname:
if not self.server_hostname:
raise ValueError("check_hostname needs server_hostname "
"argument")
match_hostname(self.getpeercert(), self.server_hostname)
def _real_connect(self, addr, connect_ex):
if self.server_side:
raise ValueError("can't connect in server-side mode")
# Here we assume that the socket is client-side, and not
# connected at the time of the call. We connect it, then wrap it.
if self._connected:
raise ValueError("attempt to connect already-connected SSLSocket!")
self._sslobj = self.context._wrap_socket(self, False, self.server_hostname)
try:
if connect_ex:
rc = socket.connect_ex(self, addr)
else:
rc = None
socket.connect(self, addr)
if not rc:
self._connected = True
if self.do_handshake_on_connect:
self.do_handshake()
return rc
except (OSError, ValueError):
self._sslobj = None
raise
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
self._real_connect(addr, False)
def connect_ex(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
return self._real_connect(addr, True)
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
newsock = self.context.wrap_socket(newsock,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs,
server_side=True)
return newsock, addr
def get_channel_binding(self, cb_type="tls-unique"):
"""Get channel binding data for current connection. Raise ValueError
if the requested `cb_type` is not supported. Return bytes of the data
or None if the data is not available (e.g. before the handshake).
"""
if cb_type not in CHANNEL_BINDING_TYPES:
raise ValueError("Unsupported channel binding type")
if cb_type != "tls-unique":
raise NotImplementedError(
"{0} channel binding type not implemented"
.format(cb_type))
if self._sslobj is None:
return None
return self._sslobj.tls_unique_cb()
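# Sketch: after the handshake has completed, per-connection "tls-unique"
# channel binding data can be read from an SSLSocket; None is returned before
# the handshake.  The ssl_sock argument is assumed to be a connected SSLSocket.
def _demo_channel_binding(ssl_sock):
    if "tls-unique" in CHANNEL_BINDING_TYPES:
        return ssl_sock.get_channel_binding("tls-unique")
    return None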
def wrap_socket(sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
ciphers=None):
return SSLSocket(sock=sock, keyfile=keyfile, certfile=certfile,
server_side=server_side, cert_reqs=cert_reqs,
ssl_version=ssl_version, ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs,
ciphers=ciphers)
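# Sketch of the legacy module-level wrap_socket() helper, which builds a
# throwaway SSLContext per call; create_default_context() above is usually
# preferable.  The CA bundle path is hypothetical.
def _demo_wrap_socket(sock):
    return wrap_socket(sock,
                       cert_reqs=CERT_REQUIRED,
                       ca_certs="/path/to/ca-bundle.crt")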
# some utility functions
def cert_time_to_seconds(cert_time):
"""Takes a date-time string in standard ASN1_print form
("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return
a Python time value in seconds past the epoch."""
import time
return time.mktime(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT"))
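# Sketch: the notBefore/notAfter strings found in decoded certificates can be
# passed straight to cert_time_to_seconds().  The result depends on the local
# timezone because time.mktime() is used.
def _demo_cert_time():
    return cert_time_to_seconds("May  9 00:00:00 2007 GMT")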
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
"""Takes a certificate in binary DER format and returns the
PEM version of it as a string."""
f = str(base64.standard_b64encode(der_cert_bytes), 'ASCII', 'strict')
return (PEM_HEADER + '\n' +
textwrap.fill(f, 64) + '\n' +
PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
"""Takes a certificate in ASCII PEM format and returns the
DER-encoded version of it as a byte sequence"""
if not pem_cert_string.startswith(PEM_HEADER):
raise ValueError("Invalid PEM encoding; must start with %s"
% PEM_HEADER)
if not pem_cert_string.strip().endswith(PEM_FOOTER):
raise ValueError("Invalid PEM encoding; must end with %s"
% PEM_FOOTER)
d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
return base64.decodebytes(d.encode('ASCII', 'strict'))
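# Round-trip sketch for the DER/PEM helpers above; der_bytes stands in for
# certificate data obtained e.g. from getpeercert(binary_form=True).
def _demo_der_pem_roundtrip(der_bytes):
    pem = DER_cert_to_PEM_cert(der_bytes)
    assert PEM_cert_to_DER_cert(pem) == der_bytes
    return pem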
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
"""Retrieve the certificate from the server at the specified address,
and return it as a PEM-encoded string.
If 'ca_certs' is specified, validate the server cert against it.
If 'ssl_version' is specified, use it in the connection attempt."""
host, port = addr
if ca_certs is not None:
cert_reqs = CERT_REQUIRED
else:
cert_reqs = CERT_NONE
context = _create_stdlib_context(ssl_version,
cert_reqs=cert_reqs,
cafile=ca_certs)
with create_connection(addr) as sock:
with context.wrap_socket(sock) as sslsock:
dercert = sslsock.getpeercert(True)
return DER_cert_to_PEM_cert(dercert)
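# Usage sketch for get_server_certificate(); without ca_certs no validation
# is performed.  The host below is hypothetical.
def _demo_get_server_certificate():
    pem = get_server_certificate(("example.com", 443))
    print(pem)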
def get_protocol_name(protocol_code):
return _PROTOCOL_NAMES.get(protocol_code, '<unknown>')
| lgpl-3.0 |
gnarula/eden_deployment | modules/s3db/msg.py | 1 | 88933 | # -*- coding: utf-8 -*-
""" Sahana Eden Messaging Model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3ChannelModel",
"S3MessageModel",
"S3MessageAttachmentModel",
"S3EmailModel",
"S3FacebookModel",
"S3MCommonsModel",
"S3ParsingModel",
"S3RSSModel",
"S3SMSModel",
"S3SMSOutboundModel",
"S3TropoModel",
"S3TwilioModel",
"S3TwitterModel",
"S3TwitterSearchModel",
"S3XFormsModel",
"S3BaseStationModel",
)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
class S3ChannelModel(S3Model):
"""
Messaging Channels
- all Inbound & Outbound channels for messages are instances of this
super-entity
"""
names = ("msg_channel",
"msg_channel_limit",
"msg_channel_status",
"msg_channel_id",
"msg_channel_enable",
"msg_channel_disable",
"msg_channel_enable_interactive",
"msg_channel_disable_interactive",
"msg_channel_onaccept",
)
def model(self):
T = current.T
db = current.db
define_table = self.define_table
#----------------------------------------------------------------------
# Super entity: msg_channel
#
channel_types = Storage(msg_email_channel = T("Email (Inbound)"),
msg_facebook_channel = T("Facebook"),
msg_mcommons_channel = T("Mobile Commons (Inbound)"),
msg_rss_channel = T("RSS Feed"),
msg_sms_modem_channel = T("SMS Modem"),
msg_sms_webapi_channel = T("SMS WebAPI (Outbound)"),
msg_sms_smtp_channel = T("SMS via SMTP (Outbound)"),
msg_tropo_channel = T("Tropo"),
msg_twilio_channel = T("Twilio (Inbound)"),
msg_twitter_channel = T("Twitter"),
)
tablename = "msg_channel"
self.super_entity(tablename, "channel_id",
channel_types,
Field("name",
#label = T("Name"),
),
Field("description",
#label = T("Description"),
),
Field("enabled", "boolean",
default = True,
#label = T("Enabled?")
#represent = s3_yes_no_represent,
),
# @ToDo: Indicate whether channel can be used for Inbound or Outbound
#Field("inbound", "boolean",
# label = T("Inbound?")),
#Field("outbound", "boolean",
# label = T("Outbound?")),
)
# @todo: make lazy_table
table = db[tablename]
table.instance_type.readable = True
# Reusable Field
channel_id = S3ReusableField("channel_id", "reference %s" % tablename,
label = T("Channel"),
ondelete = "SET NULL",
represent = S3Represent(lookup=tablename),
requires = IS_EMPTY_OR(
IS_ONE_OF_EMPTY(db, "msg_channel.id")),
)
self.add_components(tablename,
msg_channel_status = "channel_id",
)
# ---------------------------------------------------------------------
# Channel Limit
# Used to limit the number of emails sent from the system
# - works by simply recording an entry for the timestamp to be checked against
#
# - currently just used by msg.send_email()
#
tablename = "msg_channel_limit"
define_table(tablename,
# @ToDo: Make it per-channel
#channel_id(),
*s3_timestamp())
# ---------------------------------------------------------------------
# Channel Status
# Used to record errors encountered in the Channel
#
tablename = "msg_channel_status"
define_table(tablename,
channel_id(),
Field("status",
#label = T("Status")
#represent = s3_yes_no_represent,
),
*s3_meta_fields())
# ---------------------------------------------------------------------
return dict(msg_channel_id = channel_id,
msg_channel_enable = self.channel_enable,
msg_channel_disable = self.channel_disable,
msg_channel_enable_interactive = self.channel_enable_interactive,
msg_channel_disable_interactive = self.channel_disable_interactive,
msg_channel_onaccept = self.channel_onaccept,
msg_channel_poll = self.channel_poll,
)
# -------------------------------------------------------------------------
@staticmethod
def channel_enable(tablename, channel_id):
"""
Enable a Channel
- Schedule a Poll for new messages
- Enable all associated Parsers
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
table = s3db.table(tablename)
record = db(table.channel_id == channel_id).select(table.id, # needed for update_record
table.enabled,
limitby=(0, 1),
).first()
if not record.enabled:
# Flag it as enabled
# Update Instance
record.update_record(enabled = True)
# Update Super
s3db.update_super(table, record)
# Enable all Parser tasks on this channel
ptable = s3db.msg_parser
query = (ptable.channel_id == channel_id) & \
(ptable.deleted == False)
parsers = db(query).select(ptable.id)
for parser in parsers:
s3db.msg_parser_enable(parser.id)
# Do we have an existing Task?
ttable = db.scheduler_task
args = '["%s", %s]' % (tablename, channel_id)
query = ((ttable.function_name == "msg_poll") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
return "Channel already enabled"
else:
current.s3task.schedule_task("msg_poll",
args = [tablename, channel_id],
period = 300, # seconds
timeout = 300, # seconds
repeats = 0 # unlimited
)
return "Channel enabled"
# -------------------------------------------------------------------------
@staticmethod
def channel_enable_interactive(r, **attr):
"""
Enable a Channel
- Schedule a Poll for new messages
S3Method for interactive requests
"""
tablename = r.tablename
result = current.s3db.msg_channel_enable(tablename, r.record.channel_id)
current.session.confirmation = result
fn = tablename.split("_", 1)[1]
redirect(URL(f=fn))
# -------------------------------------------------------------------------
@staticmethod
def channel_disable(tablename, channel_id):
"""
Disable a Channel
- Remove schedule for Polling for new messages
- Disable all associated Parsers
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
table = s3db.table(tablename)
record = db(table.channel_id == channel_id).select(table.id, # needed for update_record
table.enabled,
limitby=(0, 1),
).first()
if record.enabled:
# Flag it as disabled
# Update Instance
record.update_record(enabled = False)
# Update Super
s3db.update_super(table, record)
# Disable all Parser tasks on this channel
ptable = s3db.msg_parser
parsers = db(ptable.channel_id == channel_id).select(ptable.id)
for parser in parsers:
s3db.msg_parser_disable(parser.id)
# Do we have an existing Task?
ttable = db.scheduler_task
args = '["%s", %s]' % (tablename, channel_id)
query = ((ttable.function_name == "msg_poll") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
# Disable all
db(query).update(status="STOPPED")
return "Channel disabled"
else:
return "Channel already disabled"
# --------------------------------------------------------------------------
@staticmethod
def channel_disable_interactive(r, **attr):
"""
Disable a Channel
- Remove schedule for Polling for new messages
S3Method for interactive requests
"""
tablename = r.tablename
result = current.s3db.msg_channel_disable(tablename, r.record.channel_id)
current.session.confirmation = result
fn = tablename.split("_", 1)[1]
redirect(URL(f=fn))
# -------------------------------------------------------------------------
@staticmethod
def channel_onaccept(form):
"""
Process the Enabled Flag
"""
if form.record:
# Update form
            # Process only if changed
if form.record.enabled and not form.vars.enabled:
current.s3db.msg_channel_disable(form.table._tablename,
form.vars.channel_id)
elif form.vars.enabled and not form.record.enabled:
current.s3db.msg_channel_enable(form.table._tablename,
form.vars.channel_id)
else:
# Create form
# Process only if enabled
if form.vars.enabled:
current.s3db.msg_channel_enable(form.table._tablename,
form.vars.channel_id)
# -------------------------------------------------------------------------
@staticmethod
def channel_poll(r, **attr):
"""
Poll a Channel for new messages
S3Method for interactive requests
"""
tablename = r.tablename
current.s3task.async("msg_poll", args=[tablename, r.record.channel_id])
current.session.confirmation = \
current.T("The poll request has been submitted, so new messages should appear shortly - refresh to see them")
if tablename == "msg_email_channel":
fn = "email_inbox"
elif tablename == "msg_mcommons_channel":
fn = "sms_inbox"
elif tablename == "msg_rss_channel":
fn = "rss"
elif tablename == "msg_twilio_channel":
fn = "sms_inbox"
elif tablename == "msg_twitter_channel":
fn = "twitter_inbox"
else:
return "Unsupported channel: %s" % tablename
redirect(URL(f=fn))
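# A minimal sketch of driving the channel helpers registered by this model
# from a script; it assumes an initialised web2py/Eden environment (so that
# current.s3db is populated) and a hypothetical channel record id.
def _demo_msg_channel_toggle(channel_id=1):
    s3db = current.s3db
    # Schedule polling and enable any parsers attached to an email channel
    print(s3db.msg_channel_enable("msg_email_channel", channel_id))
    # Stop the scheduled polling again
    print(s3db.msg_channel_disable("msg_email_channel", channel_id))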
# =============================================================================
class S3MessageModel(S3Model):
"""
Messages
"""
names = ("msg_message",
"msg_message_id",
"msg_message_represent",
"msg_outbox",
)
def model(self):
T = current.T
db = current.db
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
configure = self.configure
define_table = self.define_table
# Message priority
msg_priority_opts = {3 : T("High"),
2 : T("Medium"),
1 : T("Low"),
}
# ---------------------------------------------------------------------
# Message Super Entity - all Inbound & Outbound Messages
#
message_types = Storage(msg_email = T("Email"),
msg_facebook = T("Facebook"),
msg_rss = T("RSS"),
msg_sms = T("SMS"),
msg_twitter = T("Twitter"),
msg_twitter_result = T("Twitter Search Results"),
)
tablename = "msg_message"
self.super_entity(tablename, "message_id",
message_types,
# Knowing which Channel Incoming Messages
# came in on allows correlation to Outbound
# messages (campaign_message, deployment_alert, etc)
self.msg_channel_id(),
s3_datetime(default="now"),
Field("body", "text",
label = T("Message"),
),
Field("from_address",
label = T("From"),
),
Field("to_address",
label = T("To"),
),
Field("inbound", "boolean",
default = False,
label = T("Direction"),
represent = lambda direction: \
(direction and [T("In")] or \
[T("Out")])[0],
),
)
# @todo: make lazy_table
table = db[tablename]
table.instance_type.readable = True
table.instance_type.writable = True
configure(tablename,
list_fields = ["instance_type",
"from_address",
"to_address",
"body",
"inbound",
],
)
# Reusable Field
message_represent = S3Represent(lookup=tablename, fields=["body"])
message_id = S3ReusableField("message_id", "reference %s" % tablename,
ondelete = "RESTRICT",
represent = message_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF_EMPTY(db, "msg_message.id")),
)
self.add_components(tablename,
msg_attachment = "message_id",
deploy_response = "message_id",
)
# ---------------------------------------------------------------------
# Outbound Messages
#
# Show only the supported messaging methods
MSG_CONTACT_OPTS = current.msg.MSG_CONTACT_OPTS
# Maximum number of retries to send a message
MAX_SEND_RETRIES = current.deployment_settings.get_msg_max_send_retries()
# Valid message outbox statuses
MSG_STATUS_OPTS = {1 : T("Unsent"),
2 : T("Sent"),
3 : T("Draft"),
4 : T("Invalid"),
5 : T("Failed"),
}
opt_msg_status = S3ReusableField("status", "integer",
notnull=True,
requires = IS_IN_SET(MSG_STATUS_OPTS,
zero=None),
default = 1,
label = T("Status"),
represent = lambda opt: \
MSG_STATUS_OPTS.get(opt,
UNKNOWN_OPT))
# Outbox - needs to be separate to Message since a single message
# sent needs different outbox entries for each recipient
tablename = "msg_outbox"
define_table(tablename,
# FK not instance
message_id(),
# Person/Group to send the message out to:
self.super_link("pe_id", "pr_pentity"),
# If set used instead of picking up from pe_id:
Field("address"),
Field("contact_method", length=32,
default = "EMAIL",
label = T("Contact Method"),
represent = lambda opt: \
MSG_CONTACT_OPTS.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(MSG_CONTACT_OPTS,
zero=None),
),
opt_msg_status(),
# Used to loop through a PE to get it's members
Field("system_generated", "boolean",
default = False,
),
                     # Give up if we can't send after MAX_SEND_RETRIES
Field("retries", "integer",
default = MAX_SEND_RETRIES,
readable = False,
writable = False,
),
*s3_meta_fields())
configure(tablename,
list_fields = ["id",
"message_id",
"pe_id",
"status",
],
orderby = "msg_outbox.created_on desc",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(msg_message_id = message_id,
msg_message_represent = message_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(msg_message_id = lambda **attr: dummy("message_id"),
)
# =============================================================================
class S3MessageAttachmentModel(S3Model):
"""
Message Attachments
- link table between msg_message & doc_document
"""
names = ("msg_attachment",)
def model(self):
# ---------------------------------------------------------------------
#
tablename = "msg_attachment"
self.define_table(tablename,
# FK not instance
self.msg_message_id(),
self.doc_document_id(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict()
# =============================================================================
class S3EmailModel(S3ChannelModel):
"""
Email
InBound Channels
Outbound Email is currently handled via deployment_settings
InBox/OutBox
"""
names = ("msg_email_channel",
"msg_email",
)
def model(self):
T = current.T
configure = self.configure
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# Email Inbound Channels
#
tablename = "msg_email_channel"
define_table(tablename,
# Instance
super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("server"),
Field("protocol",
requires = IS_IN_SET(["imap", "pop3"],
zero=None),
),
Field("use_ssl", "boolean"),
Field("port", "integer"),
Field("username"),
Field("password", "password", length=64,
readable = False,
requires = IS_NOT_EMPTY(),
),
# Set true to delete messages from the remote
# inbox after fetching them.
Field("delete_from_server", "boolean"),
*s3_meta_fields())
configure(tablename,
onaccept = self.msg_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "email_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "email_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "email_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# Email Messages: InBox & Outbox
#
sender = current.deployment_settings.get_mail_sender()
tablename = "msg_email"
define_table(tablename,
# Instance
super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default = "now"),
Field("subject", length=78, # RFC 2822
label = T("Subject"),
),
Field("body", "text",
label = T("Message"),
),
Field("from_address", #notnull=True,
default = sender,
label = T("Sender"),
requires = IS_EMAIL(),
),
Field("to_address",
label = T("To"),
requires = IS_EMAIL(),
),
Field("raw", "text",
label = T("Message Source"),
readable = False,
writable = False,
),
Field("inbound", "boolean",
default = False,
label = T("Direction"),
represent = lambda direction: \
(direction and [T("In")] or [T("Out")])[0],
),
*s3_meta_fields())
configure(tablename,
orderby = "msg_email.date desc",
super_entity = "msg_message",
)
# Components
self.add_components(tablename,
# Used to link to custom tab deploy_response_select_mission:
deploy_mission = {"name": "select",
"link": "deploy_response",
"joinby": "message_id",
"key": "mission_id",
"autodelete": False,
},
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3FacebookModel(S3ChannelModel):
"""
Facebook
Channels
InBox/OutBox
https://developers.facebook.com/docs/graph-api
"""
names = ("msg_facebook_channel",
"msg_facebook",
"msg_facebook_login",
)
def model(self):
T = current.T
configure = self.configure
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# Facebook Channels
#
tablename = "msg_facebook_channel"
define_table(tablename,
# Instance
super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("login", "boolean",
default = False,
label = T("Use for Login?"),
represent = s3_yes_no_represent,
),
Field("app_id", "bigint",
requires = IS_INT_IN_RANGE(0, +1e16)
),
Field("app_secret", "password", length=64,
readable = False,
requires = IS_NOT_EMPTY(),
),
# Optional
Field("page_id", "bigint",
requires = IS_INT_IN_RANGE(0, +1e16)
),
Field("page_access_token"),
*s3_meta_fields())
configure(tablename,
onaccept = self.msg_facebook_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "facebook_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "facebook_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
#set_method("msg", "facebook_channel",
# method = "poll",
# action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# Facebook Messages: InBox & Outbox
#
tablename = "msg_facebook"
define_table(tablename,
# Instance
super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default = "now"),
Field("body", "text",
label = T("Message"),
),
# @ToDo: Are from_address / to_address relevant in Facebook?
Field("from_address", #notnull=True,
#default = sender,
label = T("Sender"),
),
Field("to_address",
label = T("To"),
),
Field("inbound", "boolean",
default = False,
label = T("Direction"),
represent = lambda direction: \
(direction and [T("In")] or [T("Out")])[0],
),
*s3_meta_fields())
configure(tablename,
orderby = "msg_facebook.date desc",
super_entity = "msg_message",
)
# ---------------------------------------------------------------------
return dict(msg_facebook_login = self.msg_facebook_login,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for model-global names if module is disabled """
return dict(msg_facebook_login = lambda: False,
)
# -------------------------------------------------------------------------
@staticmethod
def msg_facebook_channel_onaccept(form):
if form.vars.login:
# Ensure only a single account used for Login
current.db(current.s3db.msg_facebook_channel.id != form.vars.id).update(login = False)
# Normal onaccept processing
S3ChannelModel.channel_onaccept(form)
# -------------------------------------------------------------------------
@staticmethod
def msg_facebook_login():
table = current.s3db.msg_facebook_channel
query = (table.login == True) & \
(table.deleted == False)
c = current.db(query).select(table.app_id,
table.app_secret,
limitby=(0, 1)
).first()
return c
# =============================================================================
class S3MCommonsModel(S3ChannelModel):
"""
Mobile Commons Inbound SMS Settings
- Outbound can use Web API
"""
names = ("msg_mcommons_channel",)
def model(self):
#T = current.T
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
tablename = "msg_mcommons_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
#label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("campaign_id", length=128, unique=True,
requires = IS_NOT_EMPTY(),
),
Field("url",
default = \
"https://secure.mcommons.com/api/messages",
requires = IS_URL()
),
Field("username",
requires = IS_NOT_EMPTY(),
),
Field("password", "password",
readable = False,
requires = IS_NOT_EMPTY(),
),
Field("query"),
Field("timestmp", "datetime",
writable = False,
),
*s3_meta_fields())
self.configure(tablename,
onaccept = self.msg_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "mcommons_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "mcommons_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "mcommons_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3ParsingModel(S3Model):
"""
Message Parsing Model
"""
names = ("msg_parser",
"msg_parsing_status",
"msg_session",
"msg_keyword",
"msg_sender",
"msg_parser_enabled",
"msg_parser_enable",
"msg_parser_disable",
"msg_parser_enable_interactive",
"msg_parser_disable_interactive",
)
def model(self):
T = current.T
define_table = self.define_table
set_method = self.set_method
channel_id = self.msg_channel_id
message_id = self.msg_message_id
# ---------------------------------------------------------------------
# Link between Message Channels and Parsers in parser.py
#
tablename = "msg_parser"
define_table(tablename,
# Source
channel_id(ondelete = "CASCADE"),
Field("function_name",
label = T("Parser"),
),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
*s3_meta_fields())
self.configure(tablename,
onaccept = self.msg_parser_onaccept,
)
set_method("msg", "parser",
method = "enable",
action = self.parser_enable_interactive)
set_method("msg", "parser",
method = "disable",
action = self.parser_disable_interactive)
set_method("msg", "parser",
method = "parse",
action = self.parser_parse)
# ---------------------------------------------------------------------
# Message parsing status
# - component to core msg_message table
#
tablename = "msg_parsing_status"
define_table(tablename,
# Component, not Instance
message_id(ondelete = "CASCADE"),
# Source
channel_id(ondelete = "CASCADE"),
Field("is_parsed", "boolean",
default = False,
label = T("Parsing Status"),
represent = lambda parsed: \
(parsed and [T("Parsed")] or \
[T("Not Parsed")])[0],
),
message_id("reply_id",
label = T("Reply"),
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Login sessions for Message Parsing
# - links a from_address with a login until expiry
#
tablename = "msg_session"
define_table(tablename,
Field("from_address"),
Field("email"),
Field("created_datetime", "datetime",
default = current.request.utcnow,
),
Field("expiration_time", "integer"),
Field("is_expired", "boolean",
default = False,
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Keywords for Message Parsing
#
tablename = "msg_keyword"
define_table(tablename,
Field("keyword",
label = T("Keyword"),
),
# @ToDo: Move this to a link table
self.event_incident_type_id(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Senders for Message Parsing
# - whitelist / blacklist / prioritise
#
tablename = "msg_sender"
define_table(tablename,
Field("sender",
label = T("Sender"),
),
# @ToDo: Make pe_id work for this
#self.super_link("pe_id", "pr_pentity"),
Field("priority", "integer",
label = T("Priority"),
),
*s3_meta_fields())
# ---------------------------------------------------------------------
return dict(msg_parser_enabled = self.parser_enabled,
msg_parser_enable = self.parser_enable,
msg_parser_disable = self.parser_disable,
)
# -----------------------------------------------------------------------------
@staticmethod
def parser_parse(r, **attr):
"""
Parse unparsed messages
S3Method for interactive requests
"""
record = r.record
current.s3task.async("msg_parse", args=[record.channel_id, record.function_name])
current.session.confirmation = \
current.T("The parse request has been submitted")
redirect(URL(f="parser"))
# -------------------------------------------------------------------------
@staticmethod
def parser_enabled(channel_id):
"""
Helper function to see if there is a Parser connected to a Channel
- used to determine whether to populate the msg_parsing_status table
"""
table = current.s3db.msg_parser
record = current.db(table.channel_id == channel_id).select(table.enabled,
limitby=(0, 1),
).first()
if record and record.enabled:
return True
else:
return False
# -------------------------------------------------------------------------
@staticmethod
def parser_enable(id):
"""
Enable a Parser
- Connect a Parser to a Channel
CLI API for shell scripts & to be called by S3Method
@ToDo: Ensure only 1 Parser is connected to any Channel at a time
"""
db = current.db
s3db = current.s3db
table = s3db.msg_parser
record = db(table.id == id).select(table.id, # needed for update_record
table.enabled,
table.channel_id,
table.function_name,
limitby=(0, 1),
).first()
if not record.enabled:
# Flag it as enabled
record.update_record(enabled = True)
channel_id = record.channel_id
function_name = record.function_name
# Do we have an existing Task?
ttable = db.scheduler_task
args = '[%s, "%s"]' % (channel_id, function_name)
query = ((ttable.function_name == "msg_parse") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
return "Parser already enabled"
else:
current.s3task.schedule_task("msg_parse",
args = [channel_id, function_name],
period = 300, # seconds
timeout = 300, # seconds
repeats = 0 # unlimited
)
return "Parser enabled"
# -------------------------------------------------------------------------
@staticmethod
def parser_enable_interactive(r, **attr):
"""
Enable a Parser
- Connect a Parser to a Channel
S3Method for interactive requests
"""
result = current.s3db.msg_parser_enable(r.id)
current.session.confirmation = result
redirect(URL(f="parser"))
# -------------------------------------------------------------------------
@staticmethod
def parser_disable(id):
"""
Disable a Parser
- Disconnect a Parser from a Channel
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
table = s3db.msg_parser
record = db(table.id == id).select(table.id, # needed for update_record
table.enabled,
table.channel_id,
table.function_name,
limitby=(0, 1),
).first()
if record.enabled:
# Flag it as disabled
record.update_record(enabled = False)
# Do we have an existing Task?
ttable = db.scheduler_task
args = '[%s, "%s"]' % (record.channel_id, record.function_name)
query = ((ttable.function_name == "msg_parse") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby=(0, 1)).first()
if exists:
# Disable all
db(query).update(status="STOPPED")
return "Parser disabled"
else:
return "Parser already disabled"
# -------------------------------------------------------------------------
@staticmethod
def parser_disable_interactive(r, **attr):
"""
Disable a Parser
- Disconnect a Parser from a Channel
S3Method for interactive requests
"""
result = current.s3db.msg_parser_disable(r.id)
current.session.confirmation = result
redirect(URL(f="parser"))
# -------------------------------------------------------------------------
@staticmethod
def msg_parser_onaccept(form):
"""
Process the Enabled Flag
"""
if form.record:
# Update form
            # Process only if changed
if form.record.enabled and not form.vars.enabled:
current.s3db.msg_parser_disable(form.vars.id)
elif form.vars.enabled and not form.record.enabled:
current.s3db.msg_parser_enable(form.vars.id)
else:
# Create form
# Process only if enabled
if form.vars.enabled:
current.s3db.msg_parser_enable(form.vars.id)
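# Sketch of enabling a parser outside the web UI, mirroring the call made in
# S3ChannelModel.channel_enable(); assumes an initialised Eden environment
# and a hypothetical msg_parser record id.
def _demo_msg_parser_enable(parser_id=1):
    print(current.s3db.msg_parser_enable(parser_id))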
# =============================================================================
class S3RSSModel(S3ChannelModel):
"""
RSS channel
"""
names = ("msg_rss_channel",
"msg_rss",
)
def model(self):
T = current.T
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# RSS Settings for an account
#
tablename = "msg_rss_channel"
define_table(tablename,
# Instance
super_link("channel_id", "msg_channel"),
Field("name", length=255, unique=True,
label = T("Name"),
),
Field("description",
label = T("Description"),
),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("url",
label = T("URL"),
requires = IS_URL(),
),
s3_datetime(label = T("Last Polled"),
writable = False,
),
Field("etag",
label = T("ETag"),
writable = False
),
*s3_meta_fields())
self.configure(tablename,
list_fields = ["name",
"description",
"enabled",
"url",
"date",
"channel_status.status",
],
onaccept = self.msg_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "rss_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "rss_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "rss_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# RSS Feed Posts
#
tablename = "msg_rss"
define_table(tablename,
# Instance
super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default="now",
label = T("Published on"),
),
Field("title",
label = T("Title"),
),
Field("body", "text",
label = T("Content"),
),
Field("from_address",
label = T("Link"),
),
# http://pythonhosted.org/feedparser/reference-feed-author_detail.html
Field("author",
label = T("Author"),
),
# http://pythonhosted.org/feedparser/reference-entry-tags.html
Field("tags", "list:string",
label = T("Tags"),
),
self.gis_location_id(),
# Just present for Super Entity
Field("inbound", "boolean",
default = True,
readable = False,
writable = False,
),
*s3_meta_fields())
self.configure(tablename,
deduplicate = self.msg_rss_duplicate,
list_fields = ["channel_id",
"title",
"from_address",
"date",
"body"
],
super_entity = current.s3db.msg_message,
)
# ---------------------------------------------------------------------
return dict()
# ---------------------------------------------------------------------
@staticmethod
def msg_rss_duplicate(item):
"""
Import item deduplication, match by link (from_address)
@param item: the S3ImportItem instance
"""
if item.tablename == "msg_rss":
table = item.table
from_address = item.data.get("from_address")
query = (table.from_address == from_address)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3SMSModel(S3Model):
"""
SMS: Short Message Service
These can be received through a number of different gateways
- MCommons
- Modem (@ToDo: Restore this)
- Tropo
- Twilio
"""
names = ("msg_sms",)
def model(self):
#T = current.T
user = current.auth.user
if user and user.organisation_id:
# SMS Messages need to be tagged to their org so that they can be sent through the correct gateway
default = user.organisation_id
else:
default = None
# ---------------------------------------------------------------------
# SMS Messages: InBox & Outbox
#
tablename = "msg_sms"
self.define_table(tablename,
# Instance
self.super_link("message_id", "msg_message"),
self.msg_channel_id(),
self.org_organisation_id(default = default),
s3_datetime(default="now"),
Field("body", "text",
# Allow multi-part SMS
#length = 160,
#label = T("Message"),
),
Field("from_address",
#label = T("Sender"),
),
Field("to_address",
#label = T("To"),
),
Field("inbound", "boolean",
default = False,
#represent = lambda direction: \
# (direction and [T("In")] or \
# [T("Out")])[0],
#label = T("Direction")),
),
# Used e.g. for Clickatell
Field("remote_id",
#label = T("Remote ID"),
),
*s3_meta_fields())
self.configure(tablename,
super_entity = "msg_message",
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3SMSOutboundModel(S3Model):
"""
SMS: Short Message Service
- Outbound Channels
These can be sent through a number of different gateways
- Modem
- SMTP
- Tropo
- Web API (inc Clickatell, MCommons, mVaayoo)
"""
names = ("msg_sms_outbound_gateway",
"msg_sms_modem_channel",
"msg_sms_smtp_channel",
"msg_sms_webapi_channel",
)
def model(self):
#T = current.T
configure = self.configure
define_table = self.define_table
# ---------------------------------------------------------------------
# SMS Outbound Gateway
# - select which gateway is in active use for which Organisation/Branch
#
tablename = "msg_sms_outbound_gateway"
define_table(tablename,
self.msg_channel_id(
requires = IS_ONE_OF(current.db, "msg_channel.channel_id",
S3Represent(lookup="msg_channel"),
instance_types = ("msg_sms_modem_channel",
"msg_sms_webapi_channel",
"msg_sms_smtp_channel",
),
sort = True,
),
),
#Field("outgoing_sms_handler", length=32,
# requires = IS_IN_SET(current.msg.GATEWAY_OPTS,
# zero = None),
# ),
# Allow selection of different gateways based on Organisation/Branch
self.org_organisation_id(),
# @ToDo: Allow selection of different gateways based on destination Location
#self.gis_location_id(),
# @ToDo: Allow addition of relevant country code (currently in deployment_settings)
#Field("default_country_code", "integer",
# default = 44),
*s3_meta_fields())
# ---------------------------------------------------------------------
# SMS Modem Channel
#
tablename = "msg_sms_modem_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("modem_port"),
Field("modem_baud", "integer",
default = 115200,
),
Field("enabled", "boolean",
default = True,
),
Field("max_length", "integer",
default = 160,
),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_channel",
)
# ---------------------------------------------------------------------
# SMS via SMTP Channel
#
tablename = "msg_sms_smtp_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("address", length=64,
requires = IS_NOT_EMPTY(),
),
Field("subject", length=64),
Field("enabled", "boolean",
default = True,
),
Field("max_length", "integer",
default = 160,
),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_channel",
)
# ---------------------------------------------------------------------
# Settings for Web API services
#
# @ToDo: Simplified dropdown of services which prepopulates entries & provides nice prompts for the config options
# + Advanced mode for raw access to real fields
#
tablename = "msg_sms_webapi_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("url",
default = "https://api.clickatell.com/http/sendmsg", # Clickatell
#default = "https://secure.mcommons.com/api/send_message", # Mobile Commons
requires = IS_URL(),
),
Field("parameters",
default = "user=yourusername&password=yourpassword&api_id=yourapiid", # Clickatell
#default = "campaign_id=yourid", # Mobile Commons
),
Field("message_variable", "string",
default = "text", # Clickatell
#default = "body", # Mobile Commons
requires = IS_NOT_EMPTY(),
),
Field("to_variable", "string",
default = "to", # Clickatell
#default = "phone_number", # Mobile Commons
requires = IS_NOT_EMPTY(),
),
Field("max_length", "integer",
default = 480, # Clickatell concat 3
),
# If using HTTP Auth (e.g. Mobile Commons)
Field("username"),
Field("password"),
Field("enabled", "boolean",
default = True,
),
*s3_meta_fields())
configure(tablename,
super_entity = "msg_channel",
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3TropoModel(S3Model):
"""
Tropo can be used to send & receive SMS, Twitter & XMPP
https://www.tropo.com
"""
names = ("msg_tropo_channel",
"msg_tropo_scratch",
)
def model(self):
#T = current.T
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Tropo Channels
#
tablename = "msg_tropo_channel"
define_table(tablename,
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
#label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("token_messaging"),
#Field("token_voice"),
*s3_meta_fields())
self.configure(tablename,
super_entity = "msg_channel",
)
set_method("msg", "tropo_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "tropo_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "tropo_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# Tropo Scratch pad for outbound messaging
#
tablename = "msg_tropo_scratch"
define_table(tablename,
Field("row_id", "integer"),
Field("message_id", "integer"),
Field("recipient"),
Field("message"),
Field("network"),
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3TwilioModel(S3ChannelModel):
"""
Twilio Inbound SMS channel
"""
names = ("msg_twilio_channel",
"msg_twilio_sid",
)
def model(self):
#T = current.T
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Twilio Channels
#
tablename = "msg_twilio_channel"
define_table(tablename,
# Instance
self.super_link("channel_id", "msg_channel"),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
#label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("account_name", length=255, unique=True),
Field("url",
default = \
"https://api.twilio.com/2010-04-01/Accounts"
),
Field("account_sid", length=64,
requires = IS_NOT_EMPTY(),
),
Field("auth_token", "password", length=64,
readable = False,
requires = IS_NOT_EMPTY(),
),
*s3_meta_fields())
self.configure(tablename,
onaccept = self.msg_channel_onaccept,
super_entity = "msg_channel",
)
set_method("msg", "twilio_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "twilio_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "twilio_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# Twilio Message extensions
# - store message sid to know which ones we've already downloaded
#
tablename = "msg_twilio_sid"
define_table(tablename,
# Component not Instance
self.msg_message_id(ondelete = "CASCADE"),
Field("sid"),
*s3_meta_fields())
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3TwitterModel(S3Model):
names = ("msg_twitter_channel",
"msg_twitter",
)
def model(self):
T = current.T
db = current.db
configure = self.configure
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Twitter Channel
#
tablename = "msg_twitter_channel"
define_table(tablename,
#Instance
self.super_link("channel_id", "msg_channel"),
# @ToDo: Allow different Twitter accounts for different PEs (Orgs / Teams)
#self.pr_pe_id(),
Field("name"),
Field("description"),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("twitter_account"),
Field("consumer_key", "password"),
Field("consumer_secret", "password"),
Field("access_token", "password"),
Field("access_token_secret", "password"),
*s3_meta_fields())
configure(tablename,
onaccept = self.msg_channel_onaccept,
#onvalidation = self.twitter_channel_onvalidation
super_entity = "msg_channel",
)
set_method("msg", "twitter_channel",
method = "enable",
action = self.msg_channel_enable_interactive)
set_method("msg", "twitter_channel",
method = "disable",
action = self.msg_channel_disable_interactive)
set_method("msg", "twitter_channel",
method = "poll",
action = self.msg_channel_poll)
# ---------------------------------------------------------------------
# Twitter Messages: InBox & Outbox
#
tablename = "msg_twitter"
define_table(tablename,
# Instance
self.super_link("message_id", "msg_message"),
self.msg_channel_id(),
s3_datetime(default = "now",
label = T("Posted on"),
),
Field("body", length=140,
label = T("Message"),
),
Field("from_address", #notnull=True,
label = T("From"),
represent = self.twitter_represent,
requires = IS_NOT_EMPTY(),
),
Field("to_address",
label = T("To"),
represent = self.twitter_represent,
),
Field("inbound", "boolean",
default = False,
label = T("Direction"),
represent = lambda direction: \
(direction and [T("In")] or \
[T("Out")])[0],
),
Field("msg_id", # Twitter Message ID
readable = False,
writable = False,
),
*s3_meta_fields())
configure(tablename,
list_fields = ["id",
#"priority",
#"category",
"body",
"from_address",
"date",
#"location_id",
],
#orderby = ~table.priority,
super_entity = "msg_message",
)
# ---------------------------------------------------------------------
return dict()
# -------------------------------------------------------------------------
@staticmethod
def twitter_represent(nickname, show_link=True):
"""
Represent a Twitter account
"""
if not nickname:
return current.messages["NONE"]
db = current.db
s3db = current.s3db
table = s3db.pr_contact
query = (table.contact_method == "TWITTER") & \
(table.value == nickname)
row = db(query).select(table.pe_id,
limitby=(0, 1)).first()
if row:
repr = s3db.pr_pentity_represent(row.pe_id)
if show_link:
# Assume person
ptable = s3db.pr_person
row = db(ptable.pe_id == row.pe_id).select(ptable.id,
limitby=(0, 1)).first()
if row:
link = URL(c="pr", f="person", args=[row.id])
return A(repr, _href=link)
return repr
else:
return nickname
# -------------------------------------------------------------------------
@staticmethod
def twitter_channel_onvalidation(form):
"""
Complete oauth: take tokens from session + pin from form,
and do the 2nd API call to Twitter
"""
T = current.T
session = current.session
settings = current.deployment_settings.msg
s3 = session.s3
vars = form.vars
if vars.pin and s3.twitter_request_key and s3.twitter_request_secret:
try:
import tweepy
except:
raise HTTP(501, body=T("Can't import tweepy"))
oauth = tweepy.OAuthHandler(settings.twitter_oauth_consumer_key,
settings.twitter_oauth_consumer_secret)
oauth.set_request_token(s3.twitter_request_key,
s3.twitter_request_secret)
try:
oauth.get_access_token(vars.pin)
vars.oauth_key = oauth.access_token.key
vars.oauth_secret = oauth.access_token.secret
twitter = tweepy.API(oauth)
vars.twitter_account = twitter.me().screen_name
vars.pin = "" # we won't need it anymore
return
except tweepy.TweepError:
session.error = T("Settings were reset because authenticating with Twitter failed")
# Either user asked to reset, or error - clear everything
for k in ["oauth_key", "oauth_secret", "twitter_account"]:
vars[k] = None
for k in ["twitter_request_key", "twitter_request_secret"]:
s3[k] = ""
# =============================================================================
class S3TwitterSearchModel(S3ChannelModel):
"""
Twitter Searches
- results can be fed to KeyGraph
https://dev.twitter.com/docs/api/1.1/get/search/tweets
"""
names = ("msg_twitter_search",
"msg_twitter_result",
)
def model(self):
T = current.T
db = current.db
configure = self.configure
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Twitter Search Query
#
tablename = "msg_twitter_search"
define_table(tablename,
Field("keywords", "text",
label = T("Keywords"),
),
# @ToDo: Allow setting a Point & Radius for filtering by geocode
#self.gis_location_id(),
Field("lang",
# Set in controller
#default = current.response.s3.language,
label = T("Language"),
),
Field("count", "integer",
default = 100,
label = T("# Results per query"),
),
Field("include_entities", "boolean",
default = False,
label = T("Include Entity Information?"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Entity Information"),
T("This is required if analyzing with KeyGraph."))),
),
# @ToDo: Rename or even move to Component Table
Field("is_processed", "boolean",
default = False,
label = T("Processed with KeyGraph?"),
represent = s3_yes_no_represent,
),
Field("is_searched", "boolean",
default = False,
label = T("Searched?"),
represent = s3_yes_no_represent,
),
*s3_meta_fields())
configure(tablename,
list_fields = ["keywords",
"lang",
"count",
#"include_entities",
],
)
# Reusable Query ID
represent = S3Represent(lookup=tablename, fields=["keywords"])
search_id = S3ReusableField("search_id", "reference %s" % tablename,
label = T("Search Query"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF_EMPTY(db, "msg_twitter_search.id")
),
)
set_method("msg", "twitter_search",
method = "poll",
action = self.twitter_search_poll)
set_method("msg", "twitter_search",
method = "keygraph",
action = self.twitter_keygraph)
set_method("msg", "twitter_result",
method = "timeline",
action = self.twitter_timeline)
# ---------------------------------------------------------------------
# Twitter Search Results
#
# @ToDo: Store the places mentioned in the Tweet as linked Locations
#
tablename = "msg_twitter_result"
define_table(tablename,
# Instance
self.super_link("message_id", "msg_message"),
# Just present for Super Entity
#self.msg_channel_id(),
search_id(),
s3_datetime(default="now",
label = T("Tweeted on"),
),
Field("tweet_id",
label = T("Tweet ID")),
Field("lang",
label = T("Language")),
Field("from_address",
label = T("Tweeted by")),
Field("body",
label = T("Tweet")),
# @ToDo: Populate from Parser
#Field("category",
# writable = False,
# label = T("Category"),
# ),
#Field("priority", "integer",
# writable = False,
# label = T("Priority"),
# ),
self.gis_location_id(),
# Just present for Super Entity
#Field("inbound", "boolean",
# default = True,
# readable = False,
# writable = False,
# ),
*s3_meta_fields())
configure(tablename,
list_fields = [#"category",
#"priority",
"body",
"from_address",
"date",
"location_id",
],
#orderby=~table.priority,
super_entity = "msg_message",
)
# ---------------------------------------------------------------------
return dict()
# -----------------------------------------------------------------------------
@staticmethod
def twitter_search_poll(r, **attr):
"""
Perform a Search of Twitter
S3Method for interactive requests
"""
id = r.id
tablename = r.tablename
current.s3task.async("msg_twitter_search", args=[id])
current.session.confirmation = \
current.T("The search request has been submitted, so new messages should appear shortly - refresh to see them")
# Filter results to this Search
redirect(URL(f="twitter_result",
vars={"~.search_id": id}))
# -----------------------------------------------------------------------------
@staticmethod
def twitter_keygraph(r, **attr):
"""
            Process Search Results with KeyGraph
S3Method for interactive requests
"""
tablename = r.tablename
current.s3task.async("msg_process_keygraph", args=[r.id])
current.session.confirmation = \
current.T("The search results are now being processed with KeyGraph")
# @ToDo: Link to KeyGraph results
redirect(URL(f="twitter_result"))
# =============================================================================
@staticmethod
def twitter_timeline(r, **attr):
"""
Display the Tweets on a Simile Timeline
http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline
"""
if r.representation == "html" and r.name == "twitter_result":
response = current.response
s3 = response.s3
appname = r.application
# Add core Simile Code
s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % appname)
# Add our control script
if s3.debug:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % appname)
# Add our data
# @ToDo: Make this the initial data & then collect extra via REST with a stylesheet
# add in JS using S3.timeline.eventSource.addMany(events) where events is a []
if r.record:
# Single record
rows = [r.record]
else:
# Multiple records
# @ToDo: Load all records & sort to closest in time
# http://stackoverflow.com/questions/7327689/how-to-generate-a-sequence-of-future-datetimes-in-python-and-determine-nearest-d
rows = r.resource.select(["date", "body"], limit=2000, as_rows=True)
data = {"dateTimeFormat": "iso8601",
}
now = r.utcnow
tl_start = tl_end = now
events = []
import re
for row in rows:
# Dates
start = row.date or ""
if start:
if start < tl_start:
tl_start = start
if start > tl_end:
tl_end = start
start = start.isoformat()
title = (re.sub(r"(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)|RT", "", row.body))
if len(title) > 30:
title = title[:30]
events.append({"start": start,
"title": title,
"description": row.body,
})
data["events"] = events
data = json.dumps(data, separators=SEPARATORS)
code = "".join((
'''S3.timeline.data=''', data, '''
S3.timeline.tl_start="''', tl_start.isoformat(), '''"
S3.timeline.tl_end="''', tl_end.isoformat(), '''"
S3.timeline.now="''', now.isoformat(), '''"
'''))
# Control our code in static/scripts/S3/s3.timeline.js
s3.js_global.append(code)
# Create the DIV
item = DIV(_id="s3timeline", _class="s3-timeline")
output = dict(item=item)
# Maintain RHeader for consistency
if attr.get("rheader"):
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = current.T("Twitter Timeline")
response.view = "timeline.html"
return output
else:
r.error(405, current.ERROR.BAD_METHOD)
# =============================================================================
class S3XFormsModel(S3Model):
"""
XForms are used by the ODK Collect mobile client
http://eden.sahanafoundation.org/wiki/BluePrint/Mobile#Android
"""
names = ("msg_xforms_store",)
def model(self):
#T = current.T
# ---------------------------------------------------------------------
# SMS store for persistence and scratch pad for combining incoming xform chunks
tablename = "msg_xforms_store"
self.define_table(tablename,
Field("sender", length=20),
Field("fileno", "integer"),
Field("totalno", "integer"),
Field("partno", "integer"),
Field("message", length=160)
)
# ---------------------------------------------------------------------
return dict()
# =============================================================================
class S3BaseStationModel(S3Model):
"""
Base Stations (Cell Towers) are a type of Site
@ToDo: Calculate Coverage from Antenna Height, Radio Power and Terrain
- see RadioMobile
"""
names = ("msg_basestation",)
def model(self):
T = current.T
define_table = self.define_table
# ---------------------------------------------------------------------
# Base Stations (Cell Towers)
#
tablename = "msg_basestation"
define_table(tablename,
self.super_link("site_id", "org_site"),
Field("name", notnull=True,
length=64, # Mayon Compatibility
label = T("Name"),
),
Field("code", length=10, # Mayon compatibility
label = T("Code"),
                           # Deployments that don't want site codes can hide them
#readable = False,
#writable = False,
# @ToDo: Deployment Setting to add validator to make these unique
),
self.org_organisation_id(
label = T("Operator"),
#widget=S3OrganisationAutocompleteWidget(default_from_profile=True),
requires = self.org_organisation_requires(required=True,
updateable=True),
),
self.gis_location_id(),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_BASE = T("Create Base Station")
current.response.s3.crud_strings[tablename] = Storage(
label_create=T("Create Base Station"),
title_display=T("Base Station Details"),
title_list=T("Base Stations"),
title_update=T("Edit Base Station"),
title_upload=T("Import Base Stations"),
title_map=T("Map of Base Stations"),
label_list_button=T("List Base Stations"),
label_delete_button=T("Delete Base Station"),
msg_record_created=T("Base Station added"),
msg_record_modified=T("Base Station updated"),
msg_record_deleted=T("Base Station deleted"),
msg_list_empty=T("No Base Stations currently registered"))
self.configure(tablename,
deduplicate = self.msg_basestation_duplicate,
super_entity = "org_site",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict()
# ---------------------------------------------------------------------
@staticmethod
def msg_basestation_duplicate(item):
"""
Import item deduplication, match by name
(Adding location_id doesn't seem to be a good idea)
@param item: the S3ImportItem instance
"""
if item.tablename == "msg_basestation":
table = item.table
name = "name" in item.data and item.data.name
query = (table.name.lower() == name.lower())
#location_id = None
# if "location_id" in item.data:
# location_id = item.data.location_id
## This doesn't find deleted records:
# query = query & (table.location_id == location_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
# if duplicate is None and location_id:
## Search for deleted basestations with this name
# query = (table.name.lower() == name.lower()) & \
# (table.deleted == True)
# row = db(query).select(table.id, table.deleted_fk,
# limitby=(0, 1)).first()
# if row:
# fkeys = json.loads(row.deleted_fk)
# if "location_id" in fkeys and \
# str(fkeys["location_id"]) == str(location_id):
# duplicate = row
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# END =========================================================================
| mit |
CodeDJ/qt5-hidpi | qt/qtwebkit/Tools/QueueStatusServer/handlers/statusbubble_unittest.py | 128 | 2482 | # Copyright (C) 2010 Google, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from handlers.statusbubble import StatusBubble
from model.queues import Queue
class MockAttachment(object):
def __init__(self):
self.id = 1
def status_for_queue(self, queue):
return None
def position_in_queue(self, queue):
return 1
class StatusBubbleTest(unittest.TestCase):
def test_build_bubble(self):
bubble = StatusBubble()
queue = Queue("mac-ews")
attachment = MockAttachment()
bubble_dict = bubble._build_bubble(queue, attachment, 1)
# FIXME: assertDictEqual (in Python 2.7) would be better to use here.
self.assertEqual(bubble_dict["name"], "mac")
self.assertEqual(bubble_dict["attachment_id"], 1)
self.assertEqual(bubble_dict["queue_position"], 1)
self.assertEqual(bubble_dict["state"], "none")
self.assertEqual(bubble_dict["status"], None)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
LukeHoersten/ansible-modules-core | cloud/amazon/iam_cert.py | 102 | 11642 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam_cert
short_description: Manage server certificates for use on ELBs and CloudFront
description:
- Allows for the management of server certificates
version_added: "2.0"
options:
name:
description:
- Name of certificate to add, update or remove.
required: true
aliases: []
new_name:
description:
- When present, this will update the name of the cert with the value passed here.
required: false
aliases: []
new_path:
description:
- When present, this will update the path of the cert with the value passed here.
required: false
aliases: []
state:
description:
      - Whether to create or delete the certificate. When present is specified it will attempt to make an update if new_path or new_name is specified.
required: true
default: null
choices: [ "present", "absent" ]
aliases: []
path:
description:
- When creating or updating, specify the desired path of the certificate
required: false
default: "/"
aliases: []
cert_chain:
description:
- The path to the CA certificate chain in PEM encoded format.
required: false
default: null
aliases: []
cert:
description:
- The path to the certificate body in PEM encoded format.
required: false
aliases: []
key:
description:
- The path to the private key of the certificate in PEM encoded format.
dup_ok:
description:
      - By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as long as the name is unique.
required: false
default: False
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
requirements: [ "boto" ]
author: Jonathan I. Davila
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic server certificate upload
tasks:
- name: Upload Certificate
iam_cert:
name: very_ssl
state: present
cert: somecert.pem
key: privcertkey
cert_chain: myverytrustedchain
'''
import json
import sys
try:
import boto
import boto.iam
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def cert_meta(iam, name):
opath = iam.get_server_certificate(name).get_server_certificate_result.\
server_certificate.\
server_certificate_metadata.\
path
ocert = iam.get_server_certificate(name).get_server_certificate_result.\
server_certificate.\
certificate_body
ocert_id = iam.get_server_certificate(name).get_server_certificate_result.\
server_certificate.\
server_certificate_metadata.\
server_certificate_id
upload_date = iam.get_server_certificate(name).get_server_certificate_result.\
server_certificate.\
server_certificate_metadata.\
upload_date
exp = iam.get_server_certificate(name).get_server_certificate_result.\
server_certificate.\
server_certificate_metadata.\
expiration
return opath, ocert, ocert_id, upload_date, exp
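# Helper for the 'present' state: returns True when the requested name already
# exists with a matching (or unspecified) body, so the call becomes an update;
# fails the module when the name is taken by a different body, or when the same
# body already exists under another name and dup_ok is not set.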
def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
update=False
if any(ct in orig_cert_names for ct in [name, new_name]):
for i_name in [name, new_name]:
if i_name is None:
continue
if cert is not None:
try:
c_index=orig_cert_names.index(i_name)
except NameError:
continue
else:
if orig_cert_bodies[c_index] == cert:
update=True
break
elif orig_cert_bodies[c_index] != cert:
                        module.fail_json(changed=False, msg='A cert with the name %s already exists and'
                                            ' has a different certificate body associated'
                                            ' with it. Certificates cannot have the same name' % i_name)
else:
update=True
break
elif cert in orig_cert_bodies and not dup_ok:
for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
if crt_body == cert:
module.fail_json(changed=False, msg='This certificate already'
' exists under the name %s' % crt_name)
return update
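# Perform the requested change and exit the module directly: for 'present'
# either update the name/path of an existing certificate or upload a new one,
# for 'absent' delete the named certificate if it exists.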
def cert_action(module, iam, name, cpath, new_name, new_path, state,
cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok):
if state == 'present':
update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
orig_cert_bodies, dup_ok)
if update:
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
changed=True
if new_name and new_path:
iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif new_name and not new_path:
iam.update_server_cert(name, new_cert_name=new_name)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif not new_name and new_path:
iam.update_server_cert(name, new_path=new_path)
module.exit_json(changed=changed, name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
else:
changed=False
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp,
msg='No new path or name specified. No changes made')
else:
changed=True
iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath)
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif state == 'absent':
if name in orig_cert_names:
changed=True
iam.delete_server_cert(name)
module.exit_json(changed=changed, deleted_cert=name)
else:
changed=False
            module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(
default=None, required=True, choices=['present', 'absent']),
name=dict(default=None, required=False),
cert=dict(default=None, required=False),
key=dict(default=None, required=False),
cert_chain=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False),
dup_ok=dict(default=False, required=False, choices=[False, True])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg="Boto is required for this module")
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
try:
iam = boto.iam.connection.IAMConnection(
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
state = module.params.get('state')
name = module.params.get('name')
path = module.params.get('path')
new_name = module.params.get('new_name')
new_path = module.params.get('new_path')
cert_chain = module.params.get('cert_chain')
dup_ok = module.params.get('dup_ok')
if state == 'present':
cert = open(module.params.get('cert'), 'r').read().rstrip()
key = open(module.params.get('key'), 'r').read().rstrip()
if cert_chain is not None:
cert_chain = open(module.params.get('cert_chain'), 'r').read()
else:
key=cert=chain=None
orig_certs = [ctb['server_certificate_name'] for ctb in \
iam.get_all_server_certs().\
list_server_certificates_result.\
server_certificate_metadata_list]
orig_bodies = [iam.get_server_certificate(thing).\
get_server_certificate_result.\
certificate_body \
for thing in orig_certs]
if new_name == name:
new_name = None
if new_path == path:
new_path = None
changed = False
try:
cert_action(module, iam, name, path, new_name, new_path, state,
cert, key, cert_chain, orig_certs, orig_bodies, dup_ok)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err), debug=[cert,key])
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_chart_combined03.py | 1 | 1649 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_combined03.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {'xl/charts/chart1.xml': ['<c:dispBlanksAs']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart1 = workbook.add_chart({'type': 'column'})
chart2 = workbook.add_chart({'type': 'line'})
data = [
[2, 7, 3, 6, 2],
[20, 25, 10, 10, 20],
[4, 2, 5, 2, 1],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart1.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart1.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart2.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart1.combine(chart2)
worksheet.insert_chart('E9', chart1)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause |
Qinusty/rethinkdb | test/common/unit.py | 7 | 2157 | #!/user/bin/env python
# Copyright 2014-2015 RethinkDB, all rights reserved.
import collections, os, subprocess, sys
import test_framework, utils
class AllUnitTests(test_framework.Test):
def __init__(self, filters=[]):
super(AllUnitTests, self).__init__()
self.filters = filters
self.configured = False
self.tests = None
def filter(self, filter):
return AllUnitTests(self.filters + [filter])
def configure(self, conf):
unit_executable = os.path.join(conf['BUILD_DIR'], "rethinkdb-unittest")
if not os.access(unit_executable, os.X_OK):
            sys.stderr.write('Warning: no usable rethinkdb-unittest executable at: %s\n' % unit_executable)
return test_framework.TestTree()
output = subprocess.check_output([unit_executable, "--gtest_list_tests"])
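        # Parse `--gtest_list_tests` output: group names end with '.', and the
        # indented lines that follow are the individual tests in that group.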
key = None
dict = collections.defaultdict(list)
for line in output.split("\n"):
if not line:
continue
elif line[-1] == '.':
key = line[:-1]
else:
dict[key].append(line.strip())
tests = test_framework.TestTree(
(group, UnitTest(unit_executable, group, tests))
for group, tests in dict.items())
for filter in self.filters:
tests = tests.filter(filter)
return tests
class UnitTest(test_framework.Test):
def __init__(self, unit_executable, test, child_tests=[]):
super(UnitTest, self).__init__()
self.unit_executable = unit_executable
self.test = test
self.child_tests = child_tests
def run(self):
filter = self.test
if self.child_tests:
filter = filter + ".*"
subprocess.check_call([self.unit_executable, "--gtest_filter=" + filter])
def filter(self, filter):
if filter.all_same() or not self.child_tests:
return self if filter.match() else None
tests = test_framework.TestTree((
(child, UnitTest(self.unit_executable, self.test + "." + child))
for child in self.child_tests))
return tests.filter(filter)
| agpl-3.0 |
coddingtonbear/d-rats | d_rats/mapdisplay.py | 3 | 52155 | #!/usr/bin/python
import os
import math
import urllib
import time
import random
import shutil
import tempfile
import threading
import copy
import gtk
import gobject
import mainapp
import platform
import miscwidgets
import inputdialog
import utils
import geocode_ui
import map_sources
import map_source_editor
import signals
from ui.main_common import ask_for_confirmation
from gps import GPSPosition, distance, value_with_units, DPRS_TO_APRS
CROSSHAIR = "+"
COLORS = ["red", "green", "cornflower blue", "pink", "orange", "grey"]
BASE_DIR = None
def set_base_dir(basedir):
global BASE_DIR
BASE_DIR = basedir
CONFIG = None
CONNECTED = True
MAX_TILE_LIFE = 0
PROXY = None
def set_connected(connected):
global CONNECTED
CONNECTED = connected
def set_tile_lifetime(lifetime):
global MAX_TILE_LIFE
MAX_TILE_LIFE = lifetime
def set_proxy(proxy):
global PROXY
PROXY = proxy
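# Fetch a remote URL to a local file, honouring the module-level CONNECTED
# flag and the optional HTTP proxy configured via set_proxy() above.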
def fetch_url(url, local):
global CONNECTED
global PROXY
if not CONNECTED:
raise Exception("Not connected")
if PROXY:
proxies = {"http" : PROXY}
else:
proxies = None
data = urllib.urlopen(url, proxies=proxies)
local_file = file(local, "wb")
d = data.read()
local_file.write(d)
data.close()
local_file.close()
class MarkerEditDialog(inputdialog.FieldDialog):
def __init__(self):
inputdialog.FieldDialog.__init__(self, title=_("Add Marker"))
self.icons = []
for sym in sorted(DPRS_TO_APRS.values()):
icon = utils.get_icon(sym)
if icon:
self.icons.append((icon, sym))
self.add_field(_("Group"), miscwidgets.make_choice([], True))
self.add_field(_("Name"), gtk.Entry())
self.add_field(_("Latitude"), miscwidgets.LatLonEntry())
self.add_field(_("Longitude"), miscwidgets.LatLonEntry())
self.add_field(_("Lookup"), gtk.Button("By Address"))
self.add_field(_("Comment"), gtk.Entry())
self.add_field(_("Icon"), miscwidgets.make_pixbuf_choice(self.icons))
self._point = None
def set_groups(self, groups, group=None):
grpsel = self.get_field(_("Group"))
for grp in groups:
grpsel.append_text(grp)
if group is not None:
grpsel.child.set_text(group)
grpsel.set_sensitive(False)
else:
grpsel.child.set_text(_("Misc"))
def get_group(self):
return self.get_field(_("Group")).child.get_text()
def set_point(self, point):
self.get_field(_("Name")).set_text(point.get_name())
self.get_field(_("Latitude")).set_text("%.4f" % point.get_latitude())
self.get_field(_("Longitude")).set_text("%.4f" % point.get_longitude())
self.get_field(_("Comment")).set_text(point.get_comment())
iconsel = self.get_field(_("Icon"))
if isinstance(point, map_sources.MapStation):
symlist = [y for x,y in self.icons]
try:
iidx = symlist.index(point.get_aprs_symbol())
iconsel.set_active(iidx)
except ValueError:
print "No such symbol `%s'" % point.get_aprs_symbol()
else:
iconsel.set_sensitive(False)
self._point = point
def get_point(self):
name = self.get_field(_("Name")).get_text()
lat = self.get_field(_("Latitude")).value()
lon = self.get_field(_("Longitude")).value()
comment = self.get_field(_("Comment")).get_text()
idx = self.get_field(_("Icon")).get_active()
self._point.set_name(name)
self._point.set_latitude(lat)
self._point.set_longitude(lon)
self._point.set_comment(comment)
if isinstance(self._point, map_sources.MapStation):
self._point.set_icon_from_aprs_sym(self.icons[idx][1])
return self._point
# These functions taken from:
# http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
def deg2num(lat_deg, lon_deg, zoom):
lat_rad = lat_deg * math.pi / 180.0
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n)
return(xtile, ytile)
def num2deg(xtile, ytile, zoom):
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = lat_rad * 180.0 / math.pi
return(lat_deg, lon_deg)
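# Example: at zoom 10, deg2num(47.6, -122.3, 10) gives approximately tile
# (164, 357); num2deg() maps a tile index back to the latitude/longitude of
# that tile's north-west corner, not back to the original point.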
class MapTile(object):
def path_els(self):
return deg2num(self.lat, self.lon, self.zoom)
def tile_edges(self):
n, w = num2deg(self.x, self.y, self.zoom)
s, e = num2deg(self.x+1, self.y+1, self.zoom)
return (s, w, n, e)
def lat_range(self):
s, w, n, e = self.tile_edges()
return (n, s)
def lon_range(self):
s, w, n, e = self.tile_edges()
return (w, e)
def path(self):
return "%d/%d/%d.png" % (self.zoom, self.x, self.y)
def _local_path(self):
path = os.path.join(self.dir, self.path())
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
return path
def is_local(self):
if MAX_TILE_LIFE == 0 or not CONNECTED:
return os.path.exists(self._local_path())
else:
try:
ts = os.stat(self._local_path()).st_mtime
return (time.time() - ts) < MAX_TILE_LIFE
except OSError:
return False
def fetch(self):
if not self.is_local():
for i in range(10):
url = self.remote_path()
try:
fetch_url(url, self._local_path())
return True
except Exception, e:
print "[%i] Failed to fetch `%s': %s" % (i, url, e)
return False
else:
return True
def _thread(self, cb, *args):
if self.fetch():
fname = self._local_path()
else:
fname = None
gobject.idle_add(cb, fname, *args)
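    # Fetch this tile on a background thread; when done, cb is scheduled on the
    # GTK main loop (via gobject.idle_add) with the local file path, or None if
    # the download failed.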
def threaded_fetch(self, cb, *args):
_args = (cb,) + args
t = threading.Thread(target=self._thread, args=_args)
t.setDaemon(True)
t.start()
def local_path(self):
path = self._local_path()
self.fetch()
return path
def remote_path(self):
return "http://tile.openstreetmap.org/%s" % (self.path())
def __add__(self, count):
(x, y) = count
return MapTile(self.x+x, self.y+y, self.zoom)
def __sub__(self, tile):
return (self.x - tile.x, self.y - tile.y)
def __contains__(self, point):
(lat, lon) = point
# FIXME for non-western!
(lat_max, lat_min) = self.lat_range()
(lon_min, lon_max) = self.lon_range()
lat_match = (lat < lat_max and lat > lat_min)
lon_match = (lon < lon_max and lon > lon_min)
return lat_match and lon_match
def __init__(self, lat, lon, zoom):
self.zoom = zoom
if isinstance(lat, int) and isinstance(lon, int):
self.x = lat
self.y = lon
self.lat, self.lon = num2deg(self.x, self.y, self.zoom)
else:
self.lat = lat
self.lon = lon
self.x, self.y = deg2num(self.lat, self.lon, self.zoom)
if BASE_DIR:
self.dir = BASE_DIR
else:
p = platform.get_platform()
self.dir = os.path.join(p.config_dir(), "maps")
if not os.path.isdir(self.dir):
os.mkdir(self.dir)
def __str__(self):
return "%.4f,%.4f (%i,%i)" % (self.lat, self.lon, self.x, self.y)
class LoadContext(object):
pass
class MapWidget(gtk.DrawingArea):
__gsignals__ = {
"redraw-markers" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
"new-tiles-loaded" : (gobject.SIGNAL_ACTION,
gobject.TYPE_NONE,
()),
}
def draw_text_marker_at(self, x, y, text, color="yellow"):
gc = self.get_style().black_gc
if self.zoom < 12:
size = 'size="x-small"'
elif self.zoom < 14:
size = 'size="small"'
else:
size = ''
text = utils.filter_to_ascii(text)
pl = self.create_pango_layout("")
markup = '<span %s background="%s">%s</span>' % (size, color, text)
pl.set_markup(markup)
self.window.draw_layout(gc, int(x), int(y), pl)
def draw_image_at(self, x, y, pb):
gc = self.get_style().black_gc
self.window.draw_pixbuf(gc,
pb,
0, 0,
int(x), int(y))
return pb.get_height()
def draw_cross_marker_at(self, x, y):
width = 2
cm = self.window.get_colormap()
color = cm.alloc_color("red")
gc = self.window.new_gc(foreground=color,
line_width=width)
x = int(x)
y = int(y)
self.window.draw_lines(gc, [(x, y-5), (x, y+5)])
self.window.draw_lines(gc, [(x-5, y), (x+5, y)])
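    # Convert a latitude/longitude to pixel coordinates within the current
    # pixmap of tiles; lat_fudge (set in calculate_bounds) corrects the
    # systematic y-axis error described there.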
def latlon2xy(self, lat, lon):
y = 1- ((lat - self.lat_min) / (self.lat_max - self.lat_min))
x = 1- ((lon - self.lon_min) / (self.lon_max - self.lon_min))
x *= (self.tilesize * self.width)
y *= (self.tilesize * self.height)
y += self.lat_fudge
return (x, y)
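    # Inverse of latlon2xy(): convert pixmap pixel coordinates back to
    # latitude/longitude.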
def xy2latlon(self, x, y):
y -= self.lat_fudge
lon = 1 - (float(x) / (self.tilesize * self.width))
lat = 1 - (float(y) / (self.tilesize * self.height))
lat = (lat * (self.lat_max - self.lat_min)) + self.lat_min
lon = (lon * (self.lon_max - self.lon_min)) + self.lon_min
return lat, lon
def draw_marker(self, label, lat, lon, img=None):
color = "red"
try:
x, y = self.latlon2xy(lat, lon)
except ZeroDivisionError:
return
if label == CROSSHAIR:
self.draw_cross_marker_at(x, y)
else:
if img:
y += (4 + self.draw_image_at(x, y, img))
self.draw_text_marker_at(x, y, label, color)
def expose(self, area, event):
if len(self.map_tiles) == 0:
self.load_tiles()
gc = self.get_style().black_gc
self.window.draw_drawable(gc,
self.pixmap,
0, 0,
0, 0,
-1, -1)
self.emit("redraw-markers")
def calculate_bounds(self):
center = MapTile(self.lat, self.lon, self.zoom)
topleft = center + (-2, -2)
botright = center + (2, 2)
(self.lat_min, _, _, self.lon_min) = botright.tile_edges()
(_, self.lon_max, self.lat_max, _) = topleft.tile_edges()
# I have no idea why, but for some reason we can calculate the
# longitude (x) just fine, but not the latitude (y). The result
# of either latlon2xy() or tile_edges() is bad, which causes the
# y calculation of a given latitude to be off by some amount.
# The amount grows large at small values of zoom (zoomed out) and
# starts to seriously affect the correctness of marker placement.
# Until I figure out why that is, we calculate a fudge factor below.
#
# To do this, we ask the center tile for its NW corner's
# coordinates. We then ask latlon2xy() (with fudge of zero) what
# the corresponding x,y is. Since we know what the correct value
# should be, we record the offset and use that to shift the y in
# further calculations for this zoom level.
self.lat_fudge = 0
s, w, n, e = center.tile_edges()
x, y = self.latlon2xy(n, w)
self.lat_fudge = ((self.height / 2) * self.tilesize) - y
if False:
print "------ Bounds Calculation ------"
print "Center tile should be at %i,%i" % (\
(self.height/2) * self.tilesize,
(self.width/2) * self.tilesize)
print "We calculate it based on Lat,Lon to be %i, %i" % (x, y)
print "--------------------------------"
print "Latitude Fudge Factor: %i (zoom %i)" % (self.lat_fudge,
self.zoom)
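    # Build (and cache in self.__broken_tile) the placeholder pixbuf that is
    # drawn whenever a tile cannot be fetched or parsed.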
def broken_tile(self):
if self.__broken_tile:
return self.__broken_tile
broken = [
"48 16 3 1",
" c #FFFFFFFFFFFF",
"x c #FFFF00000000",
"X c #000000000000",
"xx xx XX X XXX ",
" xx xx X X X X X ",
" xx xx X X X X X ",
" xx xx X X X X X ",
" xx xx X X X X X ",
" xx xx X X X X X ",
" xx xx X XX XXX ",
" xxx ",
" xxx ",
" xx xx XXXX XX XXXXX XX ",
" xx xx X X X X X X X ",
" xx xx X X X X X X X ",
" xx xx X X X X X X X ",
" xx xx X X XXXXXX X XXXXXX ",
" xx xx X X X X X X X ",
"xx xx XXXX X X X X X "
]
# return gtk.gdk.pixbuf_new_from_xpm_data(broken)
pm = gtk.gdk.pixmap_create_from_xpm_d(self.window, None, broken)[0]
pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
False,
8,
self.tilesize, self.tilesize)
pb.fill(0xffffffff)
x = y = (self.tilesize / 2)
pb.get_from_drawable(pm, pm.get_colormap(), 0, 0, x, y, -1, -1)
self.__broken_tile = pb
return pb
def draw_tile(self, path, x, y, ctx=None):
if ctx and ctx.zoom != self.zoom:
            # Zoom level has changed, so don't do anything
return
gc = self.pixmap.new_gc()
if path:
try:
pb = gtk.gdk.pixbuf_new_from_file(path)
except Exception, e:
utils.log_exception()
pb = self.broken_tile()
else:
pb = self.broken_tile()
if ctx:
ctx.loaded_tiles += 1
frac = float(ctx.loaded_tiles) / float(ctx.total_tiles)
if ctx.loaded_tiles == ctx.total_tiles:
self.status(0.0, "")
else:
self.status(frac, _("Loaded") + " %.0f%%" % (frac * 100.0))
self.pixmap.draw_pixbuf(gc, pb, 0, 0, x, y, -1, -1)
self.queue_draw()
@utils.run_gtk_locked
def draw_tile_locked(self, *args):
self.draw_tile(*args)
def load_tiles(self):
self.map_tiles = []
ctx = LoadContext()
ctx.loaded_tiles = 0
ctx.total_tiles = self.width * self.height
ctx.zoom = self.zoom
center = MapTile(self.lat, self.lon, self.zoom)
delta_h = self.height / 2
delta_w = self.width / 2
count = 0
total = self.width * self.height
if not self.window:
# Window is not loaded, thus can't load tiles
return
try:
self.pixmap = gtk.gdk.Pixmap(self.window,
self.width * self.tilesize,
self.height * self.tilesize)
except Exception, e:
# Window is not loaded, thus can't load tiles
return
gc = self.pixmap.new_gc()
for i in range(0, self.width):
for j in range(0, self.height):
tile = center + (i - delta_w, j - delta_h)
if not tile.is_local():
message = _("Retrieving")
else:
message = _("Loading")
if tile.is_local():
path = tile._local_path()
self.draw_tile(tile._local_path(),
self.tilesize * i, self.tilesize * j,
ctx)
else:
self.draw_tile(None, self.tilesize * i, self.tilesize * j)
tile.threaded_fetch(self.draw_tile_locked,
self.tilesize * i,
self.tilesize * j,
ctx)
self.map_tiles.append(tile)
count += 1
self.calculate_bounds()
self.emit("new-tiles-loaded")
def export_to(self, filename, bounds=None):
if not bounds:
x = 0
y = 0
bounds = (0,0,-1,-1)
width = self.tilesize * self.width
height = self.tilesize * self.height
else:
x = bounds[0]
y = bounds[1]
width = bounds[2] - bounds[0]
height = bounds[3] - bounds[1]
pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, width, height)
pb.get_from_drawable(self.pixmap, self.pixmap.get_colormap(),
x, y, 0, 0, width, height)
pb.save(filename, "png")
def __init__(self, width, height, tilesize=256, status=None):
gtk.DrawingArea.__init__(self)
self.__broken_tile = None
self.height = height
self.width = width
self.tilesize = tilesize
self.status = status
self.lat = 0
self.lon = 0
self.zoom = 1
self.lat_max = self.lat_min = 0
self.lon_max = self.lon_min = 0
self.map_tiles = []
self.set_size_request(self.tilesize * self.width,
self.tilesize * self.height)
self.connect("expose-event", self.expose)
def set_center(self, lat, lon):
self.lat = lat
self.lon = lon
self.map_tiles = []
self.queue_draw()
def get_center(self):
return (self.lat, self.lon)
def set_zoom(self, zoom):
if zoom > 17 or zoom == 1:
return
self.zoom = zoom
self.map_tiles = []
self.queue_draw()
def get_zoom(self):
return self.zoom
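    # Draw a scale bar of `pixels` width ending near (x, y), labelled with the
    # ground distance that many pixels span at the current zoom level.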
def scale(self, x, y, pixels=128):
shift = 15
tick = 5
#rect = gtk.gdk.Rectangle(x-pixels,y-shift-tick,x,y)
#self.window.invalidate_rect(rect, True)
(lat_a, lon_a) = self.xy2latlon(self.tilesize, self.tilesize)
(lat_b, lon_b) = self.xy2latlon(self.tilesize * 2, self.tilesize)
# width of one tile
d = distance(lat_a, lon_a, lat_b, lon_b) * (float(pixels) / self.tilesize)
dist = value_with_units(d)
color = self.window.get_colormap().alloc_color("black")
gc = self.window.new_gc(line_width=1, foreground=color)
self.window.draw_line(gc, x-pixels, y-shift, x, y-shift)
self.window.draw_line(gc, x-pixels, y-shift, x-pixels, y-shift-tick)
self.window.draw_line(gc, x, y-shift, x, y-shift-tick)
self.window.draw_line(gc, x-(pixels/2), y-shift, x-(pixels/2), y-shift-tick)
pl = self.create_pango_layout("")
pl.set_markup("%s" % dist)
self.window.draw_layout(gc, x-pixels, y-shift, pl)
def point_is_visible(self, lat, lon):
for i in self.map_tiles:
if (lat, lon) in i:
return True
return False
class MapWindow(gtk.Window):
__gsignals__ = {
"reload-sources" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"user-send-chat" : signals.USER_SEND_CHAT,
"get-station-list" : signals.GET_STATION_LIST,
}
_signals = {"user-send-chat" : None,
"get-station-list" : None,
}
def zoom(self, widget, frame):
adj = widget.get_adjustment()
self.map.set_zoom(int(adj.value))
frame.set_label(_("Zoom") + " (%i)" % int(adj.value))
def make_zoom_controls(self):
box = gtk.HBox(False, 3)
box.set_border_width(3)
box.show()
l = gtk.Label(_("Min"))
l.show()
box.pack_start(l, 0,0,0)
adj = gtk.Adjustment(value=14,
lower=2,
upper=17,
step_incr=1,
page_incr=3)
sb = gtk.HScrollbar(adj)
sb.show()
box.pack_start(sb, 1,1,1)
l = gtk.Label(_("Max"))
l.show()
box.pack_start(l, 0,0,0)
frame = gtk.Frame(_("Zoom"))
frame.set_label_align(0.5, 0.5)
frame.set_size_request(150, 50)
frame.show()
frame.add(box)
sb.connect("value-changed", self.zoom, frame)
return frame
def toggle_show(self, group, *vals):
if group:
station = vals[1]
else:
group = vals[1]
station = None
for src in self.map_sources:
if group != src.get_name():
continue
if station:
try:
point = src.get_point_by_name(station)
except KeyError:
continue
point.set_visible(vals[0])
self.add_point_visible(point)
else:
src.set_visible(vals[0])
for point in src.get_points():
point.set_visible(vals[0])
self.update_point(src, point)
src.save()
break
self.map.queue_draw()
def marker_mh(self, _action, id, group):
action = _action.get_name()
if action == "delete":
print "Deleting %s/%s" % (group, id)
for source in self.map_sources:
if source.get_name() == group:
if not source.get_mutable():
return
point = source.get_point_by_name(id)
source.del_point(point)
source.save()
elif action == "edit":
for source in self.map_sources:
if source.get_name() == group:
break
if not source.get_mutable():
return
if not source:
return
for point in source.get_points():
if point.get_name() == id:
break
if not point:
return
_point = point.dup()
upoint, foo = self.prompt_to_set_marker(point, source.get_name())
if upoint:
self.del_point(source, _point)
self.add_point(source, upoint)
source.save()
def _make_marker_menu(self, store, iter):
menu_xml = """
<ui>
<popup name="menu">
<menuitem action="edit"/>
<menuitem action="delete"/>
<menuitem action="center"/>
</popup>
</ui>
"""
ag = gtk.ActionGroup("menu")
try:
id, = store.get(iter, 1)
group, = store.get(store.iter_parent(iter), 1)
except TypeError:
id = group = None
edit = gtk.Action("edit", _("Edit"), None, None)
edit.connect("activate", self.marker_mh, id, group)
if not id:
edit.set_sensitive(False)
ag.add_action(edit)
delete = gtk.Action("delete", _("Delete"), None, None)
delete.connect("activate", self.marker_mh, id, group)
ag.add_action(delete)
center = gtk.Action("center", _("Center on this"), None, None)
center.connect("activate", self.marker_mh, id, group)
# This isn't implemented right now, because I'm lazy
center.set_sensitive(False)
ag.add_action(center)
uim = gtk.UIManager()
uim.insert_action_group(ag, 0)
uim.add_ui_from_string(menu_xml)
return uim.get_widget("/menu")
def make_marker_popup(self, _, view, event):
if event.button != 3:
return
if event.window == view.get_bin_window():
x, y = event.get_coords()
pathinfo = view.get_path_at_pos(int(x), int(y))
if pathinfo is None:
return
else:
view.set_cursor_on_cell(pathinfo[0])
(store, iter) = view.get_selection().get_selected()
menu = self._make_marker_menu(store, iter)
if menu:
menu.popup(None, None, None, event.button, event.time)
def make_marker_list(self):
cols = [(gobject.TYPE_BOOLEAN, _("Show")),
(gobject.TYPE_STRING, _("Station")),
(gobject.TYPE_FLOAT, _("Latitude")),
(gobject.TYPE_FLOAT, _("Longitude")),
(gobject.TYPE_FLOAT, _("Distance")),
(gobject.TYPE_FLOAT, _("Direction")),
]
self.marker_list = miscwidgets.TreeWidget(cols, 1, parent=False)
self.marker_list.toggle_cb.append(self.toggle_show)
self.marker_list.connect("click-on-list", self.make_marker_popup)
self.marker_list._view.connect("row-activated", self.recenter_cb)
def render_station(col, rend, model, iter):
parent = model.iter_parent(iter)
if not parent:
parent = iter
group = model.get_value(parent, 1)
if self.colors.has_key(group):
rend.set_property("foreground", self.colors[group])
c = self.marker_list._view.get_column(1)
c.set_expand(True)
c.set_min_width(150)
r = c.get_cell_renderers()[0]
c.set_cell_data_func(r, render_station)
def render_coord(col, rend, model, iter, cnum):
if model.iter_parent(iter):
rend.set_property('text', "%.4f" % model.get_value(iter, cnum))
else:
rend.set_property('text', '')
for col in [2, 3]:
c = self.marker_list._view.get_column(col)
r = c.get_cell_renderers()[0]
c.set_cell_data_func(r, render_coord, col)
def render_dist(col, rend, model, iter, cnum):
if model.iter_parent(iter):
rend.set_property('text', "%.2f" % model.get_value(iter, cnum))
else:
rend.set_property('text', '')
for col in [4, 5]:
c = self.marker_list._view.get_column(col)
r = c.get_cell_renderers()[0]
c.set_cell_data_func(r, render_dist, col)
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sw.add(self.marker_list.packable())
sw.set_size_request(-1, 150)
sw.show()
return sw
def refresh_marker_list(self, group=None):
(lat, lon) = self.map.get_center()
center = GPSPosition(lat=lat, lon=lon)
for item in self.marker_list.get_values(group):
try:
_parent, children = item
except ValueError:
# Empty group
continue
parent = _parent[1]
for child in children:
this = GPSPosition(lat=child[2], lon=child[3])
dist = center.distance_from(this)
bear = center.bearing_to(this)
self.marker_list.set_item(parent,
child[0],
child[1],
child[2],
child[3],
dist,
bear)
def make_track(self):
def toggle(cb, mw):
mw.tracking_enabled = cb.get_active()
cb = gtk.CheckButton(_("Track center"))
cb.connect("toggled", toggle, self)
cb.show()
return cb
def clear_map_cache(self):
d = gtk.MessageDialog(buttons=gtk.BUTTONS_YES_NO)
d.set_property("text", _("Are you sure you want to clear your map cache?"))
r = d.run()
d.destroy()
if r == gtk.RESPONSE_YES:
dir = os.path.join(platform.get_platform().config_dir(), "maps")
shutil.rmtree(dir, True)
self.map.queue_draw()
def printable_map(self, bounds=None):
p = platform.get_platform()
f = tempfile.NamedTemporaryFile()
fn = f.name
f.close()
mf = "%s.png" % fn
hf = "%s.html" % fn
ts = time.strftime("%H:%M:%S %d-%b-%Y")
station_map = _("Station map")
generated_at = _("Generated at")
html = """
<html>
<body>
<h2>D-RATS %s</h2>
<h5>%s %s</h5>
<img src="file://%s"/>
</body>
</html>
""" % (station_map, generated_at, ts, mf)
self.map.export_to(mf, bounds)
f = file(hf, "w")
f.write(html)
f.close()
p.open_html_file(hf)
def save_map(self, bounds=None):
p = platform.get_platform()
f = p.gui_save_file(default_name="map_%s.png" % \
time.strftime("%m%d%Y%_H%M%S"))
if not f:
return
if not f.endswith(".png"):
f += ".png"
self.map.export_to(f, bounds)
def get_visible_bounds(self):
ha = self.sw.get_hadjustment()
va = self.sw.get_vadjustment()
return (int(ha.value), int(va.value),
int(ha.value + ha.page_size), int(va.value + va.page_size))
def mh(self, _action):
action = _action.get_name()
if action == "refresh":
self.map_tiles = []
self.map.queue_draw()
elif action == "clearcache":
self.clear_map_cache()
elif action == "save":
self.save_map()
elif action == "savevis":
self.save_map(self.get_visible_bounds())
elif action == "printable":
self.printable_map()
elif action == "printablevis":
self.printable_map(self.get_visible_bounds())
elif action == "editsources":
srced = map_source_editor.MapSourcesEditor(self.config)
srced.run()
srced.destroy()
self.emit("reload-sources")
def make_menu(self):
menu_xml = """
<ui>
<menubar name="MenuBar">
<menu action="map">
<menuitem action="refresh"/>
<menuitem action="clearcache"/>
<menuitem action="editsources"/>
<menu action="export">
<menuitem action="printable"/>
<menuitem action="printablevis"/>
<menuitem action="save"/>
<menuitem action="savevis"/>
</menu>
</menu>
</menubar>
</ui>
"""
actions = [('map', None, "_" + _("Map"), None, None, self.mh),
('refresh', None, "_" + _("Refresh"), None, None, self.mh),
('clearcache', None, "_" + _("Clear Cache"), None, None, self.mh),
('editsources', None, _("Edit Sources"), None, None, self.mh),
('export', None, "_" + _("Export"), None, None, self.mh),
('printable', None, "_" + _("Printable"), "<Control>p", None, self.mh),
('printablevis', None, _("Printable (visible area)"), "<Control><Alt>P", None, self.mh),
('save', None, "_" + _("Save Image"), "<Control>s", None, self.mh),
('savevis', None, _('Save Image (visible area)'), "<Control><Alt>S", None, self.mh),
]
uim = gtk.UIManager()
self.menu_ag = gtk.ActionGroup("MenuBar")
self.menu_ag.add_actions(actions)
uim.insert_action_group(self.menu_ag, 0)
menuid = uim.add_ui_from_string(menu_xml)
self._accel_group = uim.get_accel_group()
return uim.get_widget("/MenuBar")
def make_controls(self):
vbox = gtk.VBox(False, 2)
vbox.pack_start(self.make_zoom_controls(), 0,0,0)
vbox.pack_start(self.make_track(), 0,0,0)
vbox.show()
return vbox
def make_bottom_pane(self):
box = gtk.HBox(False, 2)
box.pack_start(self.make_marker_list(), 1,1,1)
box.pack_start(self.make_controls(), 0,0,0)
box.show()
return box
def scroll_to_center(self, widget):
a = widget.get_vadjustment()
a.set_value((a.upper - a.page_size) / 2)
a = widget.get_hadjustment()
a.set_value((a.upper - a.page_size) / 2)
def center_on(self, lat, lon):
ha = self.sw.get_hadjustment()
va = self.sw.get_vadjustment()
x, y = self.map.latlon2xy(lat, lon)
ha.set_value(x - (ha.page_size / 2))
va.set_value(y - (va.page_size / 2))
def status(self, frac, message):
self.sb_prog.set_text(message)
self.sb_prog.set_fraction(frac)
def recenter(self, lat, lon):
self.map.set_center(lat, lon)
self.map.load_tiles()
self.refresh_marker_list()
self.center_on(lat, lon)
self.map.queue_draw()
def refresh(self):
self.map.load_tiles()
def prompt_to_set_marker(self, point, group=None):
def do_address(button, latw, lonw, namew):
dlg = geocode_ui.AddressAssistant()
r = dlg.run()
if r == gtk.RESPONSE_OK:
if not namew.get_text():
namew.set_text(dlg.place)
latw.set_text("%.5f" % dlg.lat)
lonw.set_text("%.5f" % dlg.lon)
d = MarkerEditDialog()
sources = []
for src in self.map_sources:
if src.get_mutable():
sources.append(src.get_name())
d.set_groups(sources, group)
d.set_point(point)
r = d.run()
if r == gtk.RESPONSE_OK:
point = d.get_point()
group = d.get_group()
d.destroy()
if r == gtk.RESPONSE_OK:
return point, group
else:
return None, None
def prompt_to_send_loc(self, _lat, _lon):
d = inputdialog.FieldDialog(title=_("Broadcast Location"))
d.add_field(_("Callsign"), gtk.Entry(8))
d.add_field(_("Description"), gtk.Entry(20))
d.add_field(_("Latitude"), miscwidgets.LatLonEntry())
d.add_field(_("Longitude"), miscwidgets.LatLonEntry())
d.get_field(_("Latitude")).set_text("%.4f" % _lat)
d.get_field(_("Longitude")).set_text("%.4f" % _lon)
while d.run() == gtk.RESPONSE_OK:
try:
call = d.get_field(_("Callsign")).get_text()
desc = d.get_field(_("Description")).get_text()
lat = d.get_field(_("Latitude")).get_text()
lon = d.get_field(_("Longitude")).get_text()
fix = GPSPosition(lat=lat, lon=lon, station=call)
fix.comment = desc
for port in self.emit("get-station-list").keys():
self.emit("user-send-chat",
"CQCQCQ", port,
fix.to_NMEA_GGA(), True)
break
except Exception, e:
utils.log_exception()
ed = gtk.MessageDialog(buttons=gtk.BUTTONS_OK, parent=d)
ed.set_property("text", _("Invalid value") + ": %s" % e)
ed.run()
ed.destroy()
d.destroy()
def recenter_cb(self, view, path, column, data=None):
model = view.get_model()
if model.iter_parent(model.get_iter(path)) == None:
return
items = self.marker_list.get_selected()
self.center_mark = items[1]
self.recenter(items[2], items[3])
self.sb_center.pop(self.STATUS_CENTER)
self.sb_center.push(self.STATUS_CENTER, _("Center") + ": %s" % self.center_mark)
def make_popup(self, vals):
def _an(cap):
return cap.replace(" ", "_")
xml = ""
for action in [_an(x) for x in self._popup_items.keys()]:
xml += "<menuitem action='%s'/>\n" % action
xml = """
<ui>
<popup name="menu">
<menuitem action='title'/>
<separator/>
%s
</popup>
</ui>
""" % xml
ag = gtk.ActionGroup("menu")
t = gtk.Action("title",
"%.4f,%.4f" % (vals["lat"], vals["lon"]),
None,
None)
t.set_sensitive(False)
ag.add_action(t)
for name, handler in self._popup_items.items():
action = gtk.Action(_an(name), name, None, None)
action.connect("activate", handler, vals)
ag.add_action(action)
uim = gtk.UIManager()
uim.insert_action_group(ag, 0)
uim.add_ui_from_string(xml)
return uim.get_widget("/menu")
def mouse_click_event(self, widget, event):
x,y = event.get_coords()
ha = widget.get_hadjustment()
va = widget.get_vadjustment()
mx = x + int(ha.get_value())
my = y + int(va.get_value())
lat, lon = self.map.xy2latlon(mx, my)
print "Button %i at %i,%i" % (event.button, mx, my)
if event.button == 3:
vals = { "lat" : lat,
"lon" : lon,
"x" : mx,
"y" : my }
menu = self.make_popup(vals)
if menu:
menu.popup(None, None, None, event.button, event.time)
elif event.type == gtk.gdk.BUTTON_PRESS:
print "Clicked: %.4f,%.4f" % (lat, lon)
# The crosshair marker has been missing since 0.3.0
#self.set_marker(GPSPosition(station=CROSSHAIR,
# lat=lat, lon=lon))
elif event.type == gtk.gdk._2BUTTON_PRESS:
print "Recenter on %.4f, %.4f" % (lat,lon)
self.recenter(lat, lon)
def mouse_move_event(self, widget, event):
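        # Motion events arrive far more often than the info popup needs to be
        # refreshed, so just remember the most recent pointer position here
        # and let the 100 ms timeout below do the actual hit-testing.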
if not self.__last_motion:
gobject.timeout_add(100, self._mouse_motion_handler)
self.__last_motion = (time.time(), event.x, event.y)
def _mouse_motion_handler(self):
if self.__last_motion == None:
return False
t, x, y = self.__last_motion
if (time.time() - t) < 0.5:
self.info_window.hide()
return True
lat, lon = self.map.xy2latlon(x, y)
ha = self.sw.get_hadjustment()
va = self.sw.get_vadjustment()
mx = x - int(ha.get_value())
my = y - int(va.get_value())
hit = False
for source in self.map_sources:
if not source.get_visible():
continue
for point in source.get_points():
if not point.get_visible():
continue
try:
_x, _y = self.map.latlon2xy(point.get_latitude(),
point.get_longitude())
except ZeroDivisionError:
continue
dx = abs(x - _x)
dy = abs(y - _y)
if dx < 20 and dy < 20:
hit = True
date = time.ctime(point.get_timestamp())
text = "<b>Station:</b> %s" % point.get_name() + \
"\n<b>Latitude:</b> %.5f" % point.get_latitude() + \
"\n<b>Longitude:</b> %.5f"% point.get_longitude() + \
"\n<b>Last update:</b> %s" % date
text += "\n<b>Info</b>: %s" % point.get_comment()
label = gtk.Label()
label.set_markup(text)
label.show()
for child in self.info_window.get_children():
self.info_window.remove(child)
self.info_window.add(label)
posx, posy = self.get_position()
posx += mx + 10
posy += my - 10
self.info_window.move(int(posx), int(posy))
self.info_window.show()
break
if not hit:
self.info_window.hide()
self.sb_coords.pop(self.STATUS_COORD)
self.sb_coords.push(self.STATUS_COORD, "%.4f, %.4f" % (lat, lon))
self.__last_motion = None
return False
def ev_destroy(self, widget, data=None):
self.hide()
return True
def ev_delete(self, widget, event, data=None):
self.hide()
return True
def update_gps_status(self, string):
self.sb_gps.pop(self.STATUS_GPS)
self.sb_gps.push(self.STATUS_GPS, string)
def add_point_visible(self, point):
if point in self.points_visible:
self.points_visible.remove(point)
if self.map.point_is_visible(point.get_latitude(),
point.get_longitude()):
if point.get_visible():
self.points_visible.append(point)
return True
else:
return False
else:
return False
def update_point(self, source, point):
(lat, lon) = self.map.get_center()
center = GPSPosition(*self.map.get_center())
this = GPSPosition(point.get_latitude(), point.get_longitude())
try:
self.marker_list.set_item(source.get_name(),
point.get_visible(),
point.get_name(),
point.get_latitude(),
point.get_longitude(),
center.distance_from(this),
center.bearing_to(this))
except Exception, e:
if str(e) == "Item not found":
# this is evil
print "Adding point instead of updating"
return self.add_point(source, point)
self.add_point_visible(point)
self.map.queue_draw()
def add_point(self, source, point):
(lat, lon) = self.map.get_center()
center = GPSPosition(*self.map.get_center())
this = GPSPosition(point.get_latitude(), point.get_longitude())
self.marker_list.add_item(source.get_name(),
point.get_visible(), point.get_name(),
point.get_latitude(),
point.get_longitude(),
center.distance_from(this),
center.bearing_to(this))
self.add_point_visible(point)
self.map.queue_draw()
def del_point(self, source, point):
self.marker_list.del_item(source.get_name(), point.get_name())
if point in self.points_visible:
self.points_visible.remove(point)
self.map.queue_draw()
def get_map_source(self, name):
for source in self.get_map_sources():
if source.get_name() == name:
return source
return None
def add_map_source(self, source):
self.map_sources.append(source)
self.marker_list.add_item(None,
source.get_visible(), source.get_name(),
0, 0, 0, 0)
for point in source.get_points():
self.add_point(source, point)
#source.connect("point-updated", self.update_point)
source.connect("point-added", self.add_point)
source.connect("point-deleted", self.del_point)
source.connect("point-updated", self.maybe_recenter_on_updated_point)
def update_points_visible(self):
for src in self.map_sources:
for point in src.get_points():
self.update_point(src, point)
self.map.queue_draw()
def maybe_recenter_on_updated_point(self, source, point):
if point.get_name() == self.center_mark and \
self.tracking_enabled:
print "Center updated"
self.recenter(point.get_latitude(), point.get_longitude())
self.update_point(source, point)
def clear_map_sources(self):
self.marker_list.clear()
self.map_sources = []
self.points_visible = []
self.update_points_visible()
def get_map_sources(self):
return self.map_sources
def redraw_markers(self, map):
for point in self.points_visible:
map.draw_marker(point.get_name(),
point.get_latitude(),
point.get_longitude(),
point.get_icon())
def __init__(self, config, *args):
gtk.Window.__init__(self, *args)
self.config = config
self.STATUS_COORD = 0
self.STATUS_CENTER = 1
self.STATUS_GPS = 2
self.center_mark = None
self.tracking_enabled = False
tiles = 5
self.points_visible = []
self.map_sources = []
self.map = MapWidget(tiles, tiles, status=self.status)
self.map.show()
self.map.connect("redraw-markers", self.redraw_markers)
self.map.connect("new-tiles-loaded",
lambda m: self.update_points_visible())
box = gtk.VBox(False, 2)
self.menubar = self.make_menu()
self.menubar.show()
box.pack_start(self.menubar, 0,0,0)
self.add_accel_group(self._accel_group)
self.sw = gtk.ScrolledWindow()
self.sw.add_with_viewport(self.map)
self.sw.show()
def pre_scale(sw, event, mw):
ha = mw.sw.get_hadjustment()
va = mw.sw.get_vadjustment()
px = ha.get_value() + ha.page_size
py = va.get_value() + va.page_size
            # Invalidate the currently visible portion of the map so it is
            # redrawn before the scale indicator is painted on top of it.
            rect = gtk.gdk.Rectangle(int(ha.get_value()), int(va.get_value()),
                                     int(px), int(py))
mw.map.window.invalidate_rect(rect, True)
@utils.run_gtk_locked
def _scale(sw, event, mw):
ha = mw.sw.get_hadjustment()
va = mw.sw.get_vadjustment()
px = ha.get_value() + ha.page_size
py = va.get_value() + va.page_size
pm = mw.map.scale(int(px) - 5, int(py))
def scale(sw, event, mw):
gobject.idle_add(_scale, sw, event, mw)
self.sw.connect("expose-event", pre_scale, self)
self.sw.connect_after("expose-event", scale, self)
self.__last_motion = None
self.map.add_events(gtk.gdk.POINTER_MOTION_MASK)
self.map.connect("motion-notify-event", self.mouse_move_event)
self.sw.connect("button-press-event", self.mouse_click_event)
self.sw.connect('realize', self.scroll_to_center)
hbox = gtk.HBox(False, 2)
self.sb_coords = gtk.Statusbar()
self.sb_coords.show()
self.sb_coords.set_has_resize_grip(False)
self.sb_center = gtk.Statusbar()
self.sb_center.show()
self.sb_center.set_has_resize_grip(False)
self.sb_gps = gtk.Statusbar()
self.sb_gps.show()
self.sb_prog = gtk.ProgressBar()
self.sb_prog.set_size_request(150, -1)
self.sb_prog.show()
hbox.pack_start(self.sb_coords, 1,1,1)
hbox.pack_start(self.sb_center, 1,1,1)
hbox.pack_start(self.sb_prog, 0,0,0)
hbox.pack_start(self.sb_gps, 1,1,1)
hbox.show()
box.pack_start(self.sw, 1,1,1)
box.pack_start(self.make_bottom_pane(), 0,0,0)
box.pack_start(hbox, 0,0,0)
box.show()
self.set_default_size(600,600)
self.set_geometry_hints(max_width=tiles*256,
max_height=tiles*256)
self.markers = {}
self.colors = {}
self.color_index = 0
self.add(box)
self.connect("destroy", self.ev_destroy)
self.connect("delete_event", self.ev_delete)
self._popup_items = {}
self.add_popup_handler(_("Center here"),
lambda a, vals:
self.recenter(vals["lat"],
vals["lon"]))
def set_mark_at(a, vals):
p = map_sources.MapStation("STATION", vals["lat"], vals["lon"])
p.set_icon_from_aprs_sym("\\<")
point, group = self.prompt_to_set_marker(p)
if not point:
return
for source in self.map_sources:
print "%s,%s" % (source.get_name(), group)
if source.get_name() == group:
print "Adding new point %s to %s" % (point.get_name(),
source.get_name())
source.add_point(point)
source.save()
return
# No matching group
q = "%s %s %s" % \
(_("Group"), group,
_("does not exist. Do you want to create it?"))
if not ask_for_confirmation(q):
return
s = map_sources.MapFileSource.open_source_by_name(self.config,
group,
True)
s.add_point(point)
s.save()
self.add_map_source(s)
self.add_popup_handler(_("New marker here"), set_mark_at)
self.add_popup_handler(_("Broadcast this location"),
lambda a, vals:
self.prompt_to_send_loc(vals["lat"],
vals["lon"]))
self.info_window = gtk.Window(gtk.WINDOW_POPUP)
self.info_window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_MENU)
self.info_window.set_decorated(False)
self.info_window.modify_bg(gtk.STATE_NORMAL,
gtk.gdk.color_parse("yellow"))
def add_popup_handler(self, name, handler):
self._popup_items[name] = handler
def set_zoom(self, zoom):
self.map.set_zoom(zoom)
def set_center(self, lat, lon):
self.map.set_center(lat, lon)
if __name__ == "__main__":
import sys
import gps
if len(sys.argv) == 3:
m = MapWindow()
m.set_center(gps.parse_dms(sys.argv[1]),
gps.parse_dms(sys.argv[2]))
m.set_zoom(15)
else:
m = MapWindow()
m.set_center(45.525012, -122.916434)
m.set_zoom(14)
m.set_marker(GPSPosition(station="KI4IFW_H", lat=45.520, lon=-122.916434))
m.set_marker(GPSPosition(station="KE7FTE", lat=45.5363, lon=-122.9105))
m.set_marker(GPSPosition(station="KA7VQH", lat=45.4846, lon=-122.8278))
m.set_marker(GPSPosition(station="N7QQU", lat=45.5625, lon=-122.8645))
m.del_marker("N7QQU")
m.show()
try:
gtk.main()
except:
pass
# area = gtk.DrawingArea()
# area.set_size_request(768, 768)
#
# w = gtk.Window(gtk.WINDOW_TOPLEVEL)
# w.add(area)
# area.show()
# w.show()
#
# def expose(area, event):
# for i in range(1,4):
# img = gtk.gdk.pixbuf_new_from_file("/tmp/tile%i.png" % i)
# area.window.draw_pixbuf(area.get_style().black_gc,
# img,
# 0, 0, 256 * (i-1), 0, 256, 256)
#
# area.connect("expose-event", expose)
#
| gpl-3.0 |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Tumblr/Post/EditAudioPostWithURL.py | 5 | 7243 | # -*- coding: utf-8 -*-
###############################################################################
#
# EditAudioPostWithURL
# Updates a specified audio post on a Tumblr blog using a specified external URL.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class EditAudioPostWithURL(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the EditAudioPostWithURL Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(EditAudioPostWithURL, self).__init__(temboo_session, '/Library/Tumblr/Post/EditAudioPostWithURL')
def new_input_set(self):
return EditAudioPostWithURLInputSet()
def _make_result_set(self, result, path):
return EditAudioPostWithURLResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return EditAudioPostWithURLChoreographyExecution(session, exec_id, path)
class EditAudioPostWithURLInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the EditAudioPostWithURL
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_ExternalURL(self, value):
"""
Set the value of the ExternalURL input for this Choreo. ((required, string) The URL of the site that hosts the audio file (not Tumblr).)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('ExternalURL', value)
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Tumblr (AKA the OAuth Consumer Key).)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('APIKey', value)
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('AccessToken', value)
def set_BaseHostname(self, value):
"""
Set the value of the BaseHostname input for this Choreo. ((required, string) The standard or custom blog hostname (i.e. temboo.tumblr.com).)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('BaseHostname', value)
def set_Caption(self, value):
"""
Set the value of the Caption input for this Choreo. ((optional, string) The user-supplied caption. HTML is allowed.)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('Caption', value)
def set_Date(self, value):
"""
Set the value of the Date input for this Choreo. ((optional, date) The GMT date and time of the post. Can be an epoch timestamp in milliseconds or formatted like: Dec 8th, 2011 4:03pm. Defaults to NOW().)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('Date', value)
def set_ID(self, value):
"""
Set the value of the ID input for this Choreo. ((required, integer) The ID of the post you want to edit.)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('ID', value)
def set_Markdown(self, value):
"""
Set the value of the Markdown input for this Choreo. ((optional, boolean) Indicates whether the post uses markdown syntax. Defaults to false. Set to 1 to indicate true.)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('Markdown', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('ResponseFormat', value)
def set_SecretKey(self, value):
"""
Set the value of the SecretKey input for this Choreo. ((required, string) The Secret Key provided by Tumblr (AKA the OAuth Consumer Secret).)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('SecretKey', value)
def set_Slug(self, value):
"""
Set the value of the Slug input for this Choreo. ((optional, string) Adds a short text summary to the end of the post URL.)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('Slug', value)
def set_State(self, value):
"""
Set the value of the State input for this Choreo. ((optional, string) The state of the post. Specify one of the following: published, draft, queue. Defaults to published.)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('State', value)
def set_Tags(self, value):
"""
Set the value of the Tags input for this Choreo. ((optional, string) Comma-separated tags for this post.)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('Tags', value)
def set_Tweet(self, value):
"""
Set the value of the Tweet input for this Choreo. ((optional, string) Manages the autotweet (if enabled) for this post. Set to "off" for no tweet. Enter text to override the default tweet.)
"""
super(EditAudioPostWithURLInputSet, self)._set_input('Tweet', value)
class EditAudioPostWithURLResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the EditAudioPostWithURL Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Tumblr. Default is JSON, can be set to XML by entering 'xml' in ResponseFormat.)
"""
return self._output.get('Response', None)
class EditAudioPostWithURLChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return EditAudioPostWithURLResultSet(response, path)
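# A minimal usage sketch (not part of the generated Choreo classes above).
# It assumes the standard Temboo SDK session and execute_with_results() API;
# the account, app key, and Tumblr/OAuth credential values are placeholders.
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = EditAudioPostWithURL(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey('TUMBLR_API_KEY')
#   inputs.set_SecretKey('TUMBLR_SECRET_KEY')
#   inputs.set_AccessToken('OAUTH_ACCESS_TOKEN')
#   inputs.set_AccessTokenSecret('OAUTH_ACCESS_TOKEN_SECRET')
#   inputs.set_BaseHostname('example.tumblr.com')
#   inputs.set_ID('123456789')
#   inputs.set_ExternalURL('http://example.com/audio.mp3')
#   results = choreo.execute_with_results(inputs)
#   print results.get_Response()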
| gpl-3.0 |
vmindru/ansible | lib/ansible/modules/cloud/amazon/_ec2_ami_find.py | 17 | 13419 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ec2_ami_find
version_added: '2.0'
short_description: Searches for AMIs to obtain the AMI ID and other information
deprecated:
removed_in: "2.9"
why: Various AWS modules have been combined and replaced with M(ec2_ami_facts).
alternative: Use M(ec2_ami_facts) instead.
description:
- Returns list of matching AMIs with AMI ID, along with other useful information
- Can search AMIs with different owners
- Can search by matching tag(s), by AMI name and/or other criteria
- Results can be sorted and sliced
author: "Tom Bamford (@tombamford)"
notes:
- This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on
cloud-images.ubuntu.com.
- See the example below for a suggestion of how to search by distro/release.
options:
region:
description:
- The AWS region to use.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
owner:
description:
- Search AMIs owned by the specified owner
- Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
- If not specified, all EC2 AMIs in the specified region will be searched.
- You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one
character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the
literal string *amazon?\.
ami_id:
description:
- An AMI ID to match.
ami_tags:
description:
- A hash/dictionary of tags to match for the AMI.
architecture:
description:
- An architecture type to match (e.g. x86_64).
hypervisor:
description:
      - A hypervisor type to match (e.g. xen).
is_public:
description:
- Whether or not the image(s) are public.
type: bool
name:
description:
- An AMI name to match.
platform:
description:
- Platform type to match.
product_code:
description:
- Marketplace product code to match.
version_added: "2.3"
sort:
description:
- Optional attribute which with to sort the results.
      - If specifying 'tag', the 'sort_tag' parameter is required.
- Starting at version 2.1, additional sort choices of architecture, block_device_mapping, creationDate, hypervisor, is_public, location, owner_id,
platform, root_device_name, root_device_type, state, and virtualization_type are supported.
choices:
- 'name'
- 'description'
- 'tag'
- 'architecture'
- 'block_device_mapping'
- 'creationDate'
- 'hypervisor'
- 'is_public'
- 'location'
- 'owner_id'
- 'platform'
- 'root_device_name'
- 'root_device_type'
- 'state'
- 'virtualization_type'
sort_tag:
description:
- Tag name with which to sort results.
- Required when specifying 'sort=tag'.
sort_order:
description:
- Order in which to sort results.
- Only used when the 'sort' parameter is specified.
choices: ['ascending', 'descending']
default: 'ascending'
sort_start:
description:
- Which result to start with (when sorting).
- Corresponds to Python slice notation.
sort_end:
description:
- Which result to end with (when sorting).
- Corresponds to Python slice notation.
state:
description:
- AMI state to match.
default: 'available'
virtualization_type:
description:
- Virtualization type to match (e.g. hvm).
root_device_type:
description:
- Root device type to match (e.g. ebs, instance-store).
version_added: "2.5"
no_result_action:
description:
- What to do when no results are found.
- "'success' reports success and returns an empty array"
- "'fail' causes the module to report failure"
choices: ['success', 'fail']
default: 'success'
extends_documentation_fragment:
- aws
requirements:
- "python >= 2.6"
- boto
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Search for the AMI tagged "project:website"
- ec2_ami_find:
owner: self
ami_tags:
project: website
no_result_action: fail
register: ami_find
# Search for the latest Ubuntu 14.04 AMI
- ec2_ami_find:
name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
owner: 099720109477
sort: name
sort_order: descending
sort_end: 1
register: ami_find
# Launch an EC2 instance
- ec2:
image: "{{ ami_find.results[0].ami_id }}"
instance_type: m3.medium
key_name: mykey
wait: yes
'''
RETURN = '''
ami_id:
description: id of found amazon image
returned: when AMI found
type: str
sample: "ami-e9095e8c"
architecture:
description: architecture of image
returned: when AMI found
type: str
sample: "x86_64"
block_device_mapping:
description: block device mapping associated with image
returned: when AMI found
type: dict
sample: "{
'/dev/xvda': {
'delete_on_termination': true,
'encrypted': false,
'size': 8,
'snapshot_id': 'snap-ca0330b8',
'volume_type': 'gp2'
}"
creationDate:
description: creation date of image
returned: when AMI found
type: str
sample: "2015-10-15T22:43:44.000Z"
description:
description: description of image
returned: when AMI found
type: str
sample: "test-server01"
hypervisor:
description: type of hypervisor
returned: when AMI found
type: str
sample: "xen"
is_public:
description: whether image is public
returned: when AMI found
type: bool
sample: false
location:
description: location of image
returned: when AMI found
type: str
sample: "435210894375/test-server01-20151015-234343"
name:
description: ami name of image
returned: when AMI found
type: str
sample: "test-server01-20151015-234343"
owner_id:
description: owner of image
returned: when AMI found
type: str
sample: "435210894375"
platform:
description: platform of image
returned: when AMI found
type: str
sample: null
root_device_name:
description: root device name of image
returned: when AMI found
type: str
sample: "/dev/xvda"
root_device_type:
description: root device type of image
returned: when AMI found
type: str
sample: "ebs"
state:
description: state of image
returned: when AMI found
type: str
sample: "available"
tags:
description: tags assigned to image
returned: when AMI found
type: dict
sample: "{
'Environment': 'devel',
'Name': 'test-server01',
'Role': 'web'
}"
virtualization_type:
description: image virtualization type
returned: when AMI found
type: str
sample: "hvm"
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
def get_block_device_mapping(image):
"""
Retrieves block device mapping from AMI
"""
bdm_dict = dict()
bdm = getattr(image, 'block_device_mapping')
for device_name in bdm.keys():
bdm_dict[device_name] = {
'size': bdm[device_name].size,
'snapshot_id': bdm[device_name].snapshot_id,
'volume_type': bdm[device_name].volume_type,
'encrypted': bdm[device_name].encrypted,
'delete_on_termination': bdm[device_name].delete_on_termination
}
return bdm_dict
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
owner=dict(required=False, default=None),
ami_id=dict(required=False),
ami_tags=dict(required=False, type='dict',
aliases=['search_tags', 'image_tags']),
architecture=dict(required=False),
hypervisor=dict(required=False),
is_public=dict(required=False, type='bool'),
name=dict(required=False),
platform=dict(required=False),
product_code=dict(required=False),
sort=dict(required=False, default=None,
choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location',
'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']),
sort_tag=dict(required=False),
sort_order=dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start=dict(required=False),
sort_end=dict(required=False),
state=dict(required=False, default='available'),
virtualization_type=dict(required=False),
no_result_action=dict(required=False, default='success',
choices=['success', 'fail']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module.deprecate("The 'ec2_ami_find' module has been deprecated. Use 'ec2_ami_facts' instead.", version=2.9)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module, install via pip or your package manager')
ami_id = module.params.get('ami_id')
ami_tags = module.params.get('ami_tags')
architecture = module.params.get('architecture')
hypervisor = module.params.get('hypervisor')
is_public = module.params.get('is_public')
name = module.params.get('name')
owner = module.params.get('owner')
platform = module.params.get('platform')
product_code = module.params.get('product_code')
root_device_type = module.params.get('root_device_type')
sort = module.params.get('sort')
sort_tag = module.params.get('sort_tag')
sort_order = module.params.get('sort_order')
sort_start = module.params.get('sort_start')
sort_end = module.params.get('sort_end')
state = module.params.get('state')
virtualization_type = module.params.get('virtualization_type')
no_result_action = module.params.get('no_result_action')
filter = {'state': state}
if ami_id:
filter['image_id'] = ami_id
if ami_tags:
for tag in ami_tags:
filter['tag:' + tag] = ami_tags[tag]
if architecture:
filter['architecture'] = architecture
if hypervisor:
filter['hypervisor'] = hypervisor
if is_public:
filter['is_public'] = 'true'
if name:
filter['name'] = name
if platform:
filter['platform'] = platform
if product_code:
filter['product-code'] = product_code
if root_device_type:
filter['root_device_type'] = root_device_type
if virtualization_type:
filter['virtualization_type'] = virtualization_type
ec2 = ec2_connect(module)
images_result = ec2.get_all_images(owners=owner, filters=filter)
if no_result_action == 'fail' and len(images_result) == 0:
module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
results = []
for image in images_result:
data = {
'ami_id': image.id,
'architecture': image.architecture,
'block_device_mapping': get_block_device_mapping(image),
'creationDate': image.creationDate,
'description': image.description,
'hypervisor': image.hypervisor,
'is_public': image.is_public,
'location': image.location,
'name': image.name,
'owner_id': image.owner_id,
'platform': image.platform,
'root_device_name': image.root_device_name,
'root_device_type': image.root_device_type,
'state': image.state,
'tags': image.tags,
'virtualization_type': image.virtualization_type,
}
if image.kernel_id:
data['kernel_id'] = image.kernel_id
if image.ramdisk_id:
data['ramdisk_id'] = image.ramdisk_id
results.append(data)
if sort == 'tag':
if not sort_tag:
module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order == 'descending'))
elif sort:
results.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
try:
if sort and sort_start and sort_end:
results = results[int(sort_start):int(sort_end)]
elif sort and sort_start:
results = results[int(sort_start):]
elif sort and sort_end:
results = results[:int(sort_end)]
except TypeError:
module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
module.exit_json(results=results)
if __name__ == '__main__':
main()
| gpl-3.0 |
dnguyen0304/clare | clare/clare/common/messaging/consumer/interfaces.py | 1 | 1596 | # -*- coding: utf-8 -*-
import abc
class IConsumer(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def consume(self, interval, timeout):
"""
Parameters
----------
interval : float
            Duration to wait between successive consume attempts. The
            units are in seconds.
timeout : float
Maximum duration to try fetching a new record. The units
are in seconds.
Returns
-------
None
"""
pass
@abc.abstractmethod
def _consume_once(self, timeout):
"""
Parameters
----------
timeout : float
Maximum duration to try fetching a new record. The units
are in seconds.
Returns
-------
None
Raises
------
clare.common.messaging.consumer.exceptions.FetchTimeout
If the fetcher times out before fetching the minimum fetch size.
"""
pass
class IFetcher(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def pop(self, timeout):
"""
Parameters
----------
timeout : float
Returns
-------
clare.common.messaging.records.Record
Raises
------
clare.common.messaging.consumer.exceptions.FetchTimeout
"""
pass
class IHandler(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def handle(self, record):
"""
Parameters
----------
record : clare.common.messaging.records.Record
"""
pass
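# A hedged sketch of how these interfaces compose (the concrete names used
# here are illustrative, not part of this package): a consumer owns an
# IFetcher and an IHandler, pops a record, hands it to the handler, and
# waits ``interval`` seconds between iterations.
#
#   class PrintHandler(IHandler):
#       def handle(self, record):
#           print(record)
#
#   # fetcher = QueueFetcher(queue)      # some IFetcher implementation
#   # consumer = Consumer(fetcher=fetcher, handler=PrintHandler())
#   # consumer.consume(interval=0.1, timeout=5.0)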
| mit |
jrha/aquilon | tests/broker/test_manage_validate_branch.py | 2 | 6137 | #!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the manage command."""
import os
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from broker.brokertest import TestBrokerCommand
class TestManageValidateBranch(TestBrokerCommand):
def test_000_add_managetest1_sandbox(self):
self.successtest(["add", "sandbox", "--sandbox", "managetest1"])
def test_000_add_managetest2_sandbox(self):
self.successtest(["add", "sandbox", "--sandbox", "managetest2"])
def test_100_manage_for_uncommitted_change(self):
# aquilon63.aqd-unittest.ms.com & aquilon64.aqd-unittest.ms.com are
# sitting in "%s/utsandbox" we manage it to managetest1 to start clean.
user = self.config.get("unittest", "user")
self.noouttest(["manage", "--hostname=aquilon63.aqd-unittest.ms.com",
"--sandbox=%s/managetest1" % user, "--force"])
self.noouttest(["manage", "--hostname=aquilon64.aqd-unittest.ms.com",
"--sandbox=%s/managetest1" % user, "--force"])
def test_101_make_uncommitted_change(self):
sandboxdir = os.path.join(self.sandboxdir, "managetest1")
template = self.find_template("aquilon", "archetype", "base",
sandbox="managetest1")
f = open(template)
try:
contents = f.readlines()
finally:
f.close()
contents.append("#Added by test_manage unittest %s \n" % sandboxdir)
f = open(template, 'w')
try:
f.writelines(contents)
finally:
f.close()
self.gitcommand(["add", template], cwd=sandboxdir)
def test_102_fail_uncommitted_change(self):
user = self.config.get("unittest", "user")
command = ["manage", "--hostname", "aquilon63.aqd-unittest.ms.com",
"--sandbox", "%s/managetest2" % user]
out = self.badrequesttest(command)
self.matchoutput(out,
"The source sandbox managetest1 contains uncommitted"
" files.",
command)
def test_110_commit_uncommitted_change(self):
sandboxdir = os.path.join(self.sandboxdir, "managetest1")
self.gitcommand(["commit", "-a", "-m",
"added test_manage unittest comment"], cwd=sandboxdir)
def test_112_fail_missing_committed_change_in_template_king(self):
user = self.config.get("unittest", "user")
command = ["manage", "--hostname", "aquilon63.aqd-unittest.ms.com",
"--sandbox", "%s/managetest2" % user]
out = self.badrequesttest(command)
self.matchoutput(out,
"The source sandbox managetest1 latest commit has "
"not been published to template-king yet.",
command)
def test_114_publish_committed_change(self):
sandboxdir = os.path.join(self.sandboxdir, "managetest1")
self.successtest(["publish", "--branch", "managetest1"],
env=self.gitenv(), cwd=sandboxdir)
def test_115_fail_missing_committed_change_in_target(self):
user = self.config.get("unittest", "user")
command = ["manage", "--hostname", "aquilon63.aqd-unittest.ms.com",
"--sandbox", "%s/managetest2" % user]
out = self.badrequesttest(command)
self.matchoutput(out,
"The target sandbox managetest2 does not contain the "
"latest commit from source sandbox managetest1.",
command)
def test_116_pull_committed_change(self):
kingdir = self.config.get("broker", "kingdir")
user = self.config.get("unittest", "user")
managetest2dir = os.path.join(self.sandboxdir, "managetest2")
self.gitcommand(["pull", "--no-ff", kingdir, "managetest1"],
cwd=managetest2dir)
def test_120_manage_committed(self):
user = self.config.get("unittest", "user")
self.noouttest(["manage", "--hostname=aquilon63.aqd-unittest.ms.com",
"--sandbox=%s/managetest2" % user])
def test_121_verify_manage_committed(self):
user = self.config.get("unittest", "user")
command = "show host --hostname aquilon63.aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Primary Name: aquilon63.aqd-unittest.ms.com",
command)
self.matchoutput(out, "Sandbox: %s/managetest2" % user, command)
def test_130_force_manage_committed(self):
user = self.config.get("unittest", "user")
self.noouttest(["manage", "--hostname=aquilon64.aqd-unittest.ms.com",
"--sandbox=%s/managetest2" % user, "--force"])
def test_131_verify_force_manage_committed(self):
user = self.config.get("unittest", "user")
command = "show host --hostname aquilon64.aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Primary Name: aquilon64.aqd-unittest.ms.com",
command)
self.matchoutput(out, "Sandbox: %s/managetest2" % user, command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestManageValidateBranch)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 |
fredmorcos/attic | projects/plantmaker/plantmaker-main/src/benchmark/benchmark.py | 1 | 2905 | from os import path
from extra.printer import pprint, GREEN, BLUE, RED
class Benchmark(object):
useCairoPlot = False
useGnuPlot = True
def __init__(self, plant, orderList, testNumber):
self.prefix = "generic"
self.testName = "generic"
self.testNumber = testNumber
self.cairoPlotTimes = []
self.gnuPlotTimes = []
self.plant = plant
self.orderList = orderList
self.orderListSize = -1
self.machineListSize = -1
self.startValue = 0
def addGnuPlotTime(self, x, y):
self.gnuPlotTimes.append((x, y))
pprint("PERF Time = " + str(y), GREEN)
def addCairoPlotTime(self, t):
self.cairoPlotTimes.append(t)
pprint("PERF Time = " + str(t), GREEN)
def prepare(self):
pprint("PERF Starting " + self.prefix + " benchmark test " +
str(self.testNumber) + " on " + self.testName, BLUE)
if self.orderListSize != -1:
self.orderList.orders = self.orderList.orders[:self.orderListSize]
if self.machineListSize != -1:
self.plant.machines = self.plant.machines[:self.machineListSize]
self.times = [i * 0 for i in range(self.startValue)]
def save(self):
if Benchmark.useCairoPlot == True:
self.plotCairoPlot()
if Benchmark.useGnuPlot == True:
self.plotGnuPlot()
def plotGnuPlot(self):
import os, subprocess
p = subprocess.Popen(['/usr/bin/gnuplot'], stdin = subprocess.PIPE,
stdout = subprocess.PIPE, stderr = subprocess.PIPE, cwd = os.getcwd())
output = ""
hasFloat = False
for i in self.gnuPlotTimes:
if type(i[0]) == float:
hasFloat = True
output += str(i[0]) + " " + str(i[1]) + "\n"
with open("plantmaker-tmp", "w") as f:
f.write(output)
f.close()
of = "benchmarks/" + self.prefix + "-" + self.testName + "-" + \
str(self.testNumber) + "-gp.eps"
commString = "set grid; set term postscript; set out '" + of + "'; " + \
"set format y \"%.4f\"; " + "set xlabel \"" + self.testName + "\"; " + \
"set ylabel \"Time (Seconds)\"; unset key; "
if hasFloat == True:
commString += "set format x \"%.1f\"; "
commString += "plot 'plantmaker-tmp' with lines lw 3, 'plantmaker-tmp' with points pt 7 ps 1\n"
p.communicate(commString)
p.wait()
os.remove("plantmaker-tmp")
def plotCairoPlot(self):
try:
from thirdparty.CairoPlot import dot_line_plot
except:
pprint("PERF Will not output to graph. Install CairoPlot.", RED)
return
dot_line_plot(path.join("benchmarks", self.prefix + "-" + self.testName +
"-" + str(self.testNumber)) + ".png",
self.cairoPlotTimes, 800, 800, (255, 255, 255), 5, True, True, True,
None, None, None, None)
dot_line_plot(path.join("benchmarks", self.prefix + "-" + self.testName +
"-" + str(self.testNumber)) + ".ps",
self.cairoPlotTimes, 800, 800, (255, 255, 255), 5, True, True, True,
None, None, None, None)
def run(self):
self.prepare()
self.bench()
self.save()
def bench(self):
pass
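# A hedged sketch of how a concrete benchmark is expected to use this class
# (the subclass name and the scheduler call are illustrative, not part of
# this module): override bench(), record one (x, time) pair per measured run
# with addGnuPlotTime(), and let run() drive prepare/bench/save.
#
#   import time as _time
#
#   class OrderCountBenchmark(Benchmark):
#       def __init__(self, plant, orderList, testNumber):
#           Benchmark.__init__(self, plant, orderList, testNumber)
#           self.prefix = "ordercount"
#           self.testName = "orders"
#
#       def bench(self):
#           for n in range(self.startValue, len(self.orderList.orders)):
#               start = _time.time()
#               # ... run the scheduler on the first n orders here ...
#               self.addGnuPlotTime(n, _time.time() - start)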
| isc |
olopez32/syncless | googlecode_upload.py | 5 | 9044 | #!/usr/local/bin/stackless2.6
#
# Downloaded from http://support.googlecode.com/svn/trunk/scripts/googlecode_upload.py
# at Thu Apr 29 17:24:59 CEST 2010
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: [email protected] (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = '[email protected] (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
"""Upload a file to a Google Code project's file server.
Args:
file: The local path to the file.
project_name: The name of your project on Google Code.
user_name: Your Google account name.
password: The googlecode.com password for your account.
Note that this is NOT your global Google Account password!
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
Returns: a tuple:
http_status: 201 if the upload succeeded, something else if an
error occured.
http_reason: The human-readable string associated with http_status
file_url: If the upload succeeded, the URL of the file on Google
Code, None otherwise.
"""
# The login is the user part of [email protected]. If the login provided
# is in the full user@domain form, strip it down.
if user_name.endswith('@gmail.com'):
user_name = user_name[:user_name.index('@gmail.com')]
form_fields = [('summary', summary)]
if labels is not None:
form_fields.extend([('label', l.strip()) for l in labels])
content_type, body = encode_upload_request(form_fields, file)
upload_host = '%s.googlecode.com' % project_name
upload_uri = '/files'
auth_token = base64.b64encode('%s:%s'% (user_name, password))
headers = {
'Authorization': 'Basic %s' % auth_token,
'User-Agent': 'Googlecode.com uploader v0.9.4',
'Content-Type': content_type,
}
server = httplib.HTTPSConnection(upload_host)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
if resp.status == 201:
location = resp.getheader('Location', None)
else:
location = None
return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
"""Encode the given fields and file into a multipart form body.
fields is a sequence of (name, value) pairs. file is the path of
the file to upload. The file will be uploaded to Google Code with
the same file name.
Returns: (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
CRLF = '\r\n'
body = []
# Add the metadata about the upload first
for key, value in fields:
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="%s"' % key,
'',
value,
])
# Now add the file itself
file_name = os.path.basename(file_path)
f = open(file_path, 'rb')
file_content = f.read()
f.close()
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="filename"; filename="%s"'
% file_name,
# The upload server determines the mime-type, no need to set it.
'Content-Type: application/octet-stream',
'',
file_content,
])
# Finalize the form body
body.extend(['--' + BOUNDARY + '--', ''])
return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
user_name=None, password=None, tries=3):
"""Find credentials and upload a file to a Google Code project's file server.
file_path, project_name, summary, and labels are passed as-is to upload.
Args:
file_path: The local path to the file.
project_name: The name of your project on Google Code.
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
    tries: How many attempts to make.
"""
while tries > 0:
if user_name is None:
# Read username if not specified or loaded from svn config, or on
# subsequent tries.
sys.stdout.write('Please enter your googlecode.com username: ')
sys.stdout.flush()
user_name = sys.stdin.readline().rstrip()
if password is None:
# Read password if not loaded from svn config, or on subsequent tries.
print 'Please enter your googlecode.com password.'
print '** Note that this is NOT your Gmail account password! **'
print 'It is the password you use to access Subversion repositories,'
print 'and can be found here: http://code.google.com/hosting/settings'
password = getpass.getpass()
status, reason, url = upload(file_path, project_name, user_name, password,
summary, labels)
# Returns 403 Forbidden instead of 401 Unauthorized for bad
# credentials as of 2007-07-17.
if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
      # Reset for another try.
user_name = password = None
tries = tries - 1
else:
# We're done.
break
return status, reason, url
def main():
parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
'-p PROJECT [options] FILE')
parser.add_option('-s', '--summary', dest='summary',
help='Short description of the file')
parser.add_option('-p', '--project', dest='project',
help='Google Code project name')
parser.add_option('-u', '--user', dest='user',
help='Your Google Code username')
parser.add_option('-w', '--password', dest='password',
help='Your Google Code password')
parser.add_option('-l', '--labels', dest='labels',
help='An optional list of comma-separated labels to attach '
'to the file')
options, args = parser.parse_args()
if not options.summary:
parser.error('File summary is missing.')
elif not options.project:
parser.error('Project name is missing.')
elif len(args) < 1:
parser.error('File to upload not provided.')
elif len(args) > 1:
parser.error('Only one file may be specified.')
file_path = args[0]
if options.labels:
labels = options.labels.split(',')
else:
labels = None
status, reason, url = upload_find_auth(file_path, options.project,
options.summary, labels,
options.user, options.password)
if url:
print 'The file was uploaded successfully.'
print 'URL: %s' % url
return 0
else:
print 'An error occurred. Your file was not uploaded.'
print 'Google Code upload server said: %s (%s)' % (reason, status)
return 1
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
netroby/WinObjC | deps/3rdparty/icu/icu/source/test/depstest/dependencies.py | 198 | 7330 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2014, International Business Machines
# Corporation and others. All Rights Reserved.
#
# file name: dependencies.py
#
# created on: 2011may26
"""Reader module for dependency data for the ICU dependency tester.
Reads dependencies.txt and makes the data available.
Attributes:
files: Set of "library/filename.o" files mentioned in the dependencies file.
items: Map from library or group names to item maps.
Each item has a "type" ("library" or "group" or "system_symbols").
A library or group item can have an optional set of "files" (as in the files attribute).
Each item can have an optional set of "deps" (libraries & groups).
A group item also has a "library" name unless it is a group of system symbols.
The one "system_symbols" item and its groups have sets of "system_symbols"
with standard-library system symbol names.
libraries: Set of library names mentioned in the dependencies file.
file_to_item: Map from a symbol (ushoe.o) to library or group (shoesize)
"""
__author__ = "Markus W. Scherer"
# TODO: Support binary items.
# .txt syntax: binary: tools/genrb
# item contents: {"type": "binary"} with optional files & deps
# A binary must not be used as a dependency for anything else.
import sys
files = set()
items = {}
libraries = set()
file_to_item = {}
_line_number = 0
_groups_to_be_defined = set()
def _CheckLibraryName(name):
global _line_number
if not name:
sys.exit("Error:%d: \"library: \" without name" % _line_number)
if name.endswith(".o"):
sys.exit("Error:%d: invalid library name %s" % (_line_number, name))
def _CheckGroupName(name):
global _line_number
if not name:
sys.exit("Error:%d: \"group: \" without name" % _line_number)
if "/" in name or name.endswith(".o"):
sys.exit("Error:%d: invalid group name %s" % (_line_number, name))
def _CheckFileName(name):
global _line_number
if "/" in name or not name.endswith(".o"):
sys.exit("Error:%d: invalid file name %s" % (_line_number, name))
def _RemoveComment(line):
global _line_number
_line_number = _line_number + 1
index = line.find("#") # Remove trailing comment.
if index >= 0: line = line[:index]
return line.rstrip() # Remove trailing newlines etc.
def _ReadLine(f):
while True:
line = _RemoveComment(f.next())
if line: return line
def _ReadFiles(deps_file, item, library_name):
global files
item_files = item.get("files")
while True:
line = _ReadLine(deps_file)
if not line: continue
if not line.startswith(" "): return line
if item_files == None: item_files = item["files"] = set()
for file_name in line.split():
_CheckFileName(file_name)
file_name = library_name + "/" + file_name
if file_name in files:
sys.exit("Error:%d: file %s listed in multiple groups" % (_line_number, file_name))
files.add(file_name)
item_files.add(file_name)
file_to_item[file_name] = item["name"]
def _IsLibrary(item): return item and item["type"] == "library"
def _IsLibraryGroup(item): return item and "library" in item
def _ReadDeps(deps_file, item, library_name):
global items, _line_number, _groups_to_be_defined
item_deps = item.get("deps")
while True:
line = _ReadLine(deps_file)
if not line: continue
if not line.startswith(" "): return line
if item_deps == None: item_deps = item["deps"] = set()
for dep in line.split():
_CheckGroupName(dep)
dep_item = items.get(dep)
if item["type"] == "system_symbols" and (_IsLibraryGroup(dep_item) or _IsLibrary(dep_item)):
sys.exit(("Error:%d: system_symbols depend on previously defined " +
"library or library group %s") % (_line_number, dep))
if dep_item == None:
# Add this dependency as a new group.
items[dep] = {"type": "group"}
if library_name: items[dep]["library"] = library_name
_groups_to_be_defined.add(dep)
item_deps.add(dep)
def _AddSystemSymbol(item, symbol):
exports = item.get("system_symbols")
if exports == None: exports = item["system_symbols"] = set()
exports.add(symbol)
def _ReadSystemSymbols(deps_file, item):
global _line_number
while True:
line = _ReadLine(deps_file)
if not line: continue
if not line.startswith(" "): return line
line = line.lstrip()
if '"' in line:
# One double-quote-enclosed symbol on the line, allows spaces in a symbol name.
symbol = line[1:-1]
if line.startswith('"') and line.endswith('"') and '"' not in symbol:
_AddSystemSymbol(item, symbol)
else:
sys.exit("Error:%d: invalid quoted symbol name %s" % (_line_number, line))
else:
# One or more space-separated symbols.
for symbol in line.split(): _AddSystemSymbol(item, symbol)
def Load():
"""Reads "dependencies.txt" and populates the module attributes."""
global items, libraries, _line_number, _groups_to_be_defined
deps_file = open("dependencies.txt")
try:
line = None
current_type = None
while True:
while not line: line = _RemoveComment(deps_file.next())
if line.startswith("library: "):
current_type = "library"
name = line[9:].lstrip()
_CheckLibraryName(name)
if name in items:
sys.exit("Error:%d: library definition using duplicate name %s" % (_line_number, name))
libraries.add(name)
item = items[name] = {"type": "library", "name": name}
line = _ReadFiles(deps_file, item, name)
elif line.startswith("group: "):
current_type = "group"
name = line[7:].lstrip()
_CheckGroupName(name)
if name not in items:
sys.exit("Error:%d: group %s defined before mentioned as a dependency" %
(_line_number, name))
if name not in _groups_to_be_defined:
sys.exit("Error:%d: group definition using duplicate name %s" % (_line_number, name))
_groups_to_be_defined.remove(name)
item = items[name]
item["name"] = name
library_name = item.get("library")
if library_name:
line = _ReadFiles(deps_file, item, library_name)
else:
line = _ReadSystemSymbols(deps_file, item)
elif line == " deps":
if current_type == "library":
line = _ReadDeps(deps_file, items[name], name)
elif current_type == "group":
item = items[name]
line = _ReadDeps(deps_file, item, item.get("library"))
elif current_type == "system_symbols":
item = items[current_type]
line = _ReadDeps(deps_file, item, None)
else:
sys.exit("Error:%d: deps before any library or group" % _line_number)
elif line == "system_symbols:":
current_type = "system_symbols"
if current_type in items:
sys.exit("Error:%d: duplicate entry for system_symbols" % _line_number)
item = items[current_type] = {"type": current_type, "name": current_type}
line = _ReadSystemSymbols(deps_file, item)
else:
sys.exit("Syntax error:%d: %s" % (_line_number, line))
except StopIteration:
pass
if _groups_to_be_defined:
sys.exit("Error: some groups mentioned in dependencies are undefined: %s" % _groups_to_be_defined)
| mit |
cdubz/babybuddy | reports/graphs/feeding_amounts.py | 1 | 1422 | # -*- coding: utf-8 -*-
from django.utils import timezone
from django.utils.translation import gettext as _
import plotly.offline as plotly
import plotly.graph_objs as go
from reports import utils
def feeding_amounts(instances):
"""
Create a graph showing daily feeding amounts over time.
:param instances: a QuerySet of Feeding instances.
:returns: a tuple of the graph's html and javascript.
"""
totals = {}
for instance in instances:
end = timezone.localtime(instance.end)
date = end.date()
if date not in totals.keys():
totals[date] = 0
totals[date] += instance.amount or 0
amounts = [round(amount, 2) for amount in totals.values()]
trace = go.Bar(
name=_('Total feeding amount'),
x=list(totals.keys()),
y=amounts,
hoverinfo='text',
textposition='outside',
text=amounts
)
layout_args = utils.default_graph_layout_options()
layout_args['title'] = _('<b>Total Feeding Amounts</b>')
layout_args['xaxis']['title'] = _('Date')
layout_args['xaxis']['rangeselector'] = utils.rangeselector_date()
layout_args['yaxis']['title'] = _('Feeding amount')
fig = go.Figure({
'data': [trace],
'layout': go.Layout(**layout_args)
})
output = plotly.plot(fig, output_type='div', include_plotlyjs=False)
return utils.split_graph_output(output)
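# Hedged usage sketch (assumes a configured Baby Buddy/Django environment; the
# Feeding import path and the `child` variable are assumptions, not defined here):
#
# from core.models import Feeding
# html, js = feeding_amounts(Feeding.objects.filter(child=child))
# # `html` is the <div> markup and `js` the inline script, ready to embed in a template.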
| bsd-2-clause |
geraldinepascal/FROGS | tools/phyloseq_beta_diversity/phyloseq_beta_diversity.py | 1 | 7336 | #!/usr/bin/env python3
#
# Copyright (C) 2018 INRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'Ta Thi Ngan & Maria Bernard INRA - SIGENAE'
__copyright__ = 'Copyright (C) 2017 INRA'
__license__ = 'GNU General Public License'
__version__ = '3.2.3'
__email__ = '[email protected]'
__status__ = 'prod'
import os
import sys
import argparse
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
FROGS_DIR=""
if CURRENT_DIR.endswith("phyloseq_beta_diversity"):
FROGS_DIR = os.path.dirname(os.path.dirname(CURRENT_DIR))
else:
FROGS_DIR = os.path.dirname(CURRENT_DIR)
# PATH
BIN_DIR = os.path.abspath(os.path.join(FROGS_DIR, "libexec"))
os.environ['PATH'] = BIN_DIR + os.pathsep + os.environ['PATH']
APP_DIR = os.path.abspath(os.path.join(FROGS_DIR, "app"))
os.environ['PATH'] = APP_DIR + os.pathsep + os.environ['PATH']
# PYTHONPATH
LIB_DIR = os.path.abspath(os.path.join(FROGS_DIR, "lib"))
sys.path.append(LIB_DIR)
if os.getenv('PYTHONPATH') is None: os.environ['PYTHONPATH'] = LIB_DIR
else: os.environ['PYTHONPATH'] = LIB_DIR + os.pathsep + os.environ['PYTHONPATH']
# LIBR
LIBR_DIR = os.path.join(LIB_DIR,"external-lib")
from frogsUtils import *
##################################################################################################################################################
#
# COMMAND LINES
#
##################################################################################################################################################
class Rscript(Cmd):
"""
@summary: Launch Rmarkdown script to present the data beta diversity with phyloseq.
@see: http://rmarkdown.rstudio.com/
https://joey711.github.io/phyloseq/
@return: html file containing the plots
beta diversity distance matrix tsv file(s)
"""
def __init__(self, html, phyloseq, varExp, methods, outdir, rmd_stderr):
"""
@param html: [str] path to store resulting html file.
@param phyloseq: [str] path to phyloseq object in RData file, the result of FROGS Phyloseq Import Data.
@param varExp: [str] Experiment variable to split plot.
@param methods: [str] one or more of beta diversity method.
@param outdir: [str] The path to store resulting beta diversity distance matrix.
@param rmd_stderr: [str] Path to temporary Rmarkdown stderr output file
"""
rmd = os.path.join(CURRENT_DIR, "phyloseq_beta_diversity.Rmd")
Cmd.__init__( self,
'Rscript',
'Run 1 code Rmarkdown',
'-e "rmarkdown::render(' + "'" + rmd + "',knit_root_dir='" + outdir + "',output_file='" + html + \
"', params=list(phyloseq='" + phyloseq + "', varExp='" + varExp + "', methods='" + methods + "', libdir ='" + LIBR_DIR + "'), intermediates_dir='" + os.path.dirname(html) + "')" + '" 2> ' + rmd_stderr,
"-e '(sessionInfo()[[1]][13])[[1]][1]; paste(\"Rmarkdown version: \",packageVersion(\"rmarkdown\")) ; library(phyloseq); paste(\"Phyloseq version: \",packageVersion(\"phyloseq\"))'")
def get_version(self):
"""
@summary: Returns the program version number.
@return: [str] Version number if this is possible, otherwise this method return 'unknown'.
"""
return Cmd.get_version(self, 'stdout')
##################################################################################################################################################
#
# MAIN
#
##################################################################################################################################################
if __name__ == "__main__":
# Manage parameters
parser = argparse.ArgumentParser( description='To present the data beta diversity with phyloseq.')
parser.add_argument( '--debug', default=False, action='store_true', help="Keep temporary files to debug program." )
parser.add_argument( '--version', action='version', version=__version__ )
parser.add_argument('-v', '--varExp', type=str, required=True, default=None, help='The experiment variable you want to analyse.')
parser.add_argument('-m', '--distance-methods', required=True, type=str, default='bray,cc,unifrac,wunifrac', help='Comma separated values beta diversity methods available in Phyloseq (see https://www.bioconductor.org/packages/devel/bioc/manuals/phyloseq/man/phyloseq.pdf). [Default: %(default)s].')
# Inputs
group_input = parser.add_argument_group( 'Inputs' )
group_input.add_argument('-r','--rdata', required=True, default=None, help="The path of RData file containing a phyloseq object-the result of FROGS Phyloseq Import Data" )
# output
group_output = parser.add_argument_group( 'Outputs' )
group_output.add_argument('--matrix-outdir', required=True, action="store", type=str, help="Path to output matrix file")
group_output.add_argument('-o','--html', default='phyloseq_beta_diversity.nb.html', help="The HTML file containing the graphs. [Default: %(default)s]" )
group_output.add_argument( '-l', '--log-file', default=sys.stdout, help='This output file will contain several informations on executed commands.')
args = parser.parse_args()
prevent_shell_injections(args)
Logger.static_write(args.log_file, "## Application\nSoftware :" + sys.argv[0] + " (version : " + str(__version__) + ")\nCommand : " + " ".join(sys.argv) + "\n\n")
# check parameter
list_distance=["unifrac","wunifrac","bray","cc","dpcoa","jsd","manhattan","euclidean","canberra","kulczynski","jaccard","gower","altGower","morisita","horn","mountford","raup","binomial","chao","cao","wt","-1","c","wb","rt","I","e","t","me","j","sor","m","-2","co","g","-3","l","19","hk","rlb","sim","gl","z","maximum","binary","minkowski","ANY"]
methods = args.distance_methods.strip() if not args.distance_methods.strip()[-1]=="," else args.distance_methods.strip()[:-1]
for method in methods.split(","):
if method not in list_distance:
raise_exception( Exception( '\n\n#ERROR : Your method "'+str(method)+'", name is not correct !!! Please make sure that it is in the list:'+str(list_distance)+"\n\n"))
# Process
outdir = os.path.abspath(args.matrix_outdir)
if not os.path.exists(outdir):
os.makedirs(outdir)
phyloseq=os.path.abspath(args.rdata)
html=os.path.abspath(args.html)
try:
tmpFiles = TmpFiles(os.path.dirname(html))
rmd_stderr = tmpFiles.add("rmarkdown.stderr")
Rscript(html, phyloseq, args.varExp, methods, outdir, rmd_stderr).submit( args.log_file )
finally :
if not args.debug:
tmpFiles.deleteAll()
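# Hedged example invocation (file paths and the variable name are placeholders;
# the option names match the argparse definitions above):
#
# python phyloseq_beta_diversity.py \
#     -r frogs_data.Rdata -v EnvType -m bray,unifrac \
#     --matrix-outdir beta_matrices -o beta_diversity.nb.html -l frogs.log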
| gpl-3.0 |
edmorley/django | django/db/models/deletion.py | 38 | 13580 | from collections import Counter, OrderedDict
from operator import attrgetter
from django.db import IntegrityError, connections, transaction
from django.db.models import signals, sql
class ProtectedError(IntegrityError):
def __init__(self, msg, protected_objects):
self.protected_objects = protected_objects
super().__init__(msg, protected_objects)
def CASCADE(collector, field, sub_objs, using):
collector.collect(sub_objs, source=field.remote_field.model,
source_attr=field.name, nullable=field.null)
if field.null and not connections[using].features.can_defer_constraint_checks:
collector.add_field_update(field, None, sub_objs)
def PROTECT(collector, field, sub_objs, using):
raise ProtectedError(
"Cannot delete some instances of model '%s' because they are "
"referenced through a protected foreign key: '%s.%s'" % (
field.remote_field.model.__name__, sub_objs[0].__class__.__name__, field.name
),
sub_objs
)
def SET(value):
if callable(value):
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value(), sub_objs)
else:
def set_on_delete(collector, field, sub_objs, using):
collector.add_field_update(field, value, sub_objs)
set_on_delete.deconstruct = lambda: ('django.db.models.SET', (value,), {})
return set_on_delete
def SET_NULL(collector, field, sub_objs, using):
collector.add_field_update(field, None, sub_objs)
def SET_DEFAULT(collector, field, sub_objs, using):
collector.add_field_update(field, field.get_default(), sub_objs)
def DO_NOTHING(collector, field, sub_objs, using):
pass
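# Hedged usage note (illustrative models, not part of this module): the handlers
# above are selected through the `on_delete` argument of a ForeignKey/OneToOneField, e.g.
#
# class Book(models.Model):
#     author = models.ForeignKey(Author, on_delete=models.SET_NULL, null=True)
#
# The Collector below then calls the chosen handler with the objects that
# reference the row being deleted.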
def get_candidate_relations_to_delete(opts):
# The candidate relations are the ones that come from N-1 and 1-1 relations.
# N-N (i.e., many-to-many) relations aren't candidates for deletion.
return (
f for f in opts.get_fields(include_hidden=True)
if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many)
)
class Collector:
def __init__(self, using):
self.using = using
# Initially, {model: {instances}}, later values become lists.
self.data = OrderedDict()
self.field_updates = {} # {model: {(field, value): {instances}}}
# fast_deletes is a list of queryset-likes that can be deleted without
# fetching the objects into memory.
self.fast_deletes = []
# Tracks deletion-order dependency for databases without transactions
# or ability to defer constraint checks. Only concrete model classes
# should be included, as the dependencies exist only between actual
# database tables; proxy models are represented here by their concrete
# parent.
self.dependencies = {} # {model: {models}}
def add(self, objs, source=None, nullable=False, reverse_dependency=False):
"""
Add 'objs' to the collection of objects to be deleted. If the call is
the result of a cascade, 'source' should be the model that caused it,
and 'nullable' should be set to True if the relation can be null.
Return a list of all objects that were not already collected.
"""
if not objs:
return []
new_objs = []
model = objs[0].__class__
instances = self.data.setdefault(model, set())
for obj in objs:
if obj not in instances:
new_objs.append(obj)
instances.update(new_objs)
# Nullable relationships can be ignored -- they are nulled out before
# deleting, and therefore do not affect the order in which objects have
# to be deleted.
if source is not None and not nullable:
if reverse_dependency:
source, model = model, source
self.dependencies.setdefault(
source._meta.concrete_model, set()).add(model._meta.concrete_model)
return new_objs
def add_field_update(self, field, value, objs):
"""
Schedule a field update. 'objs' must be a homogeneous iterable
collection of model instances (e.g. a QuerySet).
"""
if not objs:
return
model = objs[0].__class__
self.field_updates.setdefault(
model, {}).setdefault(
(field, value), set()).update(objs)
def can_fast_delete(self, objs, from_field=None):
"""
Determine if the objects in the given queryset-like can be
fast-deleted. This can be done if there are no cascades, no
parents and no signal listeners for the object class.
The 'from_field' tells where we are coming from - we need this to
determine if the objects are in fact to be deleted. Also allow
skipping a parent -> child -> parent chain that would otherwise prevent
fast delete of the child.
"""
if from_field and from_field.remote_field.on_delete is not CASCADE:
return False
if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')):
return False
model = objs.model
if (signals.pre_delete.has_listeners(model) or
signals.post_delete.has_listeners(model) or
signals.m2m_changed.has_listeners(model)):
return False
# The use of from_field comes from the need to avoid cascade back to
# parent when parent delete is cascading to child.
opts = model._meta
if any(link != from_field for link in opts.concrete_model._meta.parents.values()):
return False
# Foreign keys pointing to this model, both from m2m and other
# models.
for related in get_candidate_relations_to_delete(opts):
if related.field.remote_field.on_delete is not DO_NOTHING:
return False
for field in model._meta.private_fields:
if hasattr(field, 'bulk_related_objects'):
# It's something like generic foreign key.
return False
return True
def get_del_batches(self, objs, field):
"""
Return the objs in suitably sized batches for the used connection.
"""
conn_batch_size = max(
connections[self.using].ops.bulk_batch_size([field.name], objs), 1)
if len(objs) > conn_batch_size:
return [objs[i:i + conn_batch_size]
for i in range(0, len(objs), conn_batch_size)]
else:
return [objs]
def collect(self, objs, source=None, nullable=False, collect_related=True,
source_attr=None, reverse_dependency=False, keep_parents=False):
"""
Add 'objs' to the collection of objects to be deleted as well as all
parent instances. 'objs' must be a homogeneous iterable collection of
model instances (e.g. a QuerySet). If 'collect_related' is True,
related objects will be handled by their respective on_delete handler.
If the call is the result of a cascade, 'source' should be the model
that caused it and 'nullable' should be set to True, if the relation
can be null.
If 'reverse_dependency' is True, 'source' will be deleted before the
current model, rather than after. (Needed for cascading to parent
models, the one case in which the cascade follows the forwards
direction of an FK rather than the reverse direction.)
If 'keep_parents' is True, data of parent models will not be deleted.
"""
if self.can_fast_delete(objs):
self.fast_deletes.append(objs)
return
new_objs = self.add(objs, source, nullable,
reverse_dependency=reverse_dependency)
if not new_objs:
return
model = new_objs[0].__class__
if not keep_parents:
# Recursively collect concrete model's parent models, but not their
# related objects. These will be found by meta.get_fields()
concrete_model = model._meta.concrete_model
for ptr in concrete_model._meta.parents.values():
if ptr:
parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
self.collect(parent_objs, source=model,
source_attr=ptr.remote_field.related_name,
collect_related=False,
reverse_dependency=True)
if collect_related:
parents = model._meta.parents
for related in get_candidate_relations_to_delete(model._meta):
# Preserve parent reverse relationships if keep_parents=True.
if keep_parents and related.model in parents:
continue
field = related.field
if field.remote_field.on_delete == DO_NOTHING:
continue
batches = self.get_del_batches(new_objs, field)
for batch in batches:
sub_objs = self.related_objects(related, batch)
if self.can_fast_delete(sub_objs, from_field=field):
self.fast_deletes.append(sub_objs)
elif sub_objs:
field.remote_field.on_delete(self, field, sub_objs, self.using)
for field in model._meta.private_fields:
if hasattr(field, 'bulk_related_objects'):
# It's something like generic foreign key.
sub_objs = field.bulk_related_objects(new_objs, self.using)
self.collect(sub_objs, source=model, nullable=True)
def related_objects(self, related, objs):
"""
Get a QuerySet of objects related to `objs` via the relation `related`.
"""
return related.related_model._base_manager.using(self.using).filter(
**{"%s__in" % related.field.name: objs}
)
def instances_with_model(self):
for model, instances in self.data.items():
for obj in instances:
yield model, obj
def sort(self):
sorted_models = []
concrete_models = set()
models = list(self.data)
while len(sorted_models) < len(models):
found = False
for model in models:
if model in sorted_models:
continue
dependencies = self.dependencies.get(model._meta.concrete_model)
if not (dependencies and dependencies.difference(concrete_models)):
sorted_models.append(model)
concrete_models.add(model._meta.concrete_model)
found = True
if not found:
return
self.data = OrderedDict((model, self.data[model])
for model in sorted_models)
def delete(self):
# sort instance collections
for model, instances in self.data.items():
self.data[model] = sorted(instances, key=attrgetter("pk"))
# if possible, bring the models in an order suitable for databases that
# don't support transactions or cannot defer constraint checks until the
# end of a transaction.
self.sort()
# number of objects deleted for each model label
deleted_counter = Counter()
with transaction.atomic(using=self.using, savepoint=False):
# send pre_delete signals
for model, obj in self.instances_with_model():
if not model._meta.auto_created:
signals.pre_delete.send(
sender=model, instance=obj, using=self.using
)
# fast deletes
for qs in self.fast_deletes:
count = qs._raw_delete(using=self.using)
deleted_counter[qs.model._meta.label] += count
# update fields
for model, instances_for_fieldvalues in self.field_updates.items():
query = sql.UpdateQuery(model)
for (field, value), instances in instances_for_fieldvalues.items():
query.update_batch([obj.pk for obj in instances],
{field.name: value}, self.using)
# reverse instance collections
for instances in self.data.values():
instances.reverse()
# delete instances
for model, instances in self.data.items():
query = sql.DeleteQuery(model)
pk_list = [obj.pk for obj in instances]
count = query.delete_batch(pk_list, self.using)
deleted_counter[model._meta.label] += count
if not model._meta.auto_created:
for obj in instances:
signals.post_delete.send(
sender=model, instance=obj, using=self.using
)
# update collected instances
for model, instances_for_fieldvalues in self.field_updates.items():
for (field, value), instances in instances_for_fieldvalues.items():
for obj in instances:
setattr(obj, field.attname, value)
for model, instances in self.data.items():
for instance in instances:
setattr(instance, model._meta.pk.attname, None)
return sum(deleted_counter.values()), dict(deleted_counter)
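# Hedged usage sketch (this mirrors how Model.delete()/QuerySet.delete() drive the
# Collector; `Book`, `author` and the 'default' alias are illustrative):
#
# from django.db.models.deletion import Collector
# collector = Collector(using='default')
# collector.collect(Book.objects.filter(author=author))   # cascades like .delete()
# total, per_model = collector.delete()
# # `total` is the number of rows removed; `per_model` maps model labels to counts.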
| bsd-3-clause |
Jamlum/pytomo | pytomo/dns/renderer.py | 2 | 12024 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Help for building DNS wire format messages"""
from __future__ import absolute_import
import cStringIO
import struct
import random
import time
from . import exception as dns_exception
from . import tsig as dns_tsig
from . import rdataclass as dns_rdataclass
from . import rdatatype as dns_rdatatype
QUESTION = 0
ANSWER = 1
AUTHORITY = 2
ADDITIONAL = 3
class Renderer(object):
"""Helper class for building DNS wire-format messages.
Most applications can use the higher-level L{dns_message.Message}
class and its to_wire() method to generate wire-format messages.
This class is for those applications which need finer control
over the generation of messages.
Typical use::
r = dns_renderer.Renderer(id=1, flags=0x80, max_size=512)
r.add_question(qname, qtype, qclass)
r.add_rrset(dns_renderer.ANSWER, rrset_1)
r.add_rrset(dns_renderer.ANSWER, rrset_2)
r.add_rrset(dns_renderer.AUTHORITY, ns_rrset)
r.add_edns(0, 0, 4096)
r.add_rrset(dns_renderer.ADDITIONAL, ad_rrset_1)
r.add_rrset(dns_renderer.ADDITIONAL, ad_rrset_2)
r.write_header()
r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
wire = r.get_wire()
@ivar output: where rendering is written
@type output: cStringIO.StringIO object
@ivar id: the message id
@type id: int
@ivar flags: the message flags
@type flags: int
@ivar max_size: the maximum size of the message
@type max_size: int
@ivar origin: the origin to use when rendering relative names
@type origin: dns_name.Name object
@ivar compress: the compression table
@type compress: dict
@ivar section: the section currently being rendered
@type section: int (dns_renderer.QUESTION, dns_renderer.ANSWER,
dns_renderer.AUTHORITY, or dns_renderer.ADDITIONAL)
@ivar counts: list of the number of RRs in each section
@type counts: int list of length 4
@ivar mac: the MAC of the rendered message (if TSIG was used)
@type mac: string
"""
def __init__(self, id=None, flags=0, max_size=65535, origin=None):
"""Initialize a new renderer.
@param id: the message id
@type id: int
@param flags: the DNS message flags
@type flags: int
@param max_size: the maximum message size; the default is 65535.
If rendering results in a message greater than I{max_size},
then L{dns_exception.TooBig} will be raised.
@type max_size: int
@param origin: the origin to use when rendering relative names
@type origin: dns_name.Name or None.
"""
self.output = cStringIO.StringIO()
if id is None:
self.id = random.randint(0, 65535)
else:
self.id = id
self.flags = flags
self.max_size = max_size
self.origin = origin
self.compress = {}
self.section = QUESTION
self.counts = [0, 0, 0, 0]
self.output.write('\x00' * 12)
self.mac = ''
def _rollback(self, where):
"""Truncate the output buffer at offset I{where}, and remove any
compression table entries that pointed beyond the truncation
point.
@param where: the offset
@type where: int
"""
self.output.seek(where)
self.output.truncate()
keys_to_delete = []
for k, v in self.compress.iteritems():
if v >= where:
keys_to_delete.append(k)
for k in keys_to_delete:
del self.compress[k]
def _set_section(self, section):
"""Set the renderer's current section.
Sections must be rendered in order: QUESTION, ANSWER, AUTHORITY,
ADDITIONAL. Sections may be empty.
@param section: the section
@type section: int
@raises dns_exception.FormError: an attempt was made to set
a section value less than the current section.
"""
if self.section != section:
if self.section > section:
raise dns_exception.FormError
self.section = section
def add_question(self, qname, rdtype, rdclass=dns_rdataclass.IN):
"""Add a question to the message.
@param qname: the question name
@type qname: dns_name.Name
@param rdtype: the question rdata type
@type rdtype: int
@param rdclass: the question rdata class
@type rdclass: int
"""
self._set_section(QUESTION)
before = self.output.tell()
qname.to_wire(self.output, self.compress, self.origin)
self.output.write(struct.pack("!HH", rdtype, rdclass))
after = self.output.tell()
if after >= self.max_size:
self._rollback(before)
raise dns_exception.TooBig
self.counts[QUESTION] += 1
def add_rrset(self, section, rrset, **kw):
"""Add the rrset to the specified section.
Any keyword arguments are passed on to the rdataset's to_wire()
routine.
@param section: the section
@type section: int
@param rrset: the rrset
@type rrset: dns_rrset.RRset object
"""
self._set_section(section)
before = self.output.tell()
n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
after = self.output.tell()
if after >= self.max_size:
self._rollback(before)
raise dns_exception.TooBig
self.counts[section] += n
def add_rdataset(self, section, name, rdataset, **kw):
"""Add the rdataset to the specified section, using the specified
name as the owner name.
Any keyword arguments are passed on to the rdataset's to_wire()
routine.
@param section: the section
@type section: int
@param name: the owner name
@type name: dns_name.Name object
@param rdataset: the rdataset
@type rdataset: dns_rdataset.Rdataset object
"""
self._set_section(section)
before = self.output.tell()
n = rdataset.to_wire(name, self.output, self.compress, self.origin,
**kw)
after = self.output.tell()
if after >= self.max_size:
self._rollback(before)
raise dns_exception.TooBig
self.counts[section] += n
def add_edns(self, edns, ednsflags, payload, options=None):
"""Add an EDNS OPT record to the message.
@param edns: The EDNS level to use.
@type edns: int
@param ednsflags: EDNS flag values.
@type ednsflags: int
@param payload: The EDNS sender's payload field, which is the maximum
size of UDP datagram the sender can handle.
@type payload: int
@param options: The EDNS options list
@type options: list of dns_edns_Option instances
@see: RFC 2671
"""
# make sure the EDNS version in ednsflags agrees with edns
ednsflags &= 0xFF00FFFFL
ednsflags |= (edns << 16)
self._set_section(ADDITIONAL)
before = self.output.tell()
self.output.write(struct.pack('!BHHIH', 0, dns_rdatatype.OPT, payload,
ednsflags, 0))
if not options is None:
lstart = self.output.tell()
for opt in options:
stuff = struct.pack("!HH", opt.otype, 0)
self.output.write(stuff)
start = self.output.tell()
opt.to_wire(self.output)
end = self.output.tell()
assert end - start < 65536
self.output.seek(start - 2)
stuff = struct.pack("!H", end - start)
self.output.write(stuff)
self.output.seek(0, 2)
lend = self.output.tell()
assert lend - lstart < 65536
self.output.seek(lstart - 2)
stuff = struct.pack("!H", lend - lstart)
self.output.write(stuff)
self.output.seek(0, 2)
after = self.output.tell()
if after >= self.max_size:
self._rollback(before)
raise dns_exception.TooBig
self.counts[ADDITIONAL] += 1
def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
request_mac, algorithm=dns_tsig.default_algorithm):
"""Add a TSIG signature to the message.
@param keyname: the TSIG key name
@type keyname: dns_name.Name object
@param secret: the secret to use
@type secret: string
@param fudge: TSIG time fudge
@type fudge: int
@param id: the message id to encode in the tsig signature
@type id: int
@param tsig_error: TSIG error code; default is 0.
@type tsig_error: int
@param other_data: TSIG other data.
@type other_data: string
@param request_mac: This message is a response to the request which
had the specified MAC.
@param algorithm: the TSIG algorithm to use
@type request_mac: string
"""
self._set_section(ADDITIONAL)
before = self.output.tell()
s = self.output.getvalue()
(tsig_rdata, self.mac, ctx) = dns_tsig.sign(s,
keyname,
secret,
int(time.time()),
fudge,
id,
tsig_error,
other_data,
request_mac,
algorithm=algorithm)
keyname.to_wire(self.output, self.compress, self.origin)
self.output.write(struct.pack('!HHIH', dns_rdatatype.TSIG,
dns_rdataclass.ANY, 0, 0))
rdata_start = self.output.tell()
self.output.write(tsig_rdata)
after = self.output.tell()
assert after - rdata_start < 65536
if after >= self.max_size:
self._rollback(before)
raise dns_exception.TooBig
self.output.seek(rdata_start - 2)
self.output.write(struct.pack('!H', after - rdata_start))
self.counts[ADDITIONAL] += 1
self.output.seek(10)
self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
self.output.seek(0, 2)
def write_header(self):
"""Write the DNS message header.
Writing the DNS message header is done after all sections
have been rendered, but before the optional TSIG signature
is added.
"""
self.output.seek(0)
self.output.write(struct.pack('!HHHHHH', self.id, self.flags,
self.counts[0], self.counts[1],
self.counts[2], self.counts[3]))
self.output.seek(0, 2)
def get_wire(self):
"""Return the wire format message.
@rtype: string
"""
return self.output.getvalue()
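# Hedged usage sketch (not part of the original module; it assumes sibling
# `name` and `rdatatype` modules exposing from_text() and A, as in upstream dnspython):
#
# from . import name as dns_name, rdatatype as dns_rdatatype
# r = Renderer(id=0x1234, flags=0x0100)                          # RD flag set
# r.add_question(dns_name.from_text('example.com.'), dns_rdatatype.A)
# r.write_header()                                               # header counts filled in last
# wire = r.get_wire()                                            # bytes ready to send over UDP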
| gpl-2.0 |
yanglr/book | a.py | 2 | 2054 | import os, pycurl, re, sys, shutil
md=":"
login=":"
def dxDown(url, fullpath):
c=pycurl.Curl() # 縮寫一下
c.setopt(c.FOLLOWLOCATION, True) # 允許重定向
c.setopt(pycurl.USERAGENT, b"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)") # 模擬瀏覽器
c.setopt(pycurl.URL, url) # 訪問指定網址
c.setopt(pycurl.COOKIEJAR, 'cookie.txt') # 把 cookie 存到文件
c.setopt(pycurl.COOKIEFILE, "cookie.txt") # 用文件掛 cookie
f = open(fullpath, 'wb') # 定義一個文件
c.setopt(c.WRITEDATA, f) # 指定返回信息的寫入文件,或作 c.setopt(c.WRITEFUNCTION, f.write)
c.perform() # 獲得服務器返回信息
f.close()
if c.getinfo(pycurl.HTTP_CODE) != 200:
os.remove(fullpath)
print("Failed!")
def getAtt(DXurl,md,login):
g = open("temp.txt", 'wb')
d=pycurl.Curl()
d.setopt(d.FOLLOWLOCATION, True)
d.setopt(pycurl.PROXY, md) # route the request through the proxy
d.setopt(pycurl.PROXYUSERPWD, login) # proxy username:password
d.setopt(pycurl.USERAGENT, b"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)")
d.setopt(pycurl.URL, DXurl)
d.setopt(pycurl.COOKIEJAR, 'dxcookie.txt')
d.setopt(pycurl.COOKIEFILE, "dxcookie.txt")
d.setopt(d.WRITEFUNCTION, g.write)
d.perform()
d.close()
g.close()
data=open("temp.txt","rU",encoding='UTF-8').read()
jpgPath=re.compile(r'(?<=jpgPath:")[0-9a-zA-Z\/]+(?=")')
strf=jpgPath.search(str(data)).group()
title=re.compile(u'(?<=<title>)[^<]+(?=</title>)')
bookname=title.search(str(data)).group()
ss=re.compile(r'(?<=ssid:.)\d{8}')
ssid=ss.search(str(data)).group()
legurl="http://img.duxiu.com"+strf+"leg001?zoom=2"
ssFolder="F:\\ss\\"+bookname+"_"+ssid+"\\"
FolderExist=os.path.isdir(ssFolder)
if FolderExist == False:
os.mkdir(r'f:/ss/'+bookname+'_'+ssid+'/')
legpath=ssFolder+"leg001.pdg"
dxDown(legurl,legpath)
shutil.move(ssFolder,"F:\\ss\\leg\\"+bookname+"_"+ssid+"\\")
os.remove("temp.txt")
os.remove("cookie.txt")
os.remove("dxcookie.txt")
def main():
getAtt(sys.argv[1],md,login)
if __name__ == "__main__":
main() | gpl-3.0 |
Detailscool/YHSpider | JiraStoryMaker/JiraStoryMaker2.py | 1 | 5993 | #!/usr/bin/python
# -*- coding:utf-8 -*-
# JiraStoryMaker.py
# Created by Henry on 2018/4/9
# Description :
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
import json
import time
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def create_story(**kwargs):
summary_text = kwargs.get('summary_text', None)
work_time_text = kwargs.get('work_time_text', None)
REQ = kwargs.get('REQ', None)
isFirst = kwargs.get('isFirst', False)
time.sleep(1)
new_button = driver.find_element_by_css_selector('#create_link')
new_button.click()
WebDriverWait(driver, 10000).until(
EC.presence_of_element_located((By.CSS_SELECTOR, 'span.drop-menu'))
)
drop_menus = driver.find_elements_by_css_selector('span.drop-menu')
if isFirst:
project = drop_menus[0]
project.click()
data_suggestions = driver.find_element_by_id('project-options').get_attribute('data-suggestions')
items = json.loads(data_suggestions)
# print items
if isinstance(items, list) and items and isinstance(items[0], dict) and isinstance(items[0]['items'], list) and items[0]['items'] and isinstance(items[0]['items'][0], dict) and items[0]['items'][0]['label']:
select_group = items[0]['items'][0]['label']
if u'IOSZHIBO' not in select_group:
groups = [a for a in driver.find_elements_by_css_selector('li a.aui-list-item-link') if 'IOSZHIBO' in a.text]
# print '\ngroups:', groups
if groups:
groups[0].click()
print 'click'
time.sleep(0.5)
else:
project.click()
story_type = driver.find_element_by_id('issuetype-single-select')
story_type.click()
story_type_groups = [a for a in driver.find_elements_by_css_selector('li a.aui-list-item-link') if u'故事'==a.text]
if story_type_groups:
story_type_groups[0].click()
time.sleep(0.5)
drop_menus = driver.find_elements_by_css_selector('span.drop-menu')
if len(drop_menus) < 5:
time.sleep(10)
print '出错啦'
sys.exit(1)
test_type = Select(driver.find_element_by_id('customfield_10200'))
test_type.select_by_value('10202')
time.sleep(0.5)
requirement = Select(driver.find_element_by_id('customfield_10101'))
requirement.select_by_value('10101')
time.sleep(0.5)
summary = driver.find_element_by_id('summary')
summary.send_keys(unicode(summary_text))
time.sleep(0.5)
work_time = driver.find_element_by_id('customfield_10833')
work_time.send_keys(work_time_text)
time.sleep(0.5)
sprint = drop_menus[5]
sprint.click()
sprint_groups = []
while not sprint_groups:
time.sleep(0.5)
sprint_groups = [a for a in driver.find_elements_by_css_selector('li a') if group in a.text and u'在用' in a.text]
sprint_groups[0].click()
time.sleep(0.5)
# time.sleep(15)
# code = driver.find_element_by_id('customfield_10503-3')
# code.click()
if REQ:
question = driver.find_element_by_css_selector('#issuelinks-issues-multi-select textarea')
question.send_keys(unicode(REQ))
time.sleep(0.5)
items = driver.find_elements_by_css_selector('li.menu-item')
if items and len(items) > 1:
relationship_item = items[1]
relationship_item.click()
time.sleep(0.5)
dev_person = driver.find_element_by_css_selector('#customfield_10300_container textarea')
if dev_person and login_token.split('-'):
dev_person.send_keys(login_token.split('-')[0])
time.sleep(0.5)
tester_person = driver.find_element_by_css_selector('#customfield_10400_container textarea')
if tester_person and tester:
tester_person.send_keys(tester)
time.sleep(0.5)
submit = driver.find_element_by_id('create-issue-submit')
submit.click()
WebDriverWait(driver, 10000).until(
EC.element_to_be_clickable((By.XPATH, '//*[@id="aui-flag-container"]/div/div/a'))
)
story = driver.find_element_by_xpath('//*[@id="aui-flag-container"]/div/div/a')
story_href = story.get_attribute('href')
print summary_text, ': ', story_href
# print '已建: ', summary_text, ', 时长, :', work_time_text, '天'
driver.refresh()
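# Hedged usage note (illustrative values, not in the original script): create_story()
# assumes an authenticated JIRA session is already loaded in `driver` and is normally
# driven by the __main__ block below, e.g.
#   create_story(summary_text='Fix crash on login', work_time_text='0.5', REQ='REQ-123', isFirst=True)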
if __name__ == '__main__':
login_token = sys.argv[1]
file_path = sys.argv[2]
tester = sys.argv[3]
if not os.path.exists(file_path):
print '出错啦'
sys.exit(1)
else:
with open(file_path, 'r') as f:
lines = f.readlines()
f.close()
if '-' not in login_token:
print '出错啦'
sys.exit(1)
elif len(login_token.split('-')[-1]) != 32:
print '出错啦'
sys.exit(1)
chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument('--headless')
driver = webdriver.Chrome(chrome_options=chrome_options)
url = '' + login_token
print url
driver.get(url)
# print driver.get_cookies()
group = u'iOS直播服务组'
for idx, line in enumerate(lines):
if ',' in line and '，' not in line:
words = line.encode('utf-8').strip().split(',')
elif '，' in line and ',' not in line:
words = line.encode('utf-8').strip().split('，')
else:
words = []
if len(words) == 2:
create_story(summary_text=words[0].strip(), work_time_text=words[1].strip(), isFirst=(idx==0))
elif len(words) == 3:
create_story(summary_text=words[0].strip(), work_time_text=words[1].strip(), REQ=words[2].strip(), isFirst=(idx==0))
driver.close() | mit |
axbaretto/beam | sdks/python/.tox/py27gcp/lib/python2.7/site-packages/pbr/extra_files.py | 145 | 1096 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils import errors
import os
_extra_files = []
def get_extra_files():
global _extra_files
return _extra_files
def set_extra_files(extra_files):
# Let's do a sanity check
for filename in extra_files:
if not os.path.exists(filename):
raise errors.DistutilsFileError(
'%s from the extra_files option in setup.cfg does not '
'exist' % filename)
global _extra_files
_extra_files[:] = extra_files[:]
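# Hedged usage note (illustrative): pbr feeds this module from the `extra_files`
# option of setup.cfg (commonly placed under the [files] section), e.g.
#
#   [files]
#   extra_files =
#       README.rst
#       etc/app.conf
#
# set_extra_files() then verifies each listed path exists before packaging, and
# get_extra_files() returns the validated list.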
| apache-2.0 |
sposh-science/pycode-browser | Code/Physics/spring2.py | 6 | 1784 | """
spring2.py
The rk4_two() routine in this program does a two step integration using
an array method. The current x and xprime values are kept in a global
list named 'val'.
val[0] = current position; val[1] = current velocity
The results are compared with analytically calculated values.
"""
from pylab import *
def accn(t, val):
force = -spring_const * val[0] - damping * val[1]
return force/mass
def vel(t, val):
return val[1]
def rk4_two(t, h): # Time and Step value
global xxp # x and xprime values in a 'xxp'
k1 = [0,0] # initialize 5 empty lists.
k2 = [0,0]
k3 = [0,0]
k4 = [0,0]
tmp= [0,0]
k1[0] = vel(t,xxp)
k1[1] = accn(t,xxp)
for i in range(2): # value of functions at t + h/2
tmp[i] = xxp[i] + k1[i] * h/2
k2[0] = vel(t + h/2, tmp)
k2[1] = accn(t + h/2, tmp)
for i in range(2): # value of functions at t + h/2
tmp[i] = xxp[i] + k2[i] * h/2
k3[0] = vel(t + h/2, tmp)
k3[1] = accn(t + h/2, tmp)
for i in range(2): # value of functions at t + h
tmp[i] = xxp[i] + k3[i] * h
k4[0] = vel(t+h, tmp)
k4[1] = accn(t+h, tmp)
for i in range(2): # combine the four increments for the final RK4 update
xxp[i] = xxp[i] + ( k1[i] + \
2.0*k2[i] + 2.0*k3[i] + k4[i]) * h/ 6.0
t = 0.0 # Starting time
h = 0.01 # Runge-Kutta step size, time increment
xxp = [2.0, 0.0] # initial position & velocity
spring_const = 100.0 # spring constant
mass = 2.0 # mass of the oscillating object
damping = 0.0
tm = [0.0] # Lists to store time, position & velocity
x = [xxp[0]]
xp = [xxp[1]]
xth = [xxp[0]]
while t < 5:
rk4_two(t,h) # Do one step RK integration
t = t + h
tm.append(t)
xp.append(xxp[1])
x.append(xxp[0])
th = 2.0 * cos(sqrt(spring_const/mass)* (t))
xth.append(th)
plot(tm,x)
plot(tm,xth,'+')
show()
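# Hedged extension note (not in the original): with damping > 0 the undamped
# analytic curve above no longer applies. For light damping the underdamped
# solution is approximately
#   x(t) = 2.0 * exp(-damping*t/(2*mass)) * cos(omega_d*t),
#   omega_d = sqrt(spring_const/mass - (damping/(2*mass))**2)
# which could be appended to xth inside the loop above for the same visual check.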
| gpl-3.0 |
ASCrookes/django | tests/gis_tests/distapp/tests.py | 154 | 33547 | from __future__ import unicode_literals
from django.contrib.gis.db.models.functions import (
Area, Distance, Length, Perimeter, Transform,
)
from django.contrib.gis.geos import GEOSGeometry, LineString, Point
from django.contrib.gis.measure import D # alias for Distance
from django.db import connection
from django.db.models import Q
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils.deprecation import RemovedInDjango20Warning
from ..utils import no_oracle, oracle, postgis, spatialite
from .models import (
AustraliaCity, CensusZipcode, Interstate, SouthTexasCity, SouthTexasCityFt,
SouthTexasInterstate, SouthTexasZipcode,
)
@skipUnlessDBFeature("gis_enabled")
class DistanceTest(TestCase):
fixtures = ['initial']
def setUp(self):
# A point we are testing distances with -- using a WGS84
# coordinate that'll be implicitly transformed to
# the coordinate system of the field, EPSG:32140 (Texas South Central
# w/units in meters)
self.stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326)
# Another one for Australia
self.au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326)
def get_names(self, qs):
cities = [c.name for c in qs]
cities.sort()
return cities
def test_init(self):
"""
Test initialization of distance models.
"""
self.assertEqual(9, SouthTexasCity.objects.count())
self.assertEqual(9, SouthTexasCityFt.objects.count())
self.assertEqual(11, AustraliaCity.objects.count())
self.assertEqual(4, SouthTexasZipcode.objects.count())
self.assertEqual(4, CensusZipcode.objects.count())
self.assertEqual(1, Interstate.objects.count())
self.assertEqual(1, SouthTexasInterstate.objects.count())
@skipUnlessDBFeature("supports_dwithin_lookup")
def test_dwithin(self):
"""
Test the `dwithin` lookup type.
"""
# Distances -- all should be equal (except for the
# degree/meter pair in au_cities, that's somewhat
# approximate).
tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)]
au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)]
# Expected cities for Australia and Texas.
tx_cities = ['Downtown Houston', 'Southside Place']
au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong']
# Performing distance queries on two projected coordinate systems one
# with units in meters and the other in units of U.S. survey feet.
for dist in tx_dists:
if isinstance(dist, tuple):
dist1, dist2 = dist
else:
dist1 = dist2 = dist
qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1))
qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2))
for qs in qs1, qs2:
self.assertEqual(tx_cities, self.get_names(qs))
# Now performing the `dwithin` queries on a geodetic coordinate system.
for dist in au_dists:
if isinstance(dist, D) and not oracle:
type_error = True
else:
type_error = False
if isinstance(dist, tuple):
if oracle:
dist = dist[1]
else:
dist = dist[0]
# Creating the query set.
qs = AustraliaCity.objects.order_by('name')
if type_error:
# A ValueError should be raised on PostGIS when trying to pass
# Distance objects into a DWithin query using a geodetic field.
self.assertRaises(ValueError, AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count)
else:
self.assertListEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))
@skipUnlessDBFeature("has_distance_method")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_distance_projected(self):
"""
Test the `distance` GeoQuerySet method on projected coordinate systems.
"""
# The point for La Grange, TX
lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
# Reference distances in feet and in meters. Got these values from
# using the provided raw SQL statements.
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140))
# FROM distapp_southtexascity;
m_distances = [147075.069813, 139630.198056, 140888.552826,
138809.684197, 158309.246259, 212183.594374,
70870.188967, 165337.758878, 139196.085105]
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278))
# FROM distapp_southtexascityft;
# Oracle 11 thinks this is not a projected coordinate system, so it's
# not tested.
ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
455411.438904354, 519386.252102563, 696139.009211594,
232513.278304279, 542445.630586414, 456679.155883207]
# Testing using different variations of parameters and using models
# with different projected coordinate systems.
dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point').order_by('id')
dist2 = SouthTexasCity.objects.distance(lagrange).order_by('id') # Using GEOSGeometry parameter
if spatialite or oracle:
dist_qs = [dist1, dist2]
else:
dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt).order_by('id') # Using EWKT string parameter.
dist4 = SouthTexasCityFt.objects.distance(lagrange).order_by('id')
dist_qs = [dist1, dist2, dist3, dist4]
# Original query done on PostGIS, have to adjust AlmostEqual tolerance
# for Oracle.
tol = 2 if oracle else 5
# Ensuring expected distances are returned for each distance queryset.
for qs in dist_qs:
for i, c in enumerate(qs):
self.assertAlmostEqual(m_distances[i], c.distance.m, tol)
self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol)
@skipUnlessDBFeature("has_distance_method", "supports_distance_geodetic")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_distance_geodetic(self):
"""
Test the `distance` GeoQuerySet method on geodetic coordinate systems.
"""
tol = 2 if oracle else 5
# Testing geodetic distance calculation with a non-point geometry
# (a LineString of Wollongong and Shellharbour coords).
ls = LineString(((150.902, -34.4245), (150.87, -34.5789)))
# Reference query:
# SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326))
# FROM distapp_australiacity ORDER BY name;
distances = [1120954.92533513, 140575.720018241, 640396.662906304,
60580.9693849269, 972807.955955075, 568451.8357838,
40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0]
qs = AustraliaCity.objects.distance(ls).order_by('name')
for city, distance in zip(qs, distances):
# Testing equivalence to within a meter.
self.assertAlmostEqual(distance, city.distance.m, 0)
# Got the reference distances using the raw SQL statements:
# SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326),
# 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
# SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326))
# FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere
if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0):
# PROJ.4 versions 4.7+ have updated datums, and thus different
# distance values.
spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404,
90847.4358768573, 217402.811919332, 709599.234564757,
640011.483550888, 7772.00667991925, 1047861.78619339,
1165126.55236034]
sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719,
90804.7533823494, 217713.384600405, 709134.127242793,
639828.157159169, 7786.82949717788, 1049204.06569028,
1162623.7238134]
else:
spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115,
90847.435881812, 217402.811862568, 709599.234619957,
640011.483583758, 7772.00667666425, 1047861.7859506,
1165126.55237647]
sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184,
90804.4414289463, 217712.63666124, 709131.691061906,
639825.959074112, 7786.80274606706, 1049200.46122281,
1162619.7297006]
# Testing with spheroid distances first.
hillsdale = AustraliaCity.objects.get(name='Hillsdale')
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True).order_by('id')
for i, c in enumerate(qs):
self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
if postgis:
# PostGIS uses sphere-only distances by default, testing these as well.
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point).order_by('id')
for i, c in enumerate(qs):
self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@no_oracle # Oracle already handles geographic distance calculation.
@skipUnlessDBFeature("has_distance_method")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_distance_transform(self):
"""
Test the `distance` GeoQuerySet method used with `transform` on a geographic field.
"""
# We'll be using a Polygon (created by buffering the centroid
# of 77005 to 100m) -- which aren't allowed in geographic distance
# queries normally, however our field has been transformed to
# a non-geographic system.
z = SouthTexasZipcode.objects.get(name='77005')
# Reference query:
# SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140),
# ST_GeomFromText('<buffer_wkt>', 32140))
# FROM "distapp_censuszipcode";
dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
# Having our buffer in the SRID of the transformation and of the field
# -- should get the same results. The first buffer has no need for
# transformation SQL because it is the same SRID as what was given
# to `transform()`. The second buffer will need to be transformed,
# however.
buf1 = z.poly.centroid.buffer(100)
buf2 = buf1.transform(4269, clone=True)
ref_zips = ['77002', '77025', '77401']
for buf in [buf1, buf2]:
qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf).order_by('name')
self.assertListEqual(ref_zips, self.get_names(qs))
for i, z in enumerate(qs):
self.assertAlmostEqual(z.distance.m, dists_m[i], 5)
@skipUnlessDBFeature("supports_distances_lookups")
def test_distance_lookups(self):
"""
Test the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types.
"""
# Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole'
# (thus, Houston and Southside place will be excluded as tested in
# the `test02_dwithin` above).
qs1 = SouthTexasCity.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(
point__distance_lte=(self.stx_pnt, D(km=20)),
)
# Can't determine the units on SpatiaLite from PROJ.4 string, and
# Oracle 11 incorrectly thinks it is not projected.
if spatialite or oracle:
dist_qs = (qs1,)
else:
qs2 = SouthTexasCityFt.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(
point__distance_lte=(self.stx_pnt, D(km=20)),
)
dist_qs = (qs1, qs2)
for qs in dist_qs:
cities = self.get_names(qs)
self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place'])
# Doing a distance query using Polygons instead of a Point.
z = SouthTexasZipcode.objects.get(name='77005')
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275)))
self.assertEqual(['77025', '77401'], self.get_names(qs))
# If we add a little more distance 77002 should be included.
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300)))
self.assertEqual(['77002', '77025', '77401'], self.get_names(qs))
@skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic")
def test_geodetic_distance_lookups(self):
"""
Test distance lookups on geodetic coordinate systems.
"""
# Line is from Canberra to Sydney. Query is for all other cities within
# a 100km of that line (which should exclude only Hobart & Adelaide).
line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326)
dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100)))
self.assertEqual(9, dist_qs.count())
self.assertEqual(['Batemans Bay', 'Canberra', 'Hillsdale',
'Melbourne', 'Mittagong', 'Shellharbour',
'Sydney', 'Thirroul', 'Wollongong'],
self.get_names(dist_qs))
# Too many params (4 in this case) should raise a ValueError.
queryset = AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4'))
self.assertRaises(ValueError, len, queryset)
# Not enough params should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',)))
# Getting all cities w/in 550 miles of Hobart.
hobart = AustraliaCity.objects.get(name='Hobart')
qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550)))
cities = self.get_names(qs)
self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne'])
# Cities that are either really close or really far from Wollongong --
# and using different units of distance.
wollongong = AustraliaCity.objects.get(name='Wollongong')
d1, d2 = D(yd=19500), D(nm=400) # Yards (~17km) & Nautical miles.
# Normal geodetic distance lookup (uses `distance_sphere` on PostGIS).
gq1 = Q(point__distance_lte=(wollongong.point, d1))
gq2 = Q(point__distance_gte=(wollongong.point, d2))
qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2)
# Geodetic distance lookup but telling GeoDjango to use `distance_spheroid`
# instead (we should get the same results b/c accuracy variance won't matter
# in this test case).
querysets = [qs1]
if connection.features.has_distance_spheroid_method:
gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid'))
gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid'))
qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4)
querysets.append(qs2)
for qs in querysets:
cities = self.get_names(qs)
self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul'])
@skipUnlessDBFeature("has_area_method")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_area(self):
"""
Test the `area` GeoQuerySet method.
"""
# Reference queries:
# SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461]
# Tolerance has to be lower for Oracle
tol = 2
for i, z in enumerate(SouthTexasZipcode.objects.order_by('name').area()):
self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol)
@skipUnlessDBFeature("has_length_method")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_length(self):
"""
Test the `length` GeoQuerySet method.
"""
# Reference query (should use `length_spheroid`).
# SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326) 'SPHEROID["WGS 84",6378137,298.257223563,
# AUTHORITY["EPSG","7030"]]');
len_m1 = 473504.769553813
len_m2 = 4617.668
if connection.features.supports_distance_geodetic:
qs = Interstate.objects.length()
tol = 2 if oracle else 3
self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
else:
# Does not support geodetic coordinate systems.
self.assertRaises(ValueError, Interstate.objects.length)
# Now doing length on a projected coordinate system.
i10 = SouthTexasInterstate.objects.length().get(name='I-10')
self.assertAlmostEqual(len_m2, i10.length.m, 2)
@skipUnlessDBFeature("has_perimeter_method")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_perimeter(self):
"""
Test the `perimeter` GeoQuerySet method.
"""
# Reference query:
# SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode;
perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697]
tol = 2 if oracle else 7
for i, z in enumerate(SouthTexasZipcode.objects.order_by('name').perimeter()):
self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
# Running on points; should return 0.
for i, c in enumerate(SouthTexasCity.objects.perimeter(model_att='perim')):
self.assertEqual(0, c.perim.m)
@skipUnlessDBFeature("has_area_method", "has_distance_method")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_measurement_null_fields(self):
"""
Test the measurement GeoQuerySet methods on fields with NULL values.
"""
# Creating SouthTexasZipcode w/NULL value.
SouthTexasZipcode.objects.create(name='78212')
# Performing distance/area queries against the NULL PolygonField,
# and ensuring the result of the operations is None.
htown = SouthTexasCity.objects.get(name='Downtown Houston')
z = SouthTexasZipcode.objects.distance(htown.point).area().get(name='78212')
self.assertIsNone(z.distance)
self.assertIsNone(z.area)
@skipUnlessDBFeature("has_distance_method")
@ignore_warnings(category=RemovedInDjango20Warning)
def test_distance_order_by(self):
qs = SouthTexasCity.objects.distance(Point(3, 3)).order_by(
'distance'
).values_list('name', flat=True).filter(name__in=('San Antonio', 'Pearland'))
self.assertQuerysetEqual(qs, ['San Antonio', 'Pearland'], lambda x: x)
'''
=============================
Distance functions on PostGIS
=============================
Function                                        | Projected Geometry | Lon/lat Geometry | Geography (4326)
ST_Distance(geom1, geom2)                       | OK (meters)        | :-( (degrees)    | OK (meters)
ST_Distance(geom1, geom2, use_spheroid=False)   | N/A                | N/A              | OK (meters), less accurate, quick
Distance_Sphere(geom1, geom2)                   | N/A                | OK (meters)      | N/A
Distance_Spheroid(geom1, geom2, spheroid)       | N/A                | OK (meters)      | N/A
================================
Distance functions on Spatialite
================================
Function                                        | Projected Geometry | Lon/lat Geometry
ST_Distance(geom1, geom2)                       | OK (meters)        | N/A
ST_Distance(geom1, geom2, use_ellipsoid=True)   | N/A                | OK (meters)
ST_Distance(geom1, geom2, use_ellipsoid=False)  | N/A                | OK (meters), less accurate, quick
'''
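# Illustrative sketch (not part of the original test module): how the table
# above surfaces through the GeoDjango ORM. The helper name below is
# hypothetical; it assumes the AustraliaCity/SouthTexasCity models and the
# Distance/Point imports that the tests in this module already use.
def _distance_annotation_sketch():
    sydney = Point(151.231341, -33.952685, srid=4326)
    # Geodetic (lon/lat) field, default arguments: PostGIS falls back to the
    # faster sphere-based calculation (Distance_Sphere).
    by_sphere = AustraliaCity.objects.annotate(d=Distance('point', sydney))
    # Geodetic field with spheroid=True: Distance_Spheroid, slower but more
    # accurate.
    by_spheroid = AustraliaCity.objects.annotate(
        d=Distance('point', sydney, spheroid=True))
    # Projected field: plain ST_Distance; the result is already in the unit
    # of the SRID (meters for SRID 32140).
    projected = SouthTexasCity.objects.annotate(
        d=Distance('point', Point(3, 3, srid=32140)))
    return by_sphere, by_spheroid, projected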
@skipUnlessDBFeature("gis_enabled")
class DistanceFunctionsTests(TestCase):
fixtures = ['initial']
@skipUnlessDBFeature("has_Area_function")
def test_area(self):
# Reference queries:
# SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461]
# Tolerance has to be lower for Oracle
tol = 2
for i, z in enumerate(SouthTexasZipcode.objects.annotate(area=Area('poly')).order_by('name')):
# MySQL is returning a raw float value
self.assertAlmostEqual(area_sq_m[i], z.area.sq_m if hasattr(z.area, 'sq_m') else z.area, tol)
@skipUnlessDBFeature("has_Distance_function")
def test_distance_simple(self):
"""
Test a simple distance query, with projected coordinates and without
transformation.
"""
lagrange = GEOSGeometry('POINT(805066.295722839 4231496.29461335)', 32140)
houston = SouthTexasCity.objects.annotate(dist=Distance('point', lagrange)).order_by('id').first()
tol = 2 if oracle else 5
self.assertAlmostEqual(
houston.dist.m if hasattr(houston.dist, 'm') else houston.dist,
147075.069813,
tol
)
@skipUnlessDBFeature("has_Distance_function", "has_Transform_function")
def test_distance_projected(self):
"""
Test the `Distance` function on projected coordinate systems.
"""
# The point for La Grange, TX
lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
# Reference distances in feet and in meters. Got these values from
# using the provided raw SQL statements.
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140))
# FROM distapp_southtexascity;
m_distances = [147075.069813, 139630.198056, 140888.552826,
138809.684197, 158309.246259, 212183.594374,
70870.188967, 165337.758878, 139196.085105]
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278))
# FROM distapp_southtexascityft;
# Oracle 11 thinks this is not a projected coordinate system, so it's
# not tested.
ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
455411.438904354, 519386.252102563, 696139.009211594,
232513.278304279, 542445.630586414, 456679.155883207]
# Testing using different variations of parameters and using models
# with different projected coordinate systems.
dist1 = SouthTexasCity.objects.annotate(distance=Distance('point', lagrange)).order_by('id')
if spatialite or oracle:
dist_qs = [dist1]
else:
dist2 = SouthTexasCityFt.objects.annotate(distance=Distance('point', lagrange)).order_by('id')
# Using EWKT string parameter.
dist3 = SouthTexasCityFt.objects.annotate(distance=Distance('point', lagrange.ewkt)).order_by('id')
dist_qs = [dist1, dist2, dist3]
# The original query was run on PostGIS; the assertAlmostEqual tolerance
# has to be adjusted for Oracle.
tol = 2 if oracle else 5
# Ensuring expected distances are returned for each distance queryset.
for qs in dist_qs:
for i, c in enumerate(qs):
self.assertAlmostEqual(m_distances[i], c.distance.m, tol)
self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol)
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_geodetic(self):
"""
Test the `Distance` function on geodetic coordinate systems.
"""
# Testing geodetic distance calculation with a non-point geometry
# (a LineString of Wollongong and Shellharbour coords).
ls = LineString(((150.902, -34.4245), (150.87, -34.5789)), srid=4326)
# Reference query:
# SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326))
# FROM distapp_australiacity ORDER BY name;
distances = [1120954.92533513, 140575.720018241, 640396.662906304,
60580.9693849269, 972807.955955075, 568451.8357838,
40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0]
qs = AustraliaCity.objects.annotate(distance=Distance('point', ls)).order_by('name')
for city, distance in zip(qs, distances):
# Testing equivalence to within a meter.
self.assertAlmostEqual(distance, city.distance.m, 0)
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_geodetic_spheroid(self):
tol = 2 if oracle else 5
# Got the reference distances using the raw SQL statements:
# SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326),
# 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
# SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326))
# FROM distapp_australiacity WHERE (NOT (id = 11));
if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0):
# PROJ.4 versions 4.7+ have updated datums, and thus different
# distance values.
spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404,
90847.4358768573, 217402.811919332, 709599.234564757,
640011.483550888, 7772.00667991925, 1047861.78619339,
1165126.55236034]
sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719,
90804.7533823494, 217713.384600405, 709134.127242793,
639828.157159169, 7786.82949717788, 1049204.06569028,
1162623.7238134]
else:
spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115,
90847.435881812, 217402.811862568, 709599.234619957,
640011.483583758, 7772.00667666425, 1047861.7859506,
1165126.55237647]
sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184,
90804.4414289463, 217712.63666124, 709131.691061906,
639825.959074112, 7786.80274606706, 1049200.46122281,
1162619.7297006]
# Testing with spheroid distances first.
hillsdale = AustraliaCity.objects.get(name='Hillsdale')
qs = AustraliaCity.objects.exclude(id=hillsdale.id).annotate(
distance=Distance('point', hillsdale.point, spheroid=True)
).order_by('id')
for i, c in enumerate(qs):
self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
if postgis:
# PostGIS uses sphere-only distances by default, testing these as well.
qs = AustraliaCity.objects.exclude(id=hillsdale.id).annotate(
distance=Distance('point', hillsdale.point)
).order_by('id')
for i, c in enumerate(qs):
self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@no_oracle # Oracle already handles geographic distance calculation.
@skipUnlessDBFeature("has_Distance_function", 'has_Transform_function')
def test_distance_transform(self):
"""
Test the `Distance` function used with `Transform` on a geographic field.
"""
# We'll be using a Polygon (created by buffering the centroid
# of 77005 to 100m) -- which isn't normally allowed in geographic
# distance queries; however, our field has been transformed to
# a non-geographic system.
z = SouthTexasZipcode.objects.get(name='77005')
# Reference query:
# SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140),
# ST_GeomFromText('<buffer_wkt>', 32140))
# FROM "distapp_censuszipcode";
dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
# Having our buffer in the SRID of the transformation and of the field
# -- should get the same results. The first buffer has no need for
# transformation SQL because it is the same SRID as what was given
# to `transform()`. The second buffer will need to be transformed,
# however.
buf1 = z.poly.centroid.buffer(100)
buf2 = buf1.transform(4269, clone=True)
ref_zips = ['77002', '77025', '77401']
for buf in [buf1, buf2]:
qs = CensusZipcode.objects.exclude(name='77005').annotate(
distance=Distance(Transform('poly', 32140), buf)
).order_by('name')
self.assertEqual(ref_zips, sorted([c.name for c in qs]))
for i, z in enumerate(qs):
self.assertAlmostEqual(z.distance.m, dists_m[i], 5)
@skipUnlessDBFeature("has_Distance_function")
def test_distance_order_by(self):
qs = SouthTexasCity.objects.annotate(distance=Distance('point', Point(3, 3, srid=32140))).order_by(
'distance'
).values_list('name', flat=True).filter(name__in=('San Antonio', 'Pearland'))
self.assertQuerysetEqual(qs, ['San Antonio', 'Pearland'], lambda x: x)
@skipUnlessDBFeature("has_Length_function")
def test_length(self):
"""
Test the `Length` function.
"""
# Reference query (should use `length_spheroid`).
# SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326), 'SPHEROID["WGS 84",6378137,298.257223563,
# AUTHORITY["EPSG","7030"]]');
len_m1 = 473504.769553813
len_m2 = 4617.668
if connection.features.supports_length_geodetic:
qs = Interstate.objects.annotate(length=Length('path'))
tol = 2 if oracle else 3
self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
# TODO: test with spheroid argument (True and False)
else:
# Does not support geodetic coordinate systems.
with self.assertRaises(NotImplementedError):
list(Interstate.objects.annotate(length=Length('path')))
# Now doing length on a projected coordinate system.
i10 = SouthTexasInterstate.objects.annotate(length=Length('path')).get(name='I-10')
self.assertAlmostEqual(len_m2, i10.length.m if isinstance(i10.length, D) else i10.length, 2)
self.assertTrue(
SouthTexasInterstate.objects.annotate(length=Length('path')).filter(length__gt=4000).exists()
)
@skipUnlessDBFeature("has_Perimeter_function")
def test_perimeter(self):
"""
Test the `Perimeter` function.
"""
# Reference query:
# SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode;
perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697]
tol = 2 if oracle else 7
qs = SouthTexasZipcode.objects.annotate(perimeter=Perimeter('poly')).order_by('name')
for i, z in enumerate(qs):
self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
# Running on points; should return 0.
qs = SouthTexasCity.objects.annotate(perim=Perimeter('point'))
for city in qs:
self.assertEqual(0, city.perim.m)
@skipUnlessDBFeature("supports_null_geometries", "has_Area_function", "has_Distance_function")
def test_measurement_null_fields(self):
"""
Test the measurement functions on fields with NULL values.
"""
# Creating SouthTexasZipcode w/NULL value.
SouthTexasZipcode.objects.create(name='78212')
# Performing distance/area queries against the NULL PolygonField,
# and ensuring the result of the operations is None.
htown = SouthTexasCity.objects.get(name='Downtown Houston')
z = SouthTexasZipcode.objects.annotate(
distance=Distance('poly', htown.point), area=Area('poly')
).get(name='78212')
self.assertIsNone(z.distance)
self.assertIsNone(z.area)
| bsd-3-clause |
citrix-openstack-build/ceilometer | tests/storage/test_impl_hbase.py | 3 | 1963 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012, 2013 Dell Inc.
#
# Author: Stas Maksimov <[email protected]>
# Author: Shengjie Min <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_hbase.py
.. note::
In order to run the tests against real HBase server set the environment
variable CEILOMETER_TEST_HBASE_URL to point to that HBase instance before
running the tests. Make sure the Thrift server is running on that server.
"""
from oslo.config import cfg
from ceilometer.storage.impl_hbase import Connection
from ceilometer.storage.impl_hbase import MConnection
from ceilometer.tests import db as tests_db
class HBaseEngineTestBase(tests_db.TestBase):
database_connection = 'hbase://__test__'
class ConnectionTest(HBaseEngineTestBase):
def test_hbase_connection(self):
cfg.CONF.database.connection = self.database_connection
conn = Connection(cfg.CONF)
self.assertIsInstance(conn.conn, MConnection)
class TestConn(object):
def __init__(self, host, port):
self.netloc = '%s:%s' % (host, port)
def open(self):
pass
cfg.CONF.database.connection = 'hbase://test_hbase:9090'
self.stubs.Set(Connection, '_get_connection',
lambda self, x: TestConn(x['host'], x['port']))
conn = Connection(cfg.CONF)
self.assertIsInstance(conn.conn, TestConn)
| apache-2.0 |