repo_name | path | copies | size | content | license
---|---|---|---|---|---|
karesansui/karesansui | bin/restart_network.py | 1 | 4392 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import sys
import logging
from optparse import OptionParser
from ksscommand import KssCommand, KssCommandException, KssCommandOptException
import __cmd__
try:
import karesansui
from karesansui import __version__
from karesansui.lib.virt.virt import KaresansuiVirtConnection, KaresansuiVirtException
from karesansui.lib.const import NETWORK_IFCONFIG_COMMAND, NETWORK_BRCTL_COMMAND
from karesansui.lib.utils import load_locale
from karesansui.lib.utils import execute_command
except ImportError, e:
print >>sys.stderr, "[Error] some packages not found. - %s" % e
sys.exit(1)
_ = load_locale()
usage = '%prog [options]'
def getopts():
optp = OptionParser(usage=usage, version=__version__)
optp.add_option('-n', '--name', dest='name', help=_('Network name'))
optp.add_option('-f', '--force', dest='force', action="store_true", help=_('Do everything to bring up network'))
return optp.parse_args()
def chkopts(opts):
if not opts.name:
raise KssCommandOptException('ERROR: %s option is required.' % '-n or --name')
class RestartNetwork(KssCommand):
def process(self):
(opts, args) = getopts()
chkopts(opts)
self.up_progress(10)
conn = KaresansuiVirtConnection(readonly=False)
try:
active_networks = conn.list_active_network()
inactive_networks = conn.list_inactive_network()
if not (opts.name in active_networks or opts.name in inactive_networks):
raise KssCommandException('Could not find the specified network. - net=%s' % (opts.name))
self.up_progress(10)
try:
conn.stop_network(opts.name)
except KaresansuiVirtException, e:
if opts.force is not True:
raise KssCommandException('Could not stop the specified network. - net=%s' % (opts.name))
self.up_progress(20)
try:
conn.start_network(opts.name)
except KaresansuiVirtException, e:
if opts.force is not True:
raise KssCommandException('Could not start the specified network. - net=%s' % (opts.name))
# try to bring down the existing bridge, then retry
kvn = conn.search_kvn_networks(opts.name)[0]
try:
bridge_name = kvn.get_info()['bridge']['name']
except KeyError:
bridge_name = None
if bridge_name is not None:
ret, res = execute_command([NETWORK_IFCONFIG_COMMAND, bridge_name, 'down'])
ret, res = execute_command([NETWORK_BRCTL_COMMAND, 'delbr', bridge_name])
# try again
conn.start_network(opts.name)
self.up_progress(10)
if not (opts.name in conn.list_active_network()):
raise KssCommandException('Failed to start network. - net=%s' % (opts.name))
self.logger.info('Restarted network. - net=%s' % (opts.name))
print >>sys.stdout, _('Restarted network. - net=%s') % (opts.name)
return True
finally:
conn.close()
if __name__ == "__main__":
target = RestartNetwork()
sys.exit(target.run())
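# Usage sketch (hedged; the option names come from getopts() above):
#
#   $ python restart_network.py -n default
#   $ python restart_network.py -n default --force
#
# The first form fails if the network cannot be stopped or started; with
# --force the script tears down the underlying bridge (ifconfig down +
# brctl delbr) and retries conn.start_network() before giving up.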
| mit |
PwnArt1st/searx | tests/unit/engines/test_youtube_api.py | 13 | 3848 | from collections import defaultdict
import mock
from searx.engines import youtube_api
from searx.testing import SearxTestCase
class TestYoutubeAPIEngine(SearxTestCase):
def test_request(self):
query = 'test_query'
dicto = defaultdict(dict)
dicto['pageno'] = 0
dicto['language'] = 'fr_FR'
params = youtube_api.request(query, dicto)
self.assertTrue('url' in params)
self.assertTrue(query in params['url'])
self.assertIn('googleapis.com', params['url'])
self.assertIn('youtube', params['url'])
self.assertIn('fr', params['url'])
dicto['language'] = 'all'
params = youtube_api.request(query, dicto)
self.assertFalse('fr' in params['url'])
def test_response(self):
self.assertRaises(AttributeError, youtube_api.response, None)
self.assertRaises(AttributeError, youtube_api.response, [])
self.assertRaises(AttributeError, youtube_api.response, '')
self.assertRaises(AttributeError, youtube_api.response, '[]')
response = mock.Mock(text='{}')
self.assertEqual(youtube_api.response(response), [])
response = mock.Mock(text='{"data": []}')
self.assertEqual(youtube_api.response(response), [])
json = """
{
"kind": "youtube#searchListResponse",
"etag": "xmg9xJZuZD438sF4hb-VcBBREXc/YJQDcTBCDcaBvl-sRZJoXdvy1ME",
"nextPageToken": "CAUQAA",
"pageInfo": {
"totalResults": 1000000,
"resultsPerPage": 20
},
"items": [
{
"kind": "youtube#searchResult",
"etag": "xmg9xJZuZD438sF4hb-VcBBREXc/IbLO64BMhbHIgWLwLw7MDYe7Hs4",
"id": {
"kind": "youtube#video",
"videoId": "DIVZCPfAOeM"
},
"snippet": {
"publishedAt": "2015-05-29T22:41:04.000Z",
"channelId": "UCNodmx1ERIjKqvcJLtdzH5Q",
"title": "Title",
"description": "Description",
"thumbnails": {
"default": {
"url": "https://i.ytimg.com/vi/DIVZCPfAOeM/default.jpg"
},
"medium": {
"url": "https://i.ytimg.com/vi/DIVZCPfAOeM/mqdefault.jpg"
},
"high": {
"url": "https://i.ytimg.com/vi/DIVZCPfAOeM/hqdefault.jpg"
}
},
"channelTitle": "MinecraftUniverse",
"liveBroadcastContent": "none"
}
}
]
}
"""
response = mock.Mock(text=json)
results = youtube_api.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'https://www.youtube.com/watch?v=DIVZCPfAOeM')
self.assertEqual(results[0]['content'], 'Description')
self.assertEqual(results[0]['thumbnail'], 'https://i.ytimg.com/vi/DIVZCPfAOeM/hqdefault.jpg')
self.assertTrue('DIVZCPfAOeM' in results[0]['embedded'])
json = """
{
"kind": "youtube#searchListResponse",
"etag": "xmg9xJZuZD438sF4hb-VcBBREXc/YJQDcTBCDcaBvl-sRZJoXdvy1ME",
"nextPageToken": "CAUQAA",
"pageInfo": {
"totalResults": 1000000,
"resultsPerPage": 20
}
}
"""
response = mock.Mock(text=json)
results = youtube_api.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
json = """
{"toto":{"entry":[]
}
}
"""
response = mock.Mock(text=json)
results = youtube_api.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
| agpl-3.0 |
aquarimeter/aquarimeter | lib/python2.7/site-packages/pkg_resources.py | 134 | 99605 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys
import os
import time
import re
import imp
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
from pkgutil import get_importer
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
try:
basestring
next = lambda o: o.next()
from cStringIO import StringIO as BytesIO
except NameError:
basestring = str
from io import BytesIO
def execfile(fn, globs=None, locs=None):
if globs is None:
globs = globals()
if locs is None:
locs = globs
exec(compile(open(fn).read(), fn, 'exec'), globs, locs)
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
try:
import parser
except ImportError:
pass
def _bypass_ensure_directory(name, mode=0x1FF): # 0777
# Sandbox-bypassing version of ensure_directory()
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(name)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, mode)
_state_vars = {}
def _declare_state(vartype, **kw):
g = globals()
for name, val in kw.items():
g[name] = val
_state_vars[name] = vartype
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
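# Sketch of how the state helpers above fit together: a later call such as
# _declare_state('dict', _distribution_finders={}) registers a module-level
# dict so that __getstate__()/__setstate__() can snapshot and restore it
# via the _sget_dict/_sset_dict pair.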
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
pass # not Mac OS X
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
'ExtractionError',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""An already-installed version conflicts with the requested version"""
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
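# For instance, this module itself registers providers this way further
# down, e.g. register_loader_type(zipimport.zipimporter, ZipProvider) once
# ZipProvider is defined.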
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq,Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
import platform
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
import plistlib
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
return True # easy case
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
#import warnings
#warnings.warn("Mac eggs should be rebuilt to "
# "use the macosx designation instead of darwin.",
# category=DeprecationWarning)
return True
return False # egg isn't macosx or legacy darwin
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
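# Worked example (values hypothetical): compatible_platforms(
# 'macosx-10.3-fat', 'macosx-10.5-fat') returns True -- same major version
# and machine type, and the required minor update (5) is >= the provided
# one (3) -- while the reverse call returns False.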
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist,basestring): dist = Requirement.parse(dist)
if isinstance(dist,Requirement): dist = get_provider(dist)
if not isinstance(dist,Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self,dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
raise VersionConflict(dist,req) # XXX add more info
else:
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if not replace and dist.key in self.by_key:
return # ignore hidden distros
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
requirements = list(requirements)[::-1] # set up the stack
processed = {} # set of processed requirements
best = {} # key -> dist
to_activate = []
while requirements:
req = requirements.pop(0) # process dependencies breadth-first
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
#msg = ("The '%s' distribution was not found on this "
# "system, and is required by this application.")
#raise DistributionNotFound(msg % req)
# unfortunately, zc.buildout uses a str(err)
# to get the name of the distribution here..
raise DistributionNotFound(req)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
raise VersionConflict(dist,req) # XXX put more info here
requirements.extend(dist.requires(req.extras)[::-1])
processed[req] = True
return to_activate # return list of distros to activate
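# Usage sketch (requirement string hypothetical):
#   ws = WorkingSet()
#   for dist in ws.resolve(parse_requirements(["foo>=1.0"])):
#       ws.add(dist)
# which is essentially what require() below does.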
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
map(working_set.add, distributions) # add plugins+libs to sys.path
print 'Could not load', errors # display errors
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
plugin_projects.sort() # scan project names in alphabetic order
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
list(map(shadow_set.add, self)) # put all our entries in shadow_set
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError:
v = sys.exc_info()[1]
error_info[dist] = v # save error info
if fallback:
continue # try the next older version of project
else:
break # give up on this project, keep going
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self._cache = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform,self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self,project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
"""
try:
return self._cache[project_name]
except KeyError:
project_name = project_name.lower()
if project_name not in self._distmap:
return []
if project_name not in self._cache:
dists = self._cache[project_name] = self._distmap[project_name]
_sort_dists(dists)
return self._cache[project_name]
def add(self,dist):
"""Add `dist` if we ``can_add()`` it and it isn't already added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key,[])
if dist not in dists:
dists.append(dist)
if dist.key in self._cache:
_sort_dists(self._cache[dist.key])
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
return self.obtain(req, installer) # try and download/install
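# Sketch (requirement hypothetical): env.best_match(
# Requirement.parse('foo>=1.0'), working_set) returns the already-active
# 'foo' distribution if it satisfies the requirement, otherwise the newest
# compatible dist in this environment, else the result of obtain().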
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]: yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other,Distribution):
self.add(other)
elif isinstance(other,Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
AvailableDistributions = Environment # XXX backward compatibility
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
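# Sketch (names hypothetical): get_cache_path('Foo-1.0-py2.7.egg',
# ('data', 'x.txt')) resolves to
# <extraction_path>/Foo-1.0-py2.7.egg-tmp/data/x.txt, creating the parent
# directory on demand.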
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0x16D) & 0xFFF # 0555, 07777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
app_data = 'Application Data' # XXX this may be locale-specific!
app_homes = [
(('APPDATA',), None), # best option, should be locale-safe
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
(('WINDIR',), app_data), # 95/98/ME
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname,subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
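# For example: with PYTHON_EGG_CACHE unset, a POSIX user gets
# '~/.python-eggs' (expanded); on Windows with APPDATA set to
# C:\Users\me\AppData\Roaming, the result is that directory joined with
# 'Python-Eggs'.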
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
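# Worked examples, derived from the regexes above:
#   safe_name('my.pkg&tool')    -> 'my.pkg-tool'
#   safe_version('1.0 beta 2')  -> '1.0.beta.2'   (spaces become dots first)
#   safe_extra('Extra-Feature') -> 'extra_feature'
#   to_filename('my-pkg')       -> 'my_pkg'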
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': lambda: sys.version.split()[0],
'python_version': lambda:'%s.%s' % (sys.version_info[0], sys.version_info[1]),
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError:
return cls.normalize_exception(sys.exc_info()[1])
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
return functools.reduce(operator.and_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
return functools.reduce(operator.or_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
raise SyntaxError("Language feature not supported in environment markers")
@classmethod
def comparison(cls, nodelist):
if len(nodelist)>4:
raise SyntaxError("Chained comparison not allowed in environment markers")
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
raise SyntaxError(repr(cop)+" operator not allowed in environment markers")
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'parser' module, which is not implemented on
Jython and has been superseded by the 'ast' module in Python 2.6 and
later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
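# e.g. (hedged) evaluate_marker("os_name == 'posix'") consults the values
# table above and returns a bool, while an unsupported operator raises
# SyntaxError.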
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError:
e = sys.exc_info()[1]
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
kind = nodelist[0]
name = nodelist[1]
if kind==token.NAME:
try:
op = cls.values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
if kind==token.STRING:
s = nodelist[1]
if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
or '\\' in s:
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
raise SyntaxError("Language feature not supported in environment markers")
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info,name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self,resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self,name):
return self.egg_info and self._isdir(self._fn(self.egg_info,name))
def resource_listdir(self,resource_name):
return self._listdir(self._fn(self.module_path,resource_name))
def metadata_listdir(self,name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info,name))
return []
def run_script(self,script_name,namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n','\n')
script_text = script_text.replace('\r','\n')
script_filename = self._fn(self.egg_info,script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
execfile(script_filename, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text,script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self,module):
NullProvider.__init__(self,module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self,path):
return os.path.isdir(path)
def _listdir(self,path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
stream = open(path, 'rb')
try:
return stream.read()
finally:
stream.close()
register_loader_type(type(None), DefaultProvider)
if importlib_bootstrap is not None:
register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self,path: False
_get = lambda self,path: ''
_listdir = lambda self,path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
def build_zipmanifest(path):
"""
This builds a similar dictionary to the zipimport directory
caches. However instead of tuples, ZipInfo objects are stored.
The translation of the tuple is as follows:
* [0] - zipinfo.filename; on stock Pythons this needs "/" --> os.sep,
on PyPy it is the same (one reason why distribute did work
in some cases on PyPy and win32).
* [1] - zipinfo.compress_type
* [2] - zipinfo.compress_size
* [3] - zipinfo.file_size
* [4] - len(utf-8 encoding of filename) if zipinfo & 0x800
len(ascii encoding of filename) otherwise
* [5] - (zipinfo.date_time[0] - 1980) << 9 |
zipinfo.date_time[1] << 5 | zipinfo.date_time[2]
* [6] - (zipinfo.date_time[3] - 1980) << 11 |
zipinfo.date_time[4] << 5 | (zipinfo.date_time[5] // 2)
* [7] - zipinfo.CRC
"""
zipinfo = dict()
zfile = zipfile.ZipFile(path)
# ZipFile has no __exit__ on Python 3.1, so close it explicitly via try/finally
try:
for zitem in zfile.namelist():
zpath = zitem.replace('/', os.sep)
zipinfo[zpath] = zfile.getinfo(zitem)
assert zipinfo[zpath] is not None
finally:
zfile.close()
return zipinfo
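# Sketch (path hypothetical): build_zipmanifest('/eggs/Foo.egg') maps each
# archive member, keyed with os.sep separators, to its zipfile.ZipInfo, so
# ZipProvider below can stat members without reopening the archive.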
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
def __init__(self, module):
EggProvider.__init__(self,module)
self.zipinfo = build_zipmanifest(self.loader.archive)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.zip_pre)
)
def _parts(self,zip_path):
# Convert a zipfile subpath into an egg-relative path part list
fspath = self.zip_pre+zip_path # pseudo-fs path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.egg_root)
)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
date_time = zip_stat.date_time + (0, 0, -1) # ymdhms+wday, yday, dst
#1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
return os.path.dirname(last) # return the extracted directory name
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp,timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
elif os.name=='nt': # Windows, del old file and retry
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
manager.extraction_error() # report a user-friendly error
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
f = open(file_path, 'rb')
file_contents = f.read()
f.close()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self,fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self,fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.egg_root,resource_name))
def _resource_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.module_path,resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self,path):
self.path = path
def has_metadata(self,name):
return name=='PKG-INFO'
def get_metadata(self,name):
if name=='PKG-INFO':
f = open(self.path,'rU')
metadata = f.read()
f.close()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self,name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zipinfo = build_zipmanifest(importer.archive)
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
return # don't yield nested distros
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item,entry,metadata,precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
for dist in find_distributions(os.path.join(path_item, entry)):
yield dist
elif not only and lower.endswith('.egg-link'):
entry_file = open(os.path.join(path_item, entry))
try:
entry_lines = entry_file.readlines()
finally:
entry_file.close()
for line in entry_lines:
if not line.strip(): continue
for item in find_distributions(os.path.join(path_item,line.rstrip())):
yield item
break
register_finder(pkgutil.ImpImporter,find_on_path)
if importlib_bootstrap is not None:
register_finder(importlib_bootstrap.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer,path_entry,moduleName,module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = imp.new_module(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
for path_item in path:
if path_item not in module.__path__:
module.__path__.append(path_item)
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath: fixup_namespace_packages(subpath,package)
finally:
imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter,file_ns_handler)
register_namespace_handler(zipimport.zipimporter,file_ns_handler)
if importlib_bootstrap is not None:
register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename,_cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
if isinstance(strs,basestring):
for s in strs.splitlines():
s = s.strip()
if s and not s.startswith('#'): # skip blank lines/comments
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"(?P<name>[^-]+)"
r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
re.VERBOSE | re.IGNORECASE
).match
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
"""Convert a version string to a chronologically-sortable key
This is a rough cross between distutils' StrictVersion and LooseVersion;
if you give it versions that would work with StrictVersion, then it behaves
the same; otherwise it acts like a slightly-smarter LooseVersion. It is
*possible* to create pathological version coding schemes that will fool
this parser, but they should be very rare in practice.
The returned value will be a tuple of strings. Numeric portions of the
version are padded to 8 digits so they will compare numerically, but
without relying on how numbers compare relative to strings. Dots are
dropped, but dashes are retained. Trailing zeros between alpha segments
or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
"2.4". Alphanumeric parts are lower-cased.
    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final" represent a "patch level". So, "2.4-1"
is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
considered newer than "2.4-1", which in turn is newer than "2.4".
Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
come before "final" alphabetically) are assumed to be pre-release versions,
so that the version "2.4" is considered newer than "2.4a1".
Finally, to handle miscellaneous cases, the strings "pre", "preview", and
"rc" are treated as if they were "c", i.e. as though they were release
candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower
    than any other pre-release tag.
"""
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
if part<'*final': # remove '-' before a prerelease tag
while parts and parts[-1]=='*final-': parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1]=='00000000':
parts.pop()
parts.append(part)
return tuple(parts)
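# An illustrative sketch of the resulting sort keys (values follow the rules
# documented above; this example is not part of the original module):
#   parse_version('2.4')   -> ('00000002', '00000004', '*final')
#   parse_version('2.4a1') -> ('00000002', '00000004', '*a', '00000001', '*final')
# so parse_version('2.4a1') < parse_version('2.4') < parse_version('2.4-1').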
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, env=None, installer=None):
if require: self.require(env, installer)
entry = __import__(self.module_name, globals(),globals(), ['__name__'])
for attr in self.attrs:
try:
entry = getattr(entry,attr)
except AttributeError:
raise ImportError("%r has no %r attribute" % (entry,attr))
return entry
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
list(map(working_set.add,
working_set.resolve(self.dist.requires(self.extras),env,installer)))
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1,extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
try:
attrs = extras = ()
name,value = src.split('=',1)
if '[' in value:
value,extras = value.split('[',1)
req = Requirement.parse("x["+extras)
if req.specs: raise ValueError
extras = req.extras
if ':' in value:
value,attrs = value.split(':',1)
if not MODULE(attrs.rstrip()):
raise ValueError
attrs = attrs.rstrip().split('.')
except ValueError:
raise ValueError(
"EntryPoint must be in 'name=module:attrs [extras]' format",
src
)
else:
return cls(name.strip(), value.strip(), attrs, extras, dist)
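    # An illustrative sketch (hypothetical module and attribute names): parsing
    #   "main = mypkg.cli:run [extra1,extra2]"
    # yields name 'main', module_name 'mypkg.cli', attrs ('run',) and
    # extras ('extra1', 'extra2'); str() round-trips to the same syntax.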
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data,dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls,location,basename,metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
hashcmp = property(
lambda self: (
getattr(self,'parsed_version',()),
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version,
self.platform
)
)
def __hash__(self): return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
try:
return self._parsed_version
except AttributeError:
self._parsed_version = pv = parse_version(self.version)
return pv
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
raise ValueError(
"Missing 'Version:' header and/or %s file" % self.PKG_INFO, self
)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra,reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':',1)
if invalid_marker(marker):
reqs=[] # XXX warn
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self,extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None,()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self,name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self,path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None: path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
list(map(declare_namespace, self._get_metadata('namespace_packages.txt')))
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-'+self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self,self.location)
else:
return str(self)
def __str__(self):
try: version = getattr(self,'version',None)
except ValueError: version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name,version)
def __getattr__(self,attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls,filename,metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
return Requirement.parse('%s==%s' % (self.project_name, self.version))
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group,name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group,name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item==nloc:
break
elif item==bdir and self.precedence==EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while 1:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
p = np # ha!
return
def check_version_conflict(self):
if self.key=='setuptools':
return # ignore the inevitable setuptools self-conflicts :(
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for "+repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
for attr in (
'project_name', 'version', 'py_version', 'platform', 'location',
'precedence'
):
kw.setdefault(attr, getattr(self,attr,None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
from email.parser import Parser
self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
        necessary, and remove parentheses.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from _markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
from warnings import warn
warn(stacklevel = level+1, *args, **kw)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be an instance of ``basestring``, or a (possibly-nested)
iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
items = []
while not TERMINATOR(line,p):
if CONTINUE(line,p):
try:
line = next(lines)
p = 0
except StopIteration:
raise ValueError(
"\\ must not appear on the last nonblank line"
)
match = ITEM(line,p)
if not match:
raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line,p)
if match:
p = match.end() # skip the comma
elif not TERMINATOR(line,p):
raise ValueError(
"Expected ',' or end-of-list in",line,"at",line[p:]
)
match = TERMINATOR(line,p)
if match: p = match.end() # skip the terminator, if any
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise ValueError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line,p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
specs = [(op,safe_version(val)) for op,val in specs]
yield Requirement(project_name, specs, extras)
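# An illustrative sketch (hypothetical project name):
#   list(parse_requirements("FooBar[extra1]>=1.2,<2.0"))
# yields one Requirement with project_name 'FooBar', specs
# [('>=', '1.2'), ('<', '2.0')] and extras ('extra1',).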
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
index.sort()
self.specs = [(op,ver) for parsed,trans,op,ver in index]
self.index, self.extras = index, tuple(map(safe_extra,extras))
self.hashCmp = (
self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
frozenset(self.extras)
)
self.__hash = hash(self.hashCmp)
def __str__(self):
specs = ','.join([''.join(s) for s in self.specs])
extras = ','.join(self.extras)
if extras: extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, specs)
def __eq__(self,other):
return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
def __contains__(self,item):
if isinstance(item,Distribution):
if item.key != self.key: return False
if self.index: item = item.parsed_version # only get if we need it
elif isinstance(item,basestring):
item = parse_version(item)
last = None
compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
for parsed,trans,op,ver in self.index:
action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
if action=='F':
return False
elif action=='T':
return True
elif action=='+':
last = True
elif action=='-' or last is None: last = False
if last is None: last = True # no rules encountered
return last
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs)==1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
state_machine = {
# =><
'<': '--T',
'<=': 'T-T',
'>': 'F+F',
'>=': 'T+F',
'==': 'T..',
'!=': 'F++',
}
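# Each state_machine value is a three-character action string indexed by the
# -1/0/1 result of compare(item, parsed) in Requirement.__contains__:
# position 0 is "equal", position 1 is "greater", position -1 is "less".
# 'T'/'F' decide the membership test immediately, '+'/'-' record a tentative
# True/False, and '.' leaves the running verdict alone (defaulting it to
# False if nothing has been decided yet).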
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def split_sections(s):
"""Split a string or iterable thereof into (section,content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
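# An illustrative sketch: list(split_sections("a\n[sec]\nb\n# comment\n"))
# returns [(None, ['a']), ('sec', ['b'])] -- blank and comment-only lines
# are dropped by yield_lines, and pre-header lines land in the None section.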
def _mkstemp(*args,**kw):
from tempfile import mkstemp
old_open = os.open
try:
os.open = os_open # temporarily bypass sandboxing
return mkstemp(*args,**kw)
finally:
os.open = old_open # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
for name in dir(_manager):
if not name.startswith('_'):
g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
# Does the main program list any requirements?
from __main__ import __requires__
except ImportError:
pass # No: just use the default working set based on sys.path
else:
# Yes: ensure the requirements are met, by prefixing sys.path if necessary
try:
working_set.require(__requires__)
except VersionConflict: # try it without defaults already on sys.path
working_set = WorkingSet([]) # by starting with an empty path
for dist in working_set.resolve(
parse_requirements(__requires__), Environment()
):
working_set.add(dist)
for entry in sys.path: # add any missing entries from sys.path
if entry not in working_set.entries:
working_set.add_entry(entry)
sys.path[:] = working_set.entries # then copy back to sys.path
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
list(map(working_set.add_entry,sys.path)) # match order
| apache-2.0 |
aleonliao/depot_tools | download_from_google_storage.py | 15 | 20248 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Download files from Google Storage based on SHA1 sums."""
import hashlib
import optparse
import os
import Queue
import re
import shutil
import stat
import sys
import tarfile
import threading
import time
import subprocess2
GSUTIL_DEFAULT_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'gsutil.py')
# Maps sys.platform to what we actually want to call them.
PLATFORM_MAPPING = {
'cygwin': 'win',
'darwin': 'mac',
'linux2': 'linux',
'win32': 'win',
}
class FileNotFoundError(IOError):
pass
class InvalidFileError(IOError):
pass
class InvalidPlatformError(Exception):
pass
def GetNormalizedPlatform():
"""Returns the result of sys.platform accounting for cygwin.
Under cygwin, this will always return "win32" like the native Python."""
if sys.platform == 'cygwin':
return 'win32'
return sys.platform
# Common utilities
class Gsutil(object):
"""Call gsutil with some predefined settings. This is a convenience object,
and is also immutable."""
def __init__(self, path, boto_path=None, timeout=None, version='4.15'):
if not os.path.exists(path):
raise FileNotFoundError('GSUtil not found in %s' % path)
self.path = path
self.timeout = timeout
self.boto_path = boto_path
self.version = version
def get_sub_env(self):
env = os.environ.copy()
if self.boto_path == os.devnull:
env['AWS_CREDENTIAL_FILE'] = ''
env['BOTO_CONFIG'] = ''
elif self.boto_path:
env['AWS_CREDENTIAL_FILE'] = self.boto_path
env['BOTO_CONFIG'] = self.boto_path
return env
def call(self, *args):
cmd = [sys.executable, self.path, '--force-version', self.version]
cmd.extend(args)
return subprocess2.call(cmd, env=self.get_sub_env(), timeout=self.timeout)
def check_call(self, *args):
cmd = [sys.executable, self.path, '--force-version', self.version]
cmd.extend(args)
((out, err), code) = subprocess2.communicate(
cmd,
stdout=subprocess2.PIPE,
stderr=subprocess2.PIPE,
env=self.get_sub_env(),
timeout=self.timeout)
# Parse output.
status_code_match = re.search('status=([0-9]+)', err)
if status_code_match:
return (int(status_code_match.group(1)), out, err)
if ('You are attempting to access protected data with '
'no configured credentials.' in err):
return (403, out, err)
if 'matched no objects' in err:
return (404, out, err)
return (code, out, err)
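# A minimal usage sketch (the bucket path is hypothetical):
#   gsutil = Gsutil(GSUTIL_DEFAULT_PATH, boto_path=os.devnull)
#   code, out, err = gsutil.check_call('ls', 'gs://some-bucket/deadbeef')
# check_call returns a status tuple instead of raising, so callers can
# branch on 0 (ok), 403 (no credentials) or 404 (no such object).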
def check_platform(target):
"""Checks if any parent directory of target matches (win|mac|linux)."""
assert os.path.isabs(target)
root, target_name = os.path.split(target)
if not target_name:
return None
if target_name in ('linux', 'mac', 'win'):
return target_name
return check_platform(root)
def get_sha1(filename):
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024*1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
# Download-specific code starts here
def enumerate_work_queue(input_filename, work_queue, directory,
recursive, ignore_errors, output, sha1_file,
auto_platform):
if sha1_file:
if not os.path.exists(input_filename):
if not ignore_errors:
raise FileNotFoundError('%s not found.' % input_filename)
print >> sys.stderr, '%s not found.' % input_filename
with open(input_filename, 'rb') as f:
sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
work_queue.put((sha1_match.groups(1)[0], output))
return 1
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % input_filename)
print >> sys.stderr, 'No sha1 sum found in %s.' % input_filename
return 0
if not directory:
work_queue.put((input_filename, output))
return 1
work_queue_size = 0
for root, dirs, files in os.walk(input_filename):
if not recursive:
for item in dirs[:]:
dirs.remove(item)
else:
for exclude in ['.svn', '.git']:
if exclude in dirs:
dirs.remove(exclude)
for filename in files:
full_path = os.path.join(root, filename)
if full_path.endswith('.sha1'):
if auto_platform:
# Skip if the platform does not match.
target_platform = check_platform(os.path.abspath(full_path))
if not target_platform:
err = ('--auto_platform passed in but no platform name found in '
'the path of %s' % full_path)
if not ignore_errors:
raise InvalidFileError(err)
print >> sys.stderr, err
continue
current_platform = PLATFORM_MAPPING[sys.platform]
if current_platform != target_platform:
continue
with open(full_path, 'rb') as f:
sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
work_queue.put(
(sha1_match.groups(1)[0], full_path.replace('.sha1', '')))
work_queue_size += 1
else:
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % filename)
print >> sys.stderr, 'No sha1 sum found in %s.' % filename
return work_queue_size
def _validate_tar_file(tar, prefix):
def _validate(tarinfo):
"""Returns false if the tarinfo is something we explicitly forbid."""
if tarinfo.issym() or tarinfo.islnk():
return False
if '..' in tarinfo.name or not tarinfo.name.startswith(prefix):
return False
return True
return all(map(_validate, tar.getmembers()))
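# An illustrative sketch: for an archive expected to unpack into 'foo/', a
# member named '../evil' or any symlink/hardlink member makes
# _validate_tar_file(tar, 'foo') return False, which blocks path-traversal
# tricks before extraction.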
def _downloader_worker_thread(thread_num, q, force, base_url,
gsutil, out_q, ret_codes, verbose, extract,
delete=True):
while True:
input_sha1_sum, output_filename = q.get()
if input_sha1_sum is None:
return
if os.path.exists(output_filename) and not force:
if get_sha1(output_filename) == input_sha1_sum:
if verbose:
out_q.put(
'%d> File %s exists and SHA1 matches. Skipping.' % (
thread_num, output_filename))
continue
# Check if file exists.
file_url = '%s/%s' % (base_url, input_sha1_sum)
(code, _, err) = gsutil.check_call('ls', file_url)
if code != 0:
if code == 404:
out_q.put('%d> File %s for %s does not exist, skipping.' % (
thread_num, file_url, output_filename))
ret_codes.put((1, 'File %s for %s does not exist.' % (
file_url, output_filename)))
else:
# Other error, probably auth related (bad ~/.boto, etc).
out_q.put('%d> Failed to fetch file %s for %s, skipping. [Err: %s]' % (
thread_num, file_url, output_filename, err))
ret_codes.put((1, 'Failed to fetch file %s for %s. [Err: %s]' % (
file_url, output_filename, err)))
continue
# Fetch the file.
out_q.put('%d> Downloading %s...' % (thread_num, output_filename))
try:
if delete:
os.remove(output_filename) # Delete the file if it exists already.
except OSError:
if os.path.exists(output_filename):
out_q.put('%d> Warning: deleting %s failed.' % (
thread_num, output_filename))
code, _, err = gsutil.check_call('cp', file_url, output_filename)
if code != 0:
out_q.put('%d> %s' % (thread_num, err))
ret_codes.put((code, err))
continue
remote_sha1 = get_sha1(output_filename)
if remote_sha1 != input_sha1_sum:
msg = ('%d> ERROR remote sha1 (%s) does not match expected sha1 (%s).' %
(thread_num, remote_sha1, input_sha1_sum))
out_q.put(msg)
ret_codes.put((20, msg))
continue
if extract:
if (not tarfile.is_tarfile(output_filename)
or not output_filename.endswith('.tar.gz')):
out_q.put('%d> Error: %s is not a tar.gz archive.' % (
thread_num, output_filename))
ret_codes.put((1, '%s is not a tar.gz archive.' % (output_filename)))
continue
with tarfile.open(output_filename, 'r:gz') as tar:
dirname = os.path.dirname(os.path.abspath(output_filename))
extract_dir = output_filename[0:len(output_filename)-7]
if not _validate_tar_file(tar, os.path.basename(extract_dir)):
out_q.put('%d> Error: %s contains files outside %s.' % (
thread_num, output_filename, extract_dir))
ret_codes.put((1, '%s contains invalid entries.' % (output_filename)))
continue
if os.path.exists(extract_dir):
try:
shutil.rmtree(extract_dir)
out_q.put('%d> Removed %s...' % (thread_num, extract_dir))
except OSError:
out_q.put('%d> Warning: Can\'t delete: %s' % (
thread_num, extract_dir))
ret_codes.put((1, 'Can\'t delete %s.' % (extract_dir)))
continue
out_q.put('%d> Extracting %d entries from %s to %s' %
(thread_num, len(tar.getmembers()),output_filename,
extract_dir))
tar.extractall(path=dirname)
# Set executable bit.
if sys.platform == 'cygwin':
# Under cygwin, mark all files as executable. The executable flag in
# Google Storage will not be set when uploading from Windows, so if
# this script is running under cygwin and we're downloading an
# executable, it will be unrunnable from inside cygwin without this.
st = os.stat(output_filename)
os.chmod(output_filename, st.st_mode | stat.S_IEXEC)
elif sys.platform != 'win32':
# On non-Windows platforms, key off of the custom header
# "x-goog-meta-executable".
      code, out, err = gsutil.check_call('stat', file_url)
if code != 0:
out_q.put('%d> %s' % (thread_num, err))
ret_codes.put((code, err))
elif re.search(r'executable:\s*1', out):
st = os.stat(output_filename)
os.chmod(output_filename, st.st_mode | stat.S_IEXEC)
def printer_worker(output_queue):
while True:
line = output_queue.get()
    # It's plausible we want to print empty lines.
if line is None:
break
print line
def download_from_google_storage(
input_filename, base_url, gsutil, num_threads, directory, recursive,
force, output, ignore_errors, sha1_file, verbose, auto_platform, extract):
# Start up all the worker threads.
all_threads = []
download_start = time.time()
stdout_queue = Queue.Queue()
work_queue = Queue.Queue()
ret_codes = Queue.Queue()
ret_codes.put((0, None))
for thread_num in range(num_threads):
t = threading.Thread(
target=_downloader_worker_thread,
args=[thread_num, work_queue, force, base_url,
gsutil, stdout_queue, ret_codes, verbose, extract])
t.daemon = True
t.start()
all_threads.append(t)
printer_thread = threading.Thread(target=printer_worker, args=[stdout_queue])
printer_thread.daemon = True
printer_thread.start()
# Enumerate our work queue.
work_queue_size = enumerate_work_queue(
input_filename, work_queue, directory, recursive,
ignore_errors, output, sha1_file, auto_platform)
for _ in all_threads:
work_queue.put((None, None)) # Used to tell worker threads to stop.
# Wait for all downloads to finish.
for t in all_threads:
t.join()
stdout_queue.put(None)
printer_thread.join()
# See if we ran into any errors.
max_ret_code = 0
for ret_code, message in ret_codes.queue:
max_ret_code = max(ret_code, max_ret_code)
if message:
print >> sys.stderr, message
if verbose and not max_ret_code:
print 'Success!'
if verbose:
    print 'Downloading %d files took %.1f second(s)' % (
work_queue_size, time.time() - download_start)
return max_ret_code
def main(args):
usage = ('usage: %prog [options] target\n'
'Target must be:\n'
' (default) a sha1 sum ([A-Za-z0-9]{40}).\n'
' (-s or --sha1_file) a .sha1 file, containing a sha1 sum on '
'the first line.\n'
' (-d or --directory) A directory to scan for .sha1 files.')
parser = optparse.OptionParser(usage)
parser.add_option('-o', '--output',
help='Specify the output file name. Defaults to: '
'(a) Given a SHA1 hash, the name is the SHA1 hash. '
'(b) Given a .sha1 file or directory, the name will '
'match (.*).sha1.')
parser.add_option('-b', '--bucket',
help='Google Storage bucket to fetch from.')
parser.add_option('-e', '--boto',
help='Specify a custom boto file.')
parser.add_option('-c', '--no_resume', action='store_true',
                    help='Allow overwriting an existing output file; the '
                         'file is re-downloaded from scratch.')
parser.add_option('-f', '--force', action='store_true',
help='Force download even if local file exists.')
parser.add_option('-i', '--ignore_errors', action='store_true',
help='Don\'t throw error if we find an invalid .sha1 file.')
parser.add_option('-r', '--recursive', action='store_true',
help='Scan folders recursively for .sha1 files. '
'Must be used with -d/--directory')
parser.add_option('-t', '--num_threads', default=1, type='int',
help='Number of downloader threads to run.')
parser.add_option('-d', '--directory', action='store_true',
help='The target is a directory. '
'Cannot be used with -s/--sha1_file.')
parser.add_option('-s', '--sha1_file', action='store_true',
help='The target is a file containing a sha1 sum. '
'Cannot be used with -d/--directory.')
parser.add_option('-g', '--config', action='store_true',
help='Alias for "gsutil config". Run this if you want '
'to initialize your saved Google Storage '
'credentials. This will create a read-only '
'credentials file in ~/.boto.depot_tools.')
parser.add_option('-n', '--no_auth', action='store_true',
help='Skip auth checking. Use if it\'s known that the '
'target bucket is a public bucket.')
parser.add_option('-p', '--platform',
help='A regular expression that is compared against '
'Python\'s sys.platform. If this option is specified, '
'the download will happen only if there is a match.')
parser.add_option('-a', '--auto_platform',
action='store_true',
help='Detects if any parent folder of the target matches '
'(linux|mac|win). If so, the script will only '
                         'process files that are in paths that match '
                         'the current platform.')
parser.add_option('-u', '--extract',
action='store_true',
help='Extract a downloaded tar.gz file. '
                         'Leaves the tar.gz file around for sha1 verification. '
                         'If a directory with the same name as the tar.gz '
                         'file already exists, it is deleted (to get a '
                         'clean state in case of update).')
parser.add_option('-v', '--verbose', action='store_true', default=True,
                    help='DEPRECATED: Defaults to True. Use --quiet '
                         'to suppress.')
parser.add_option('-q', '--quiet', action='store_false', dest='verbose',
help='Suppresses diagnostic and progress information.')
(options, args) = parser.parse_args()
# Make sure we should run at all based on platform matching.
if options.platform:
if options.auto_platform:
parser.error('--platform can not be specified with --auto_platform')
if not re.match(options.platform, GetNormalizedPlatform()):
if options.verbose:
print('The current platform doesn\'t match "%s", skipping.' %
options.platform)
return 0
# Set the boto file to /dev/null if we don't need auth.
if options.no_auth:
if (set(('http_proxy', 'https_proxy')).intersection(
env.lower() for env in os.environ) and
'NO_AUTH_BOTO_CONFIG' not in os.environ):
print >> sys.stderr, ('NOTICE: You have PROXY values set in your '
'environment, but gsutil in depot_tools does not '
'(yet) obey them.')
print >> sys.stderr, ('Also, --no_auth prevents the normal BOTO_CONFIG '
'environment variable from being used.')
print >> sys.stderr, ('To use a proxy in this situation, please supply '
'those settings in a .boto file pointed to by '
'the NO_AUTH_BOTO_CONFIG environment var.')
options.boto = os.environ.get('NO_AUTH_BOTO_CONFIG', os.devnull)
# Make sure gsutil exists where we expect it to.
if os.path.exists(GSUTIL_DEFAULT_PATH):
gsutil = Gsutil(GSUTIL_DEFAULT_PATH,
boto_path=options.boto)
else:
parser.error('gsutil not found in %s, bad depot_tools checkout?' %
GSUTIL_DEFAULT_PATH)
# Passing in -g/--config will run our copy of GSUtil, then quit.
if options.config:
print '===Note from depot_tools==='
print 'If you do not have a project ID, enter "0" when asked for one.'
print '===End note from depot_tools==='
print
return gsutil.call('config')
if not args:
parser.error('Missing target.')
if len(args) > 1:
parser.error('Too many targets.')
if not options.bucket:
parser.error('Missing bucket. Specify bucket with --bucket.')
if options.sha1_file and options.directory:
parser.error('Both --directory and --sha1_file are specified, '
'can only specify one.')
if options.recursive and not options.directory:
parser.error('--recursive specified but --directory not specified.')
if options.output and options.directory:
parser.error('--directory is specified, so --output has no effect.')
if (not (options.sha1_file or options.directory)
and options.auto_platform):
parser.error('--auto_platform must be specified with either '
'--sha1_file or --directory')
input_filename = args[0]
# Set output filename if not specified.
if not options.output and not options.directory:
if not options.sha1_file:
# Target is a sha1 sum, so output filename would also be the sha1 sum.
options.output = input_filename
elif options.sha1_file:
# Target is a .sha1 file.
if not input_filename.endswith('.sha1'):
parser.error('--sha1_file is specified, but the input filename '
'does not end with .sha1, and no --output is specified. '
'Either make sure the input filename has a .sha1 '
'extension, or specify --output.')
options.output = input_filename[:-5]
else:
parser.error('Unreachable state.')
# Check if output file already exists.
if not options.directory and not options.force and not options.no_resume:
if os.path.exists(options.output):
      parser.error('Output file %s exists and --no_resume was not specified.'
% options.output)
base_url = 'gs://%s' % options.bucket
return download_from_google_storage(
input_filename, base_url, gsutil, options.num_threads, options.directory,
options.recursive, options.force, options.output, options.ignore_errors,
options.sha1_file, options.verbose, options.auto_platform,
options.extract)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
qnzhou/ThingiverseCrawler | thingiverse_crawler.py | 1 | 9320 | #!/usr/bin/env python
import argparse
import datetime
import os
import os.path
import requests
import re
import time
import urllib
import urlparse
from subprocess import check_call
def utc_mktime(utc_tuple):
"""Returns number of seconds elapsed since epoch
    Note that no timezone is taken into consideration.
utc tuple must be: (year, month, day, hour, minute, second)
"""
if len(utc_tuple) == 6:
utc_tuple += (0, 0, 0)
return time.mktime(utc_tuple) - time.mktime((1970, 1, 1, 0, 0, 0, 0, 0, 0))
def datetime_to_timestamp(dt):
"""Converts a datetime object to UTC timestamp"""
return int(utc_mktime(dt.timetuple()))
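# An illustrative sketch: both helpers treat the tuple as UTC, so
#   datetime_to_timestamp(datetime.datetime(1970, 1, 1)) == 0
# holds in any local timezone (the two mktime offsets cancel out).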
def parse_thing_ids(text):
pattern = "thing:(\d{5,7})"
matched = re.findall(pattern, text)
return [int(val) for val in matched]
def parse_file_ids(text):
pattern = "download:(\d{5,7})"
matched = re.findall(pattern, text)
return [int(val) for val in matched]
known_licenses = [
("Creative Commons - Attribution",
re.compile("http://creativecommons.org/licenses/by/\d(.\d)?/")),
("Creative Commons - Attribution - Share Alike",
re.compile("http://creativecommons.org/licenses/by-sa/\d(.\d)?/")),
("Creative Commons - Attribution - No Derivatives",
re.compile("http://creativecommons.org/licenses/by-nd/\d(.\d)?/")),
("Creative Commons - Attribution - Non-Commercial",
re.compile("http://creativecommons.org/licenses/by-nc/\d(.\d)?/")),
("Attribution - Non-Commercial - Share Alike",
re.compile("http://creativecommons.org/licenses/by-nc-sa/\d(.\d)?/")),
("Attribution - Non-Commercial - No Derivatives",
re.compile("http://creativecommons.org/licenses/by-nc-nd/\d(.\d)?/")),
("Creative Commons - Public Domain Dedication",
re.compile("http://creativecommons.org/publicdomain/zero/\d(.\d)?/")),
("GNU - GPL",
re.compile("http://creativecommons.org/licenses/GPL/\d(.\d)?/")),
("GNU - LGPL",
re.compile("http://creativecommons.org/licenses/LGPL/\d(.\d)?/")),
("BSD License",
re.compile("http://creativecommons.org/licenses/BSD/")),
("Nokia",
re.compile("http://www.developer.nokia.com/Terms_and_conditions/3d-printing.xhtml")),
("Public Domain",
re.compile("http://creativecommons.org/licenses/publicdomain/")),
]
def parse_license(text):
for name, pattern in known_licenses:
if pattern.search(text):
return name
return "unknown_license"
def crawl_thing_ids(N, end_date=None):
""" This method extract N things that were uploaded to thingiverse.com
before end_date. If end_date is None, use today's date.
"""
baseurl = "http://www.thingiverse.com/search/recent/things/page:{}?q=&start_date=&stop_date={}&search_mode=advanced&description=&username=&tags=&license="
end_date = datetime_to_timestamp(end_date)
thing_ids = set()
for i in range(N/12 + 1):
url = baseurl.format(i, end_date)
r = requests.get(url)
assert(r.status_code==200)
thing_ids.update(parse_thing_ids(r.text))
if len(thing_ids) > N:
break
# Sleep a bit to avoid being mistaken as DoS.
time.sleep(0.5)
return thing_ids
def crawl_things(N, output_dir, term=None, category=None, source=None, organize=False):
#baseurl = "http://www.thingiverse.com/newest/page:{}"
#baseurl = "http://www.thingiverse.com/explore/popular/page:{}"
key = None
if term is None:
        assert source is not None
        url_prefix = "http://www.thingiverse.com/explore/{}/".format(source)
if category is None:
baseurl = url_prefix + "page:{}"
else:
baseurl = url_prefix + urllib.quote_plus(category) + "/page:{}"
key = category
else:
baseurl = "http://www.thingiverse.com/search/page:{}?type=things&q=" + urllib.quote_plus(term)
key = term
thing_ids = set()
file_ids = set()
records = []
num_files = 0
page = 0
previous_path = ''
while True:
url = baseurl.format(page+1)
contents = get_url(url)
page += 1
        # If the resolved URL path is the same as the previous page's, stop: there are no more pages.
current_path = urlparse.urlparse(contents.url).path
if previous_path == current_path:
return records
else:
previous_path = current_path
for thing_id in parse_thing_ids(contents.text):
if thing_id in thing_ids:
continue
print("thing id: {}".format(thing_id))
thing_ids.add(thing_id)
license, thing_files = get_thing(thing_id)
for file_id in thing_files:
if file_id in file_ids:
continue
file_ids.add(file_id)
print(" file id: {}".format(file_id))
result = download_file(file_id, thing_id, output_dir, organize)
if result is None: continue
filename, link = result
if filename is not None:
records.append((thing_id, file_id, filename, license, link))
if N is not None and len(records) >= N:
return records
# Sleep a bit to avoid being mistaken as DoS.
time.sleep(0.5)
save_records(records, key)
def get_thing(thing_id):
base_url = "http://www.thingiverse.com/{}:{}"
file_ids = []
url = base_url.format("thing", thing_id)
contents = get_url(url).text
license = parse_license(contents)
return license, parse_file_ids(contents)
def get_url(url, time_out=600):
r = requests.get(url)
sleep_time = 1.0
while r.status_code != 200:
print("sleep {}s".format(sleep_time))
print(url)
time.sleep(sleep_time)
r = requests.get(url)
sleep_time += 2
if (sleep_time > time_out):
# We have sleeped for over 10 minutes, the page probably does
# not exist.
break
if r.status_code != 200:
print("failed to retrieve {}".format(url))
else:
return r
def get_download_link(file_id):
base_url = "https://www.thingiverse.com/{}:{}"
url = base_url.format("download", file_id)
r = requests.head(url)
link = r.headers.get("Location", None)
if link is not None:
__, ext = os.path.splitext(link)
if ext.lower() not in [".stl", ".obj", ".ply", ".off"]:
return None
return link
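# An illustrative note: Thingiverse's download endpoint answers the HEAD
# request above with a redirect, so the real file URL arrives in the
# 'Location' header; links that are not .stl/.obj/.ply/.off meshes are skipped.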
def download_file(file_id, thing_id, output_dir, organize):
link = get_download_link(file_id)
if link is None:
return None
__, ext = os.path.splitext(link)
output_file = "{}{}".format(file_id, ext.lower())
if organize:
output_file = os.path.join(str(thing_id), output_file)
output_file = os.path.join(output_dir, output_file)
command = "wget -q --tries=20 --waitretry 20 -O {} {}".format(output_file, link)
#check_call(command.split())
return output_file, link
def save_records(records, key=None):
# Enforce kebab case file name
output_name = re.sub('(\w) (\w)', r'\1-\2', key).lower()+"-" if key else ""
output_name += "summary"
with open(output_name+".csv", 'w') as fout:
fout.write("thing_id, file_id, file, license, link\n")
for entry in records:
fout.write(",".join([str(val) for val in entry]) + "\n")
def parse_args():
parser = argparse.ArgumentParser(
description="Crawl data from thingiverse",
epilog="Written by Qingnan Zhou <qnzhou at gmail dot com> Modified by Mike Gleason")
parser.add_argument("--output-dir", "-o", help="output directories",
default=".")
parser.add_argument("--number", "-n", type=int,
help="how many files to crawl", default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument("--search-term", "-s", type=str, default=None,
help="term to search for")
group.add_argument("--category", "-c", type=str, default=None,
help="catergory to search for")
parser.add_argument('--organize', dest='organized', default=False, action='store_true',
help="organize files by their main category")
parser.add_argument("--source", choices=("newest", "featured", "popular",
"verified", "made-things", "derivatives", "customizable",
"random-things", "firehose"), default="featured");
return parser
def main():
parser = parse_args()
args = parser.parse_args()
if args.number is None and (args.search_term is None and args.category is None):
parser.error('Number or Search/Category Term required')
output_dir = args.output_dir
number = args.number
records = crawl_things(
args.number,
output_dir,
args.search_term,
args.category,
args.source,
args.organized)
if args.search_term:
save_records(records, args.search_term)
elif args.category:
save_records(records, args.category)
else:
save_records(records)
if __name__ == "__main__":
main()
| mit |
ryankurte/mbed-os | tools/host_tests/tcpecho_server_loop.py | 73 | 1349 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Be sure that the tools directory is in the search path
import sys
from os.path import join, abspath, dirname
ROOT = abspath(join(dirname(__file__), "..", ".."))
sys.path.insert(0, ROOT)
from mbed_settings import LOCALHOST
from SocketServer import BaseRequestHandler, TCPServer
class TCP_EchoHandler(BaseRequestHandler):
def handle(self):
print "\nHandle connection from:", self.client_address
while True:
data = self.request.recv(1024)
if not data: break
self.request.sendall(data)
self.request.close()
print "socket closed"
if __name__ == '__main__':
server = TCPServer((LOCALHOST, 7), TCP_EchoHandler)
print "listening for connections on:", (LOCALHOST, 7)
server.serve_forever()
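# Quick manual check (assumes the server is running and netcat is available;
# binding to port 7 normally requires elevated privileges):
#
#     echo "hello" | nc <host> 7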
| apache-2.0 |
PyBossa/pybossa | pybossa/model/counter.py | 2 | 1787 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2017 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
from sqlalchemy import Integer
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.dialects.postgresql import TIMESTAMP
from pybossa.core import db
from pybossa.model import DomainObject, make_timestamp
class Counter(db.Model, DomainObject):
'''A Counter lists the number of task runs for a given Task.'''
__tablename__ = 'counter'
#: Counter.ID
id = Column(Integer, primary_key=True)
#: UTC timestamp when the counter was created.
created = Column(TIMESTAMP, default=make_timestamp)
#: Project.ID that this counter is associated with.
project_id = Column(Integer, ForeignKey('project.id',
ondelete='CASCADE'),
nullable=False)
#: Task.ID that this counter is associated with.
task_id = Column(Integer, ForeignKey('task.id',
ondelete='CASCADE'),
nullable=False)
#: Number of task_runs for this task.
n_task_runs = Column(Integer, default=0, nullable=False)
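# A minimal usage sketch (assumes an application context and the usual
# Flask-SQLAlchemy session exposed as pybossa.core.db):
#
#     counter = Counter(project_id=1, task_id=42, n_task_runs=0)
#     db.session.add(counter)
#     db.session.commit()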
| agpl-3.0 |
pplatek/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/logreport.py | 386 | 1736 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
LOG_DEBUG = 'debug'
LOG_INFO = 'info'
LOG_WARNING = 'warn'
LOG_ERROR = 'error'
LOG_CRITICAL = 'critical'
_logger = logging.getLogger(__name__)
def log_detail(self):
    logfile_name = os.path.join(tempfile.gettempdir(), "openerp_report_designer.log")
hdlr = logging.FileHandler(logfile_name)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
_logger.addHandler(hdlr)
_logger.setLevel(logging.INFO)
class Logger(object):
def log_write(self, name, level, msg):
        getattr(_logger, level)(msg)
def shutdown(self):
logging.shutdown()
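# A minimal usage sketch (log_detail ignores its argument, so any value works):
#
#     log_detail(None)
#     Logger().log_write('report_designer', LOG_INFO, 'designer started')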
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
arpitn30/open-event-orga-server | tests/unittests/api/test_custom_fields.py | 9 | 3404 | import unittest
from app import current_app as app
from app.api.helpers.custom_fields import Color, Email, Uri, \
ImageUri, DateTime, Integer, Float, ChoiceString, Upload
from tests.unittests.utils import OpenEventTestCase
class TestCustomFieldsValidation(OpenEventTestCase):
"""
Test the validation methods of custom fields
"""
def _test_common(self, field):
field.required = False
self.assertTrue(field.validate(None))
field.required = True
self.assertFalse(field.validate(None))
if field.__schema_type__ != 'string':
self.assertFalse(field.validate(''))
def test_color_field(self):
field = Color()
self._test_common(field)
self.assertFalse(field.validate('randomnothing'))
self.assertTrue(field.validate('black'))
self.assertTrue(field.validate('#44ff3b'))
def test_email_field(self):
field = Email()
self._test_common(field)
self.assertFalse(field.validate('website.com'))
self.assertTrue(field.validate('[email protected]'))
def test_uri_field(self):
field = Uri()
self._test_common(field)
self.assertFalse(field.validate('somestring'))
self.assertFalse(field.validate('website.com'))
self.assertFalse(field.validate('www.website.com'))
self.assertFalse(field.validate('http://bazooka'))
self.assertTrue(field.validate('http://localhost/file'))
self.assertTrue(field.validate('http://website.com'))
self.assertTrue(field.validate('ftp://domain.com/blah'))
def test_image_uri_field(self):
field = ImageUri()
self._test_common(field)
# same as uri field, not many tests needed
self.assertFalse(field.validate('imgur.com/image.png'))
self.assertTrue(field.validate('http://imgur.com/image.png'))
def test_datetime_field(self):
field = DateTime()
self._test_common(field)
self.assertTrue(field.validate('2014-12-31 23:11:44'))
self.assertTrue(field.validate('2014-12-31T23:11:44'))
self.assertFalse(field.validate('2014-31-12T23:11:44'))
self.assertFalse(field.validate('2014-12-32'))
self.assertFalse(field.validate('2014-06-30 12:00'))
def test_integer_field(self):
field = Integer()
self._test_common(field)
self.assertTrue(field.validate(0))
self.assertFalse(field.validate(-2323.23))
self.assertFalse(field.validate(2323.23))
def test_float_field(self):
field = Float()
self._test_common(field)
self.assertTrue(field.validate(92))
def test_choice_string_field(self):
field = ChoiceString(choice_list=['a', 'b', 'c'])
self._test_common(field)
self.assertTrue(field.validate('a'))
self.assertFalse(field.validate('d'))
self.assertFalse(field.validate('ab'))
def test_upload_field(self):
with app.test_request_context():
field = Upload()
self._test_common(field)
link = '/static/1'
self.assertTrue(field.validate(link))
z = field.format(link)
self.assertNotEqual(link, z)
self.assertTrue(field.validate(z), msg=z)
self.assertEqual('http://site.co/1', field.format('http://site.co/1'))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
marbu/pylatest | tests/xdocutils/test_utils.py | 1 | 6663 | # -*- coding: utf8 -*-
"""
Tests of helper functions from pylatest.xdocutils.utils module.
"""
# Copyright (C) 2018 Martin Bukatovič <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import textwrap
from docutils.core import publish_doctree
import pytest
from pylatest.xdocutils.core import pylatest_publish_parts
from pylatest.xdocutils.readers import NoDocInfoReader
from pylatest.xdocutils.utils import get_field_list
from pylatest.xdocutils.utils import get_testcase_id
from pylatest.xdocutils.utils import get_testcase_requirements
def _publish(source):
"""
Parse rst source string into doctree.
"""
doctree = publish_doctree(
source=source,
reader=NoDocInfoReader(),
parser_name='restructuredtext',)
return doctree
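# Example: _publish("Test Foo\n********\n\n:id: FOO-1\n") returns a docutils
# document tree; the helpers under test then inspect its field list.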
def test_get_field_list_null(empty_doctree):
    assert get_field_list(empty_doctree) is None
def test_get_field_list_missing():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
There is no field list.
Description
===========
Nothing here as well.
'''))
    assert get_field_list(doctree) is None
def test_get_field_list_present():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:id: FOO-122
:author: [email protected]
:component: foo
'''))
assert get_field_list(doctree) is not None
def test_get_testcase_id_null(empty_doctree):
    assert get_testcase_id(empty_doctree) is None
def test_get_testcase_id():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:id: FOO-122
:author: [email protected]
:component: foo
'''))
assert get_testcase_id(doctree) == "FOO-122"
#
# requirements
#
def test_get_testcase_requirements_null(empty_doctree):
assert get_testcase_requirements(empty_doctree) == []
REQUIREMENT_FIELD_NAMES = ["requirement", "requirements"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_single(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}: FOO-212
'''.format(field_name)))
assert get_testcase_requirements(doctree) == ["FOO-212"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_single_empty(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}:
'''.format(field_name)))
assert get_testcase_requirements(doctree) == []
def test_get_testcase_requirements_many():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:requirement: FOO-212
:requirement: FOO-232
:component: foo
'''))
assert get_testcase_requirements(doctree) == ["FOO-212", "FOO-232"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_single(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}:
- FOO-212
'''.format(field_name)))
assert get_testcase_requirements(doctree) == ["FOO-212"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_many(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}:
- FOO-212
- FOO-232
'''.format(field_name)))
assert get_testcase_requirements(doctree) == ["FOO-212", "FOO-232"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_many_someemptyitems(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}:
-
- FOO-132
-
:requirement: FOO-130
'''.format(field_name)))
assert get_testcase_requirements(doctree) == ["FOO-132", "FOO-130"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_list_many_onlyemptyitems(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:{}:
-
-
'''.format(field_name)))
assert get_testcase_requirements(doctree) == []
def test_get_testcase_requirements_many_list_many():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:requirement: FOO-012
:requirement: FOO-032
:requirements:
- FOO-212
- FOO-232
'''))
assert get_testcase_requirements(doctree) == [
"FOO-012", "FOO-032", "FOO-212", "FOO-232"]
@pytest.mark.parametrize("field_name", REQUIREMENT_FIELD_NAMES)
def test_get_testcase_requirements_single_url_link(field_name):
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
        :{}: https://example.com
'''.format(field_name)))
results = get_testcase_requirements(doctree)
assert len(results) == 1
# check that we get actual rst node for a link (reference node)
assert results[0].tagname == "reference"
assert results[0].astext() == "https://example.com"
def test_get_testcase_requirements_many_list_url_link():
doctree = _publish(textwrap.dedent('''\
Test Foo
********
:author: [email protected]
:component: foo
:requirements:
- https://example.com/foo
- https://example.com/bar
'''))
results = get_testcase_requirements(doctree)
assert len(results) == 2
# check that we get actual rst node for a link (reference node)
assert results[0].tagname == "reference"
assert results[1].tagname == "reference"
# and expected content
assert results[0].astext() == "https://example.com/foo"
assert results[1].astext() == "https://example.com/bar"
| gpl-3.0 |
openshift/openshift-tools | openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/openshift_health_checker/test/etcd_volume_test.py | 55 | 3964 | import pytest
from openshift_checks.etcd_volume import EtcdVolume
from openshift_checks import OpenShiftCheckException
@pytest.mark.parametrize('ansible_mounts,extra_words', [
([], ['none']), # empty ansible_mounts
([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
])
def test_cannot_determine_available_disk(ansible_mounts, extra_words):
task_vars = dict(
ansible_mounts=ansible_mounts,
)
with pytest.raises(OpenShiftCheckException) as excinfo:
EtcdVolume(fake_execute_module, task_vars).run()
for word in ['Unable to determine mount point'] + extra_words:
assert word in str(excinfo.value)
@pytest.mark.parametrize('size_limit,ansible_mounts', [
(
# if no size limit is specified, expect max usage
# limit to default to 90% of size_total
None,
[{
'mount': '/',
'size_available': 40 * 10**9,
'size_total': 80 * 10**9
}],
),
(
1,
[{
'mount': '/',
'size_available': 30 * 10**9,
'size_total': 30 * 10**9,
}],
),
(
20000000000,
[{
'mount': '/',
'size_available': 20 * 10**9,
'size_total': 40 * 10**9,
}],
),
(
5000000000,
[{
# not enough space on / ...
'mount': '/',
'size_available': 0,
'size_total': 0,
}, {
# not enough space on /var/lib ...
'mount': '/var/lib',
'size_available': 2 * 10**9,
'size_total': 21 * 10**9,
}, {
# ... but enough on /var/lib/etcd
'mount': '/var/lib/etcd',
'size_available': 36 * 10**9,
'size_total': 40 * 10**9
}],
)
])
def test_succeeds_with_recommended_disk_space(size_limit, ansible_mounts):
task_vars = dict(
etcd_device_usage_threshold_percent=size_limit,
ansible_mounts=ansible_mounts,
)
if task_vars["etcd_device_usage_threshold_percent"] is None:
task_vars.pop("etcd_device_usage_threshold_percent")
result = EtcdVolume(fake_execute_module, task_vars).run()
assert not result.get('failed', False)
@pytest.mark.parametrize('size_limit_percent,ansible_mounts,extra_words', [
(
# if no size limit is specified, expect max usage
# limit to default to 90% of size_total
None,
[{
'mount': '/',
'size_available': 1 * 10**9,
'size_total': 100 * 10**9,
}],
['99.0%'],
),
(
70.0,
[{
'mount': '/',
'size_available': 1 * 10**6,
'size_total': 5 * 10**9,
}],
['100.0%'],
),
(
40.0,
[{
'mount': '/',
'size_available': 2 * 10**9,
'size_total': 6 * 10**9,
}],
['66.7%'],
),
(
None,
[{
# enough space on /var ...
'mount': '/var',
'size_available': 20 * 10**9,
'size_total': 20 * 10**9,
}, {
# .. but not enough on /var/lib
'mount': '/var/lib',
'size_available': 1 * 10**9,
'size_total': 20 * 10**9,
}],
['95.0%'],
),
])
def test_fails_with_insufficient_disk_space(size_limit_percent, ansible_mounts, extra_words):
task_vars = dict(
etcd_device_usage_threshold_percent=size_limit_percent,
ansible_mounts=ansible_mounts,
)
if task_vars["etcd_device_usage_threshold_percent"] is None:
task_vars.pop("etcd_device_usage_threshold_percent")
result = EtcdVolume(fake_execute_module, task_vars).run()
assert result['failed']
for word in extra_words:
assert word in result['msg']
def fake_execute_module(*args):
raise AssertionError('this function should not be called')
| apache-2.0 |
opavader/fabric | tests/test_state.py | 44 | 1109 | from nose.tools import eq_
from fabric.state import _AliasDict
def test_dict_aliasing():
"""
Assigning values to aliases updates aliased keys
"""
ad = _AliasDict(
{'bar': False, 'biz': True, 'baz': False},
aliases={'foo': ['bar', 'biz', 'baz']}
)
# Before
eq_(ad['bar'], False)
eq_(ad['biz'], True)
eq_(ad['baz'], False)
# Change
ad['foo'] = True
# After
eq_(ad['bar'], True)
eq_(ad['biz'], True)
eq_(ad['baz'], True)
def test_nested_dict_aliasing():
"""
Aliases can be nested
"""
ad = _AliasDict(
{'bar': False, 'biz': True},
aliases={'foo': ['bar', 'nested'], 'nested': ['biz']}
)
# Before
eq_(ad['bar'], False)
eq_(ad['biz'], True)
# Change
ad['foo'] = True
# After
eq_(ad['bar'], True)
eq_(ad['biz'], True)
def test_dict_alias_expansion():
"""
Alias expansion
"""
ad = _AliasDict(
{'bar': False, 'biz': True},
aliases={'foo': ['bar', 'nested'], 'nested': ['biz']}
)
eq_(ad.expand_aliases(['foo']), ['bar', 'biz'])
| bsd-2-clause |
cubicova17/annet | venv/lib/python2.7/site-packages/django/contrib/gis/gdal/feature.py | 219 | 4255 | # The GDAL C library, OGR exception, and the Field object
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
# ctypes function prototypes
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
from django.utils.encoding import force_bytes, force_text
from django.utils import six
from django.utils.six.moves import xrange
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
"""
This class that wraps an OGR Feature, needs to be instantiated
from a Layer object.
"""
#### Python 'magic' routines ####
def __init__(self, feat, layer):
"""
Initializes Feature from a pointer and its Layer object.
"""
if not feat:
raise OGRException('Cannot create OGR Feature, invalid pointer given.')
self.ptr = feat
self._layer = layer
def __del__(self):
"Releases a reference to this object."
if self._ptr: capi.destroy_feature(self._ptr)
def __getitem__(self, index):
"""
Gets the Field object at the specified index, which may be either
an integer or the Field's string label. Note that the Field object
is not the field's _value_ -- use the `get` method instead to
retrieve the value (e.g. an integer) instead of a Field instance.
"""
if isinstance(index, six.string_types):
i = self.index(index)
else:
            if index < 0 or index >= self.num_fields:
raise OGRIndexError('index out of range')
i = index
return Field(self, i)
def __iter__(self):
"Iterates over each field in the Feature."
for i in xrange(self.num_fields):
yield self[i]
def __len__(self):
"Returns the count of fields in this feature."
return self.num_fields
def __str__(self):
"The string name of the feature."
return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)
def __eq__(self, other):
"Does equivalence testing on the features."
return bool(capi.feature_equal(self.ptr, other._ptr))
#### Feature Properties ####
@property
def encoding(self):
return self._layer._ds.encoding
@property
def fid(self):
"Returns the feature identifier."
return capi.get_fid(self.ptr)
@property
def layer_name(self):
"Returns the name of the layer for the feature."
name = capi.get_feat_name(self._layer._ldefn)
return force_text(name, self.encoding, strings_only=True)
@property
def num_fields(self):
"Returns the number of fields in the Feature."
return capi.get_feat_field_count(self.ptr)
@property
def fields(self):
"Returns a list of fields in the Feature."
return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i))
for i in xrange(self.num_fields)]
@property
def geom(self):
"Returns the OGR Geometry for this Feature."
# Retrieving the geometry pointer for the feature.
geom_ptr = capi.get_feat_geom_ref(self.ptr)
return OGRGeometry(geom_api.clone_geom(geom_ptr))
@property
    def geom_type(self):
        "Returns the OGR Geometry Type for this Feature."
return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))
#### Feature Methods ####
def get(self, field):
"""
Returns the value of the field, instead of an instance of the Field
object. May take a string of the field name or a Field object as
parameters.
"""
field_name = getattr(field, 'name', field)
return self[field_name].value
def index(self, field_name):
"Returns the index of the given field name."
i = capi.get_field_index(self.ptr, force_bytes(field_name))
if i < 0:
raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
return i
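# A short usage sketch (assumes an OGR-readable data source, e.g. a
# hypothetical shapefile path):
#
#     from django.contrib.gis.gdal import DataSource
#     ds = DataSource('/path/to/file.shp')
#     for feat in ds[0]:                # iterate features of the first layer
#         print(feat.fid, feat.fields, feat.geom_type)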
| mit |
Ali-aqrabawi/ezclinic | lib/django/contrib/staticfiles/management/commands/findstatic.py | 106 | 1745 | from __future__ import unicode_literals
import os
from django.contrib.staticfiles import finders
from django.core.management.base import LabelCommand
from django.utils.encoding import force_text
class Command(LabelCommand):
help = "Finds the absolute paths for the given static file(s)."
label = 'staticfile'
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--first', action='store_false', dest='all',
default=True,
help="Only return the first match for each static file.",
)
def handle_label(self, path, **options):
verbosity = options['verbosity']
result = finders.find(path, all=options['all'])
path = force_text(path)
if verbosity >= 2:
searched_locations = (
"\nLooking in the following locations:\n %s" %
"\n ".join(force_text(location) for location in finders.searched_locations)
)
else:
searched_locations = ''
if result:
if not isinstance(result, (list, tuple)):
result = [result]
result = (force_text(os.path.realpath(path)) for path in result)
if verbosity >= 1:
file_list = '\n '.join(result)
return ("Found '%s' here:\n %s%s" %
(path, file_list, searched_locations))
else:
return '\n'.join(result)
else:
message = ["No matching file found for '%s'." % path]
if verbosity >= 2:
message.append(searched_locations)
if verbosity >= 1:
self.stderr.write('\n'.join(message))
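# Example invocations (standard Django management command usage):
#
#     python manage.py findstatic css/base.css admin/js/core.js
#     python manage.py findstatic css/base.css --first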
| mit |
vshtanko/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to each other. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
mostafa8026/MyObjectListView | docs/conf.py | 2 | 5224 | # -*- coding: utf-8 -*-
#
# ObjectListView documentation build configuration file, created by
# sphinx-quickstart on Sun May 18 14:41:14 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# sys.path.append(os.path.abspath(".."))
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath("sphinxext"))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'ObjectListView'
copyright = '2006-2015, Phillip Piper'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '2.9'
# The full version, including alpha/beta/rc tags.
release = '2.9.0'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'c#'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'master.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ObjectListViewDoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = "a4"
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '11pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'ObjectListView.tex', 'ObjectListView Documentation', 'Phillip Piper', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| gpl-3.0 |
2014c2g3/0623exam | static/Brython3.1.1-20150328-091302/Lib/threading_1.py | 730 | 45641 | """Thread module emulating a subset of Java's threading model."""
import sys as _sys
import _thread
from time import sleep as _sleep
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
from traceback import format_exc as _format_exc
from _weakrefset import WeakSet
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
# being deprecated (even for Py3k), so this module provides them as an
# alias for the PEP 8 compliant names
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier',
'Timer', 'ThreadError', 'setprofile', 'settrace', 'local', 'stack_size']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
get_ident = _thread.get_ident
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
"""Set a profile function for all threads started from the threading module.
The func will be passed to sys.setprofile() for each thread, before its
run() method is called.
"""
global _profile_hook
_profile_hook = func
def settrace(func):
"""Set a trace function for all threads started from the threading module.
The func will be passed to sys.settrace() for each thread, before its run()
method is called.
"""
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
"""Factory function that returns a new reentrant lock.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it again
without blocking; the thread must release it once for each time it has
acquired it.
"""
if _CRLock is None:
return _PyRLock(*args, **kwargs)
return _CRLock(*args, **kwargs)
class _RLock:
"""This class implements reentrant lock objects.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it
again without blocking; the thread must release it once for each time it
has acquired it.
"""
def __init__(self):
self._block = _allocate_lock()
self._owner = None
self._count = 0
def __repr__(self):
owner = self._owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self._count)
def acquire(self, blocking=True, timeout=-1):
"""Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
When invoked with the floating-point timeout argument set to a positive
value, block for at most the number of seconds specified by timeout
and as long as the lock cannot be acquired. Return true if the lock has
been acquired, false if the timeout has elapsed.
"""
me = get_ident()
if self._owner == me:
self._count = self._count + 1
return 1
rc = self._block.acquire(blocking, timeout)
if rc:
self._owner = me
self._count = 1
return rc
__enter__ = acquire
def release(self):
"""Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.
"""
if self._owner != get_ident():
raise RuntimeError("cannot release un-acquired lock")
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, state):
self._block.acquire()
self._count, self._owner = state
def _release_save(self):
if self._count == 0:
raise RuntimeError("cannot release un-acquired lock")
count = self._count
self._count = 0
owner = self._owner
self._owner = None
self._block.release()
return (count, owner)
def _is_owned(self):
return self._owner == get_ident()
_PyRLock = _RLock
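# A minimal usage sketch: a reentrant lock may be re-acquired by the thread
# that already owns it, as long as each acquire() is matched by a release().
#
#     lock = RLock()
#     with lock:
#         with lock:      # same thread re-enters without blocking
#             pass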
class Condition:
"""Class that implements a condition variable.
A condition variable allows one or more threads to wait until they are
notified by another thread.
If the lock argument is given and not None, it must be a Lock or RLock
object, and it is used as the underlying lock. Otherwise, a new RLock object
is created and used as the underlying lock.
"""
def __init__(self, lock=None):
if lock is None:
lock = RLock()
self._lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self._waiters = []
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
def _release_save(self):
self._lock.release() # No state to save
def _acquire_restore(self, x):
self._lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self._lock.acquire(0):
self._lock.release()
return False
else:
return True
def wait(self, timeout=None):
"""Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notify_all() call for the same condition
variable in another thread, or until the optional timeout occurs. Once
awakened or timed out, it re-acquires the lock and returns.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
When the underlying lock is an RLock, it is not released using its
release() method, since this may not actually unlock the lock when it
was acquired multiple times recursively. Instead, an internal interface
of the RLock class is used, which really unlocks it even when it has
been recursively acquired several times. Another internal interface is
then used to restore the recursion level when the lock is reacquired.
"""
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
gotit = True
else:
if timeout > 0:
gotit = waiter.acquire(True, timeout)
else:
gotit = waiter.acquire(False)
if not gotit:
try:
self._waiters.remove(waiter)
except ValueError:
pass
return gotit
finally:
self._acquire_restore(saved_state)
def wait_for(self, predicate, timeout=None):
"""Wait until a condition evaluates to True.
predicate should be a callable which result will be interpreted as a
boolean value. A timeout may be provided giving the maximum time to
wait.
"""
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
def notify(self, n=1):
"""Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.
"""
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self._waiters
waiters = __waiters[:n]
if not waiters:
return
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
"""Wake up all threads waiting on this condition.
If the calling thread has not acquired the lock when this method
is called, a RuntimeError is raised.
"""
self.notify(len(self._waiters))
notifyAll = notify_all
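# A minimal usage sketch of the classic producer/consumer pattern
# (item_available(), consume() and produce() are placeholder callables):
#
#     cv = Condition()
#
#     # consumer thread:
#     with cv:
#         while not item_available():
#             cv.wait()
#         consume()
#
#     # producer thread:
#     with cv:
#         produce()
#         cv.notify()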
class Semaphore:
"""This class implements semaphore objects.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = Condition(Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
When invoked with a timeout other than None, it will block for at
most timeout seconds. If acquire does not complete successfully in
that interval, return false. Return true otherwise.
"""
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
with self._cond:
while self._value == 0:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value = self._value - 1
rc = True
return rc
__enter__ = acquire
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
with self._cond:
self._value = self._value + 1
self._cond.notify()
def __exit__(self, t, v, tb):
self.release()
class BoundedSemaphore(Semaphore):
"""Implements a bounded semaphore.
A bounded semaphore checks to make sure its current value doesn't exceed its
initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
If the semaphore is released too many times it's a sign of a bug. If not
given, value defaults to 1.
Like regular semaphores, bounded semaphores manage a counter representing
the number of release() calls minus the number of acquire() calls, plus an
initial value. The acquire() method blocks if necessary until it can return
without making the counter negative. If not given, value defaults to 1.
"""
def __init__(self, value=1):
Semaphore.__init__(self, value)
self._initial_value = value
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
If the number of releases exceeds the number of acquires,
raise a ValueError.
"""
with self._cond:
if self._value >= self._initial_value:
raise ValueError("Semaphore released too many times")
self._value += 1
self._cond.notify()
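# A minimal usage sketch: cap concurrent access to a pool of three
# connections (use_connection() is a placeholder callable):
#
#     pool = BoundedSemaphore(3)
#     with pool:
#         use_connection()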
class Event:
"""Class implementing event objects.
Events manage a flag that can be set to true with the set() method and reset
to false with the clear() method. The wait() method blocks until the flag is
true. The flag is initially false.
"""
# After Tim Peters' event class (without is_posted())
def __init__(self):
self._cond = Condition(Lock())
self._flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self._cond.__init__()
def is_set(self):
"""Return true if and only if the internal flag is true."""
return self._flag
isSet = is_set
def set(self):
"""Set the internal flag to true.
All threads waiting for it to become true are awakened. Threads
that call wait() once the flag is true will not block at all.
"""
self._cond.acquire()
try:
self._flag = True
self._cond.notify_all()
finally:
self._cond.release()
def clear(self):
"""Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.
"""
self._cond.acquire()
try:
self._flag = False
finally:
self._cond.release()
def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
self._cond.acquire()
try:
signaled = self._flag
if not signaled:
signaled = self._cond.wait(timeout)
return signaled
finally:
self._cond.release()
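# A minimal usage sketch: one thread blocks until another signals readiness
# (do_setup() is a placeholder callable):
#
#     ready = Event()
#     Thread(target=lambda: (do_setup(), ready.set())).start()
#     ready.wait()    # blocks until do_setup() has finished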
# A barrier class. Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java. See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic. Threads are not allowed into it until it has fully drained
# since the previous cycle. In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier:
"""Implements a Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
    points. Threads block on 'wait()' and are simultaneously released once
    they have all made that call.
"""
def __init__(self, parties, action=None, timeout=None):
"""Create a barrier, initialised to 'parties' threads.
'action' is a callable which, when supplied, will be called by one of
the threads after they have all entered the barrier and just prior to
        releasing them all. If a 'timeout' is provided, it is used as the
default for all subsequent 'wait()' calls.
"""
self._cond = Condition(Lock())
self._action = action
self._timeout = timeout
self._parties = parties
        self._state = 0  # 0 filling, 1 draining, -1 resetting, -2 broken
self._count = 0
def wait(self, timeout=None):
"""Wait for the barrier.
When the specified number of threads have started waiting, they are all
simultaneously awoken. If an 'action' was provided for the barrier, one
of the threads will have executed that callback prior to returning.
Returns an individual index number from 0 to 'parties-1'.
"""
if timeout is None:
timeout = self._timeout
with self._cond:
self._enter() # Block while the barrier drains.
index = self._count
self._count += 1
try:
if index + 1 == self._parties:
# We release the barrier
self._release()
else:
# We wait until someone releases us
self._wait(timeout)
return index
finally:
self._count -= 1
# Wake up any threads waiting for barrier to drain.
self._exit()
# Block until the barrier is ready for us, or raise an exception
# if it is broken.
def _enter(self):
while self._state in (-1, 1):
# It is draining or resetting, wait until done
self._cond.wait()
#see if the barrier is in a broken state
if self._state < 0:
raise BrokenBarrierError
assert self._state == 0
# Optionally run the 'action' and release the threads waiting
# in the barrier.
def _release(self):
try:
if self._action:
self._action()
# enter draining state
self._state = 1
self._cond.notify_all()
except:
#an exception during the _action handler. Break and reraise
self._break()
raise
    # Wait in the barrier until we are released. Raise an exception
# if the barrier is reset or broken.
def _wait(self, timeout):
if not self._cond.wait_for(lambda : self._state != 0, timeout):
#timed out. Break the barrier
self._break()
raise BrokenBarrierError
if self._state < 0:
raise BrokenBarrierError
assert self._state == 1
# If we are the last thread to exit the barrier, signal any threads
# waiting for the barrier to drain.
def _exit(self):
if self._count == 0:
if self._state in (-1, 1):
#resetting or draining
self._state = 0
self._cond.notify_all()
def reset(self):
"""Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
#reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
#was broken, set it to reset state
#which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all()
def abort(self):
"""Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and threads
attempting to 'wait()' will have BrokenBarrierError raised.
"""
with self._cond:
self._break()
def _break(self):
# An internal error was detected. The barrier is set to
# a broken state all parties awakened.
self._state = -2
self._cond.notify_all()
@property
def parties(self):
"""Return the number of threads required to trip the barrier."""
return self._parties
@property
def n_waiting(self):
"""Return the number of threads currently waiting at the barrier."""
# We don't need synchronization here since this is an ephemeral result
# anyway. It returns the correct value in the steady state.
if self._state == 0:
return self._count
return 0
@property
def broken(self):
"""Return True if the barrier is in a broken state."""
return self._state == -2
# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
pass
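# A minimal usage sketch: make three worker threads rendezvous before any of
# them proceeds (prepare() and proceed() are placeholder callables):
#
#     barrier = Barrier(3)
#
#     def worker():
#         prepare()
#         barrier.wait()    # blocks until all three workers have arrived
#         proceed()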
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# For debug and leak testing
_dangling = WeakSet()
# Main class for threads
class Thread:
"""A class that represents a thread of control.
This class can be safely subclassed in a limited fashion. There are two ways
to specify the activity: by passing a callable object to the constructor, or
by overriding the run() method in a subclass.
"""
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
#XXX __exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, *, daemon=None):
"""This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By default, a unique name is constructed of
the form "Thread-N" where N is a small decimal number.
*args* is the argument tuple for the target invocation. Defaults to ().
*kwargs* is a dictionary of keyword arguments for the target
invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke
the base class constructor (Thread.__init__()) before doing anything
else to the thread.
"""
assert group is None, "group argument must be None for now"
if kwargs is None:
kwargs = {}
self._target = target
self._name = str(name or _newname())
self._args = args
self._kwargs = kwargs
if daemon is not None:
self._daemonic = daemon
else:
self._daemonic = current_thread().daemon
self._ident = None
self._started = Event()
self._stopped = False
self._block = Condition(Lock())
self._initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self._stderr = _sys.stderr
_dangling.add(self)
def _reset_internal_locks(self):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
if hasattr(self, '_block'): # DummyThread deletes _block
self._block.__init__()
self._started._reset_internal_locks()
def __repr__(self):
assert self._initialized, "Thread.__init__() was not called"
status = "initial"
if self._started.is_set():
status = "started"
if self._stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
def start(self):
"""Start the thread's activity.
It must be called at most once per thread object. It arranges for the
object's run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.
"""
if not self._initialized:
raise RuntimeError("thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("threads can only be started once")
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self._bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self._started.wait()
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self._target:
self._target(*self._args, **self._kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
def _bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# _bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# _bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self._bootstrap_inner()
except:
if self._daemonic and _sys is None:
return
raise
def _set_ident(self):
self._ident = get_ident()
def _bootstrap_inner(self):
try:
self._set_ident()
self._started.set()
with _active_limbo_lock:
_active[self._ident] = self
del _limbo[self]
if _trace_hook:
_sys.settrace(_trace_hook)
if _profile_hook:
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
pass
except:
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self._stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self._exc_info()
try:
                        print("Exception in thread " + self.name +
                              " (most likely raised during interpreter shutdown):",
                              file=self._stderr)
                        print("Traceback (most recent call last):",
                              file=self._stderr)
                        while exc_tb:
                            print('  File "%s", line %s, in %s' %
                                  (exc_tb.tb_frame.f_code.co_filename,
                                   exc_tb.tb_lineno,
                                   exc_tb.tb_frame.f_code.co_name),
                                  file=self._stderr)
                            exc_tb = exc_tb.tb_next
                        print("%s: %s" % (exc_type, exc_value),
                              file=self._stderr)
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
#XXX self.__exc_clear()
pass
finally:
with _active_limbo_lock:
self._stop()
try:
# We don't call self._delete() because it also
# grabs _active_limbo_lock.
del _active[get_ident()]
except:
pass
def _stop(self):
self._block.acquire()
self._stopped = True
self._block.notify_all()
self._block.release()
def _delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with _dummy_thread:
#
# Must take care to not raise an exception if _dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). _dummy_thread.get_ident() always returns -1 since
# there is only one thread if _dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from _dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
"""Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof). As join() always returns None, you must call
isAlive() after join() to decide whether a timeout happened -- if the
thread is still alive, the join() call timed out.
When the timeout argument is not present or None, the operation will
block until the thread terminates.
A thread can be join()ed many times.
join() raises a RuntimeError if an attempt is made to join the current
thread as that would cause a deadlock. It is also an error to join() a
thread before it has been started and attempts to do so raises the same
exception.
"""
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
self._block.acquire()
try:
if timeout is None:
while not self._stopped:
self._block.wait()
else:
deadline = _time() + timeout
while not self._stopped:
delay = deadline - _time()
if delay <= 0:
break
self._block.wait(delay)
finally:
self._block.release()
@property
def name(self):
"""A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.
"""
assert self._initialized, "Thread.__init__() not called"
return self._name
@name.setter
def name(self, name):
assert self._initialized, "Thread.__init__() not called"
self._name = str(name)
@property
def ident(self):
"""Thread identifier of this thread or None if it has not been started.
This is a nonzero integer. See the thread.get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.
"""
assert self._initialized, "Thread.__init__() not called"
return self._ident
def is_alive(self):
"""Return whether the thread is alive.
This method returns True just before the run() method starts until just
after the run() method terminates. The module function enumerate()
returns a list of all alive threads.
"""
assert self._initialized, "Thread.__init__() not called"
return self._started.is_set() and not self._stopped
isAlive = is_alive
@property
def daemon(self):
"""A boolean value indicating whether this thread is a daemon thread.
This must be set before start() is called, otherwise RuntimeError is
raised. Its initial value is inherited from the creating thread; the
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
The entire Python program exits when no alive non-daemon threads are
left.
"""
assert self._initialized, "Thread.__init__() not called"
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self._daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
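# Example (editor's sketch, not part of the original module): the basic Thread
# lifecycle defined above -- construct with a target, start(), join().
#
#     def worker(n):
#         print("working on", n)
#
#     t = Thread(target=worker, args=(42,), name="worker-1")
#     t.start()          # may be called at most once
#     t.join()           # blocks until run() has finished
#     assert not t.is_alive()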
# The timer class was contributed by Itamar Shtull-Trauring
class Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=None, kwargs=None)
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=None, kwargs=None):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args if args is not None else []
self.kwargs = kwargs if kwargs is not None else {}
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet."""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread", daemon=False)
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
def _exitfunc(self):
self._stop()
t = _pickSomeNonDaemonThread()
while t:
t.join()
t = _pickSomeNonDaemonThread()
self._delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
# Thread._block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._block
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
def _stop(self):
pass
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
"""Return the current Thread object, corresponding to the caller's thread of control.
If the caller's thread of control was not created through the threading
module, a dummy thread object with limited functionality is returned.
"""
try:
return _active[get_ident()]
except KeyError:
return _DummyThread()
currentThread = current_thread
def active_count():
"""Return the number of Thread objects currently alive.
The returned count is equal to the length of the list returned by
enumerate().
"""
with _active_limbo_lock:
return len(_active) + len(_limbo)
activeCount = active_count
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return list(_active.values()) + list(_limbo.values())
def enumerate():
"""Return a list of all Thread objects currently alive.
The list includes daemonic threads, dummy thread objects created by
current_thread(), and the main thread. It excludes terminated threads and
threads that have not yet been started.
"""
with _active_limbo_lock:
return list(_active.values()) + list(_limbo.values())
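# Example (editor's sketch): the introspection helpers above in one snippet.
#
#     me = current_thread()
#     print(me.name, active_count())
#     for t in enumerate():
#         print(t.name, t.daemon, t.is_alive())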
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _thread import _local as local
except ImportError:
from _threading_local import local
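# Example (editor's sketch): whichever implementation is picked above, 'local'
# provides per-thread attribute storage.
#
#     _ctx = local()
#     _ctx.request_id = 7   # only visible from the thread that set it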
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _enumerate():
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
thread._reset_internal_locks()
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = get_ident()
thread._ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
| gpl-3.0 |
havard024/prego | venv/lib/python2.7/site-packages/django/middleware/csrf.py | 20 | 8862 | """
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
from __future__ import unicode_literals
import hashlib
import logging
import re
import random
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.encoding import force_text
from django.utils.http import same_origin
from django.utils.crypto import constant_time_compare, get_random_string
logger = logging.getLogger('django.request')
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
CSRF_KEY_LENGTH = 32
def _get_failure_view():
"""
Returns the view to be used for CSRF rejections
"""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
return get_random_string(CSRF_KEY_LENGTH)
def get_token(request):
"""
Returns the CSRF token required for a POST form. The token is an
alphanumeric value.
A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
request.META["CSRF_COOKIE_USED"] = True
return request.META.get("CSRF_COOKIE", None)
def _sanitize_token(token):
# Allow only alphanum
if len(token) > CSRF_KEY_LENGTH:
return _get_new_csrf_key()
token = re.sub('[^a-zA-Z0-9]+', '', force_text(token))
if token == "":
# In case the cookie has been truncated to nothing at some point.
return _get_new_csrf_key()
return token
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
def _reject(self, request, reason):
return _get_failure_view()(request, reason=reason)
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
try:
csrf_token = _sanitize_token(
request.COOKIES[settings.CSRF_COOKIE_NAME])
# Use same token next time
request.META['CSRF_COOKIE'] = csrf_token
except KeyError:
csrf_token = None
# Generate token and store it in the request, so it's
# available to the view.
request.META["CSRF_COOKIE"] = _get_new_csrf_key()
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
# Assume that anything not defined as 'safe' by RFC2616 needs protection
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite.
# It comes after the creation of CSRF cookies, so that
# everything else continues to work exactly the same
# (e.g. cookies are sent, etc.), but before any
# branches that call reject().
return self._accept(request)
if request.is_secure():
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent
# nonce we're using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = request.META.get('HTTP_REFERER')
if referer is None:
logger.warning('Forbidden (%s): %s',
REASON_NO_REFERER, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_NO_REFERER)
# Note that request.get_host() includes the port.
good_referer = 'https://%s/' % request.get_host()
if not same_origin(referer, good_referer):
reason = REASON_BAD_REFERER % (referer, good_referer)
logger.warning('Forbidden (%s): %s', reason, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
logger.warning('Forbidden (%s): %s',
REASON_NO_CSRF_COOKIE, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.method == "POST":
request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX,
# and possible for PUT/DELETE.
request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')
if not constant_time_compare(request_csrf_token, csrf_token):
logger.warning('Forbidden (%s): %s',
REASON_BAD_TOKEN, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_BAD_TOKEN)
return self._accept(request)
def process_response(self, request, response):
if getattr(response, 'csrf_processing_done', False):
return response
# If CSRF_COOKIE is unset, then CsrfViewMiddleware.process_view was
        # never called, probably because a request middleware returned a response
# (for example, contrib.auth redirecting to a login page).
if request.META.get("CSRF_COOKIE") is None:
return response
if not request.META.get("CSRF_COOKIE_USED", False):
return response
# Set the CSRF cookie even if it's already set, so we renew
# the expiry timer.
response.set_cookie(settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"],
max_age = 60 * 60 * 24 * 7 * 52,
domain=settings.CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=settings.CSRF_COOKIE_SECURE
)
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
response.csrf_processing_done = True
return response
| mit |
gogogo/gogogo-hk | gogogo/models/property.py | 1 | 3233 | from google.appengine.ext import db
from django import forms
from django.utils.translation import ugettext_lazy as _
class TransitTypeProperty(db.IntegerProperty):
"""
Transit Type Property - Storage of transit type
"""
def __init__ (self,*args,**kwargs):
kwargs["choices"] = range(0,8)
db.IntegerProperty.__init__(self,*args,**kwargs)
def validate(self, value):
if isinstance(value,basestring):
value = int(value)
return super(TransitTypeProperty, self).validate(value)
def get_form_field(self, **kwargs):
attrs = {
'form_class': forms.ChoiceField,
'choices' : TransitTypeProperty.get_choices()
}
attrs.update(kwargs)
return super(TransitTypeProperty, self).get_form_field(**attrs)
def get_choices():
ret = [ (i,TransitTypeProperty.get_type_name(i)) for i in range(0,8)]
return ret
get_choices = staticmethod(get_choices)
def get_basic_type_name_list():
"""
Return a list of basic type name
"""
ret = [TransitTypeProperty.get_type_name(i) for i in range(0,8)]
return ret
get_basic_type_name_list = staticmethod(get_basic_type_name_list)
def get_type_name(type):
if type == 0:
return _("Tram, Streetcar, Light rail")
elif type == 1:
return _("Subway, Metro") #Any underground rail system within a metropolitan area
elif type == 2:
return _("Rail") #Used for intercity or long-distance travel.
elif type == 3:
return _("Bus")
elif type == 4:
return _("Ferry")
elif type == 5:
return _("Cable car")
elif type == 6:
return _("Gondola, Suspended cable car")
elif type == 7:
return _("Funicular")
else:
return ""
get_type_name = staticmethod(get_type_name)
class PaymentMethodProperty(db.IntegerProperty):
"""
Payment Method
"""
def __init__ (self,*args,**kwargs):
kwargs["choices"] = range(0,2)
if "default" not in kwargs:
kwargs["default"] = 0
db.IntegerProperty.__init__(self,*args,**kwargs)
def validate(self, value):
if isinstance(value,basestring):
value = int(value)
return super(PaymentMethodProperty, self).validate(value)
def get_form_field(self, **kwargs):
attrs = {
'form_class': forms.ChoiceField,
'choices' : PaymentMethodProperty.get_choices()
}
attrs.update(kwargs)
return super(PaymentMethodProperty, self).get_form_field(**attrs)
def get_choices():
ret = [ (i,PaymentMethodProperty.get_type_name(i)) for i in range(0,2)]
return ret
get_choices = staticmethod(get_choices)
def get_type_name(type):
if type == 0:
return _("Fare is paid on board")
elif type == 1:
return _("Fare must be paid before boarding")
get_type_name = staticmethod(get_type_name)
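# Example (editor's sketch, hypothetical model): both property classes above
# plug into an App Engine db.Model like the built-in property types.
#
#     class Route(db.Model):
#         transit_type = TransitTypeProperty(verbose_name=_("Transit type"))
#         payment_method = PaymentMethodProperty()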
| agpl-3.0 |
Oliver2213/NVDAYoutube-dl | addon/globalPlugins/nvdaYoutubeDL/lib/xml/dom/xmlbuilder.py | 239 | 12337 | """Implementation of the DOM Level 3 'LS-Load' feature."""
import copy
import xml.dom
from xml.dom.NodeFilter import NodeFilter
__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"]
class Options:
"""Features object that has variables set for each DOMBuilder feature.
The DOMBuilder class uses an instance of this class to pass settings to
the ExpatBuilder class.
"""
# Note that the DOMBuilder class in LoadSave constrains which of these
# values can be set using the DOM Level 3 LoadSave feature.
namespaces = 1
namespace_declarations = True
validation = False
external_parameter_entities = True
external_general_entities = True
external_dtd_subset = True
validate_if_schema = False
validate = False
datatype_normalization = False
create_entity_ref_nodes = True
entities = True
whitespace_in_element_content = True
cdata_sections = True
comments = True
charset_overrides_xml_encoding = True
infoset = False
supported_mediatypes_only = False
errorHandler = None
filter = None
class DOMBuilder:
entityResolver = None
errorHandler = None
filter = None
ACTION_REPLACE = 1
ACTION_APPEND_AS_CHILDREN = 2
ACTION_INSERT_AFTER = 3
ACTION_INSERT_BEFORE = 4
_legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN,
ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE)
def __init__(self):
self._options = Options()
def _get_entityResolver(self):
return self.entityResolver
def _set_entityResolver(self, entityResolver):
self.entityResolver = entityResolver
def _get_errorHandler(self):
return self.errorHandler
def _set_errorHandler(self, errorHandler):
self.errorHandler = errorHandler
def _get_filter(self):
return self.filter
def _set_filter(self, filter):
self.filter = filter
def setFeature(self, name, state):
if self.supportsFeature(name):
state = state and 1 or 0
try:
settings = self._settings[(_name_xform(name), state)]
except KeyError:
raise xml.dom.NotSupportedErr(
"unsupported feature: %r" % (name,))
else:
for name, value in settings:
setattr(self._options, name, value)
else:
raise xml.dom.NotFoundErr("unknown feature: " + repr(name))
def supportsFeature(self, name):
return hasattr(self._options, _name_xform(name))
def canSetFeature(self, name, state):
key = (_name_xform(name), state and 1 or 0)
return key in self._settings
# This dictionary maps from (feature,value) to a list of
# (option,value) pairs that should be set on the Options object.
# If a (feature,value) setting is not in this dictionary, it is
# not supported by the DOMBuilder.
#
_settings = {
("namespace_declarations", 0): [
("namespace_declarations", 0)],
("namespace_declarations", 1): [
("namespace_declarations", 1)],
("validation", 0): [
("validation", 0)],
("external_general_entities", 0): [
("external_general_entities", 0)],
("external_general_entities", 1): [
("external_general_entities", 1)],
("external_parameter_entities", 0): [
("external_parameter_entities", 0)],
("external_parameter_entities", 1): [
("external_parameter_entities", 1)],
("validate_if_schema", 0): [
("validate_if_schema", 0)],
("create_entity_ref_nodes", 0): [
("create_entity_ref_nodes", 0)],
("create_entity_ref_nodes", 1): [
("create_entity_ref_nodes", 1)],
("entities", 0): [
("create_entity_ref_nodes", 0),
("entities", 0)],
("entities", 1): [
("entities", 1)],
("whitespace_in_element_content", 0): [
("whitespace_in_element_content", 0)],
("whitespace_in_element_content", 1): [
("whitespace_in_element_content", 1)],
("cdata_sections", 0): [
("cdata_sections", 0)],
("cdata_sections", 1): [
("cdata_sections", 1)],
("comments", 0): [
("comments", 0)],
("comments", 1): [
("comments", 1)],
("charset_overrides_xml_encoding", 0): [
("charset_overrides_xml_encoding", 0)],
("charset_overrides_xml_encoding", 1): [
("charset_overrides_xml_encoding", 1)],
("infoset", 0): [],
("infoset", 1): [
("namespace_declarations", 0),
("validate_if_schema", 0),
("create_entity_ref_nodes", 0),
("entities", 0),
("cdata_sections", 0),
("datatype_normalization", 1),
("whitespace_in_element_content", 1),
("comments", 1),
("charset_overrides_xml_encoding", 1)],
("supported_mediatypes_only", 0): [
("supported_mediatypes_only", 0)],
("namespaces", 0): [
("namespaces", 0)],
("namespaces", 1): [
("namespaces", 1)],
}
def getFeature(self, name):
xname = _name_xform(name)
try:
return getattr(self._options, xname)
except AttributeError:
if name == "infoset":
options = self._options
return (options.datatype_normalization
and options.whitespace_in_element_content
and options.comments
and options.charset_overrides_xml_encoding
and not (options.namespace_declarations
or options.validate_if_schema
or options.create_entity_ref_nodes
or options.entities
or options.cdata_sections))
raise xml.dom.NotFoundErr("feature %s not known" % repr(name))
def parseURI(self, uri):
if self.entityResolver:
input = self.entityResolver.resolveEntity(None, uri)
else:
input = DOMEntityResolver().resolveEntity(None, uri)
return self.parse(input)
def parse(self, input):
options = copy.copy(self._options)
options.filter = self.filter
options.errorHandler = self.errorHandler
fp = input.byteStream
        if fp is None and input.systemId:
import urllib2
fp = urllib2.urlopen(input.systemId)
return self._parse_bytestream(fp, options)
def parseWithContext(self, input, cnode, action):
if action not in self._legal_actions:
raise ValueError("not a legal action")
raise NotImplementedError("Haven't written this yet...")
def _parse_bytestream(self, stream, options):
import xml.dom.expatbuilder
builder = xml.dom.expatbuilder.makeBuilder(options)
return builder.parseFile(stream)
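# Example (editor's sketch): a minimal LS-Load round trip with the classes in
# this module; the URI is a placeholder.
#
#     builder = DOMBuilder()
#     builder.setFeature("namespaces", 1)
#     doc = builder.parseURI("http://example.invalid/doc.xml")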
def _name_xform(name):
return name.lower().replace('-', '_')
class DOMEntityResolver(object):
__slots__ = '_opener',
def resolveEntity(self, publicId, systemId):
assert systemId is not None
source = DOMInputSource()
source.publicId = publicId
source.systemId = systemId
source.byteStream = self._get_opener().open(systemId)
# determine the encoding if the transport provided it
source.encoding = self._guess_media_encoding(source)
        # determine the base URI if we can
import posixpath, urlparse
parts = urlparse.urlparse(systemId)
scheme, netloc, path, params, query, fragment = parts
# XXX should we check the scheme here as well?
if path and not path.endswith("/"):
path = posixpath.dirname(path) + "/"
parts = scheme, netloc, path, params, query, fragment
source.baseURI = urlparse.urlunparse(parts)
return source
def _get_opener(self):
try:
return self._opener
except AttributeError:
self._opener = self._create_opener()
return self._opener
def _create_opener(self):
import urllib2
return urllib2.build_opener()
def _guess_media_encoding(self, source):
info = source.byteStream.info()
if "Content-Type" in info:
for param in info.getplist():
if param.startswith("charset="):
return param.split("=", 1)[1].lower()
class DOMInputSource(object):
__slots__ = ('byteStream', 'characterStream', 'stringData',
'encoding', 'publicId', 'systemId', 'baseURI')
def __init__(self):
self.byteStream = None
self.characterStream = None
self.stringData = None
self.encoding = None
self.publicId = None
self.systemId = None
self.baseURI = None
def _get_byteStream(self):
return self.byteStream
def _set_byteStream(self, byteStream):
self.byteStream = byteStream
def _get_characterStream(self):
return self.characterStream
def _set_characterStream(self, characterStream):
self.characterStream = characterStream
def _get_stringData(self):
return self.stringData
def _set_stringData(self, data):
self.stringData = data
def _get_encoding(self):
return self.encoding
def _set_encoding(self, encoding):
self.encoding = encoding
def _get_publicId(self):
return self.publicId
def _set_publicId(self, publicId):
self.publicId = publicId
def _get_systemId(self):
return self.systemId
def _set_systemId(self, systemId):
self.systemId = systemId
def _get_baseURI(self):
return self.baseURI
def _set_baseURI(self, uri):
self.baseURI = uri
class DOMBuilderFilter:
"""Element filter which can be used to tailor construction of
a DOM instance.
"""
# There's really no need for this class; concrete implementations
# should just implement the endElement() and startElement()
# methods as appropriate. Using this makes it easy to only
# implement one of them.
FILTER_ACCEPT = 1
FILTER_REJECT = 2
FILTER_SKIP = 3
FILTER_INTERRUPT = 4
whatToShow = NodeFilter.SHOW_ALL
def _get_whatToShow(self):
return self.whatToShow
def acceptNode(self, element):
return self.FILTER_ACCEPT
def startContainer(self, element):
return self.FILTER_ACCEPT
del NodeFilter
class DocumentLS:
"""Mixin to create documents that conform to the load/save spec."""
async = False
def _get_async(self):
return False
def _set_async(self, async):
if async:
raise xml.dom.NotSupportedErr(
"asynchronous document loading is not supported")
def abort(self):
# What does it mean to "clear" a document? Does the
# documentElement disappear?
raise NotImplementedError(
"haven't figured out what this means yet")
def load(self, uri):
raise NotImplementedError("haven't written this yet")
def loadXML(self, source):
raise NotImplementedError("haven't written this yet")
def saveXML(self, snode):
if snode is None:
snode = self
elif snode.ownerDocument is not self:
raise xml.dom.WrongDocumentErr()
return snode.toxml()
class DOMImplementationLS:
MODE_SYNCHRONOUS = 1
MODE_ASYNCHRONOUS = 2
def createDOMBuilder(self, mode, schemaType):
if schemaType is not None:
raise xml.dom.NotSupportedErr(
"schemaType not yet supported")
if mode == self.MODE_SYNCHRONOUS:
return DOMBuilder()
if mode == self.MODE_ASYNCHRONOUS:
raise xml.dom.NotSupportedErr(
"asynchronous builders are not supported")
raise ValueError("unknown value for mode")
def createDOMWriter(self):
raise NotImplementedError(
"the writer interface hasn't been written yet!")
def createDOMInputSource(self):
return DOMInputSource()
| gpl-2.0 |
funkring/fdoo | addons/fetchmail/res_config.py | 437 | 5234 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class fetchmail_config_settings(osv.osv_memory):
""" This wizard can be inherited in conjunction with 'res.config.settings', in order to
define fields that configure a fetchmail server.
It relies on the following convention on the object::
class my_config_settings(osv.osv_memory):
_name = 'my.settings'
_inherit = ['res.config.settings', 'fetchmail.config.settings']
_columns = {
'fetchmail_stuff': fields.boolean(...,
fetchmail_model='my.stuff', fetchmail_name='Incoming Stuff'),
}
def configure_fetchmail_stuff(self, cr, uid, ids, context=None):
return self.configure_fetchmail(cr, uid, 'fetchmail_stuff', context)
and in the form view::
<field name="fetchmail_stuff"/>
<button type="object" name="configure_fetchmail_stuff"/>
The method ``get_default_fetchmail`` determines the value of all fields that start
with 'fetchmail_'. It looks up fetchmail server configurations that match the given
model name (``fetchmail_model``) and are active.
The button action ``configure_fetchmail_stuff`` is caught by the object, and calls
automatically the method ``configure_fetchmail``; it opens the fetchmail server
configuration form for the corresponding field.
"""
_name = 'fetchmail.config.settings'
def get_default_fetchmail(self, cr, uid, fields, context=None):
""" determine the value of all fields like 'fetchmail_XXX' """
ir_model = self.pool.get('ir.model')
fetchmail_server = self.pool.get('fetchmail.server')
fetchmail_fields = [f for f in fields if f.startswith('fetchmail_')]
res = {}
for f in fetchmail_fields:
model_name = self._columns[f].fetchmail_model
model_id = ir_model.search(cr, uid, [('model', '=', model_name)])[0]
server_ids = fetchmail_server.search(cr, uid, [('object_id', '=', model_id), ('state', '=', 'done')])
res[f] = bool(server_ids)
return res
def set_fetchmail(self, cr, uid, ids, context=None):
""" deactivate fetchmail servers for all fields like 'fetchmail_XXX' that are False """
config = self.browse(cr, uid, ids[0], context)
fetchmail_fields = [f for f in self._columns if f.startswith('fetchmail_')]
# determine which models should not have active fetchmail servers, and
# deactivate all active servers for those models
models = [self._columns[f].fetchmail_model for f in fetchmail_fields if not config[f]]
if models:
fetchmail_server = self.pool.get('fetchmail.server')
server_ids = fetchmail_server.search(cr, uid, [('object_id.model', 'in', models), ('state', '=', 'done')])
fetchmail_server.set_draft(cr, uid, server_ids, context)
def configure_fetchmail(self, cr, uid, field, context=None):
""" open the form view of the fetchmail.server to configure """
action = {
'type': 'ir.actions.act_window',
'res_model': 'fetchmail.server',
'view_mode': 'form',
'target': 'current',
}
model_name = self._columns[field].fetchmail_model
model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', model_name)])[0]
server_ids = self.pool.get('fetchmail.server').search(cr, uid, [('object_id', '=', model_id)])
if server_ids:
action['res_id'] = server_ids[0]
else:
action['context'] = {
'default_name': self._columns[field].fetchmail_name,
'default_object_id': model_id,
}
return action
def __getattr__(self, name):
""" catch calls to 'configure_fetchmail_XXX' """
if name.startswith('configure_fetchmail_'):
return (lambda cr, uid, ids, context=None:
self.configure_fetchmail(cr, uid, name[10:], context))
return super(fetchmail_config_settings, self).__getattr__(name)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shubhdev/openedx | common/djangoapps/heartbeat/views.py | 199 | 1440 | from xmodule.modulestore.django import modulestore
from dogapi import dog_stats_api
from util.json_request import JsonResponse
from django.db import connection
from django.db.utils import DatabaseError
from xmodule.exceptions import HeartbeatFailure
@dog_stats_api.timed('edxapp.heartbeat')
def heartbeat(request):
"""
Simple view that a loadbalancer can check to verify that the app is up. Returns a json doc
of service id: status or message. If the status for any service is anything other than True,
it returns HTTP code 503 (Service Unavailable); otherwise, it returns 200.
"""
# This refactoring merely delegates to the default modulestore (which if it's mixed modulestore will
# delegate to all configured modulestores) and a quick test of sql. A later refactoring may allow
# any service to register itself as participating in the heartbeat. It's important that all implementation
# do as little as possible but give a sound determination that they are ready.
try:
output = modulestore().heartbeat()
except HeartbeatFailure as fail:
return JsonResponse({fail.service: unicode(fail)}, status=503)
cursor = connection.cursor()
try:
cursor.execute("SELECT CURRENT_DATE")
cursor.fetchone()
output['SQL'] = True
except DatabaseError as fail:
return JsonResponse({'SQL': unicode(fail)}, status=503)
return JsonResponse(output)
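# Example (editor's sketch): a healthy check returns HTTP 200 with a body like
# {"<modulestore name>": true, "SQL": true}; any failing service short-circuits
# with HTTP 503 and {"<service>": "<error message>"} instead.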
| agpl-3.0 |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/scipy/__config__.py | 2 | 1289 | # This file is generated by /tmp/pip-6gjs2vkw-build/-c
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blas_mkl_info={}
openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
| mit |
franky88/emperioanimesta | env/Lib/site-packages/django/db/migrations/migration.py | 123 | 8324 | from __future__ import unicode_literals
from django.db.transaction import atomic
from django.utils.encoding import python_2_unicode_compatible
from .exceptions import IrreversibleError
@python_2_unicode_compatible
class Migration(object):
"""
The base class for all migrations.
    Migration files import this class as django.db.migrations.Migration
    and subclass it as a class called Migration. It will have one or more
of the following attributes:
- operations: A list of Operation instances, probably from django.db.migrations.operations
- dependencies: A list of tuples of (app_path, migration_name)
- run_before: A list of tuples of (app_path, migration_name)
- replaces: A list of migration_names
Note that all migrations come out of migrations and into the Loader or
Graph as instances, having been initialized with their app label and name.
"""
# Operations to apply during this migration, in order.
operations = []
# Other migrations that should be run before this migration.
# Should be a list of (app, migration_name).
dependencies = []
# Other migrations that should be run after this one (i.e. have
# this migration added to their dependencies). Useful to make third-party
# apps' migrations run after your AUTH_USER replacement, for example.
run_before = []
# Migration names in this app that this migration replaces. If this is
# non-empty, this migration will only be applied if all these migrations
# are not applied.
replaces = []
# Is this an initial migration? Initial migrations are skipped on
# --fake-initial if the table or fields already exist. If None, check if
# the migration has any dependencies to determine if there are dependencies
# to tell if db introspection needs to be done. If True, always perform
# introspection. If False, never perform introspection.
initial = None
# Whether to wrap the whole migration in a transaction. Only has an effect
# on database backends which support transactional DDL.
atomic = True
def __init__(self, name, app_label):
self.name = name
self.app_label = app_label
# Copy dependencies & other attrs as we might mutate them at runtime
self.operations = list(self.__class__.operations)
self.dependencies = list(self.__class__.dependencies)
self.run_before = list(self.__class__.run_before)
self.replaces = list(self.__class__.replaces)
def __eq__(self, other):
if not isinstance(other, Migration):
return False
return (self.name == other.name) and (self.app_label == other.app_label)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "<Migration %s.%s>" % (self.app_label, self.name)
def __str__(self):
return "%s.%s" % (self.app_label, self.name)
def __hash__(self):
return hash("%s.%s" % (self.app_label, self.name))
def mutate_state(self, project_state, preserve=True):
"""
Takes a ProjectState and returns a new one with the migration's
operations applied to it. Preserves the original object state by
default and will return a mutated state from a copy.
"""
new_state = project_state
if preserve:
new_state = project_state.clone()
for operation in self.operations:
operation.state_forwards(self.app_label, new_state)
return new_state
def apply(self, project_state, schema_editor, collect_sql=False):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
in a forwards order.
Returns the resulting project state for efficient re-use by following
Migrations.
"""
for operation in self.operations:
# If this operation cannot be represented as SQL, place a comment
# there instead
if collect_sql:
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:"
)
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
continue
# Save the state before the operation has run
old_state = project_state.clone()
operation.state_forwards(self.app_label, project_state)
# Run the operation
atomic_operation = operation.atomic or (self.atomic and operation.atomic is not False)
if not schema_editor.atomic_migration and atomic_operation:
# Force a transaction on a non-transactional-DDL backend or an
# atomic operation inside a non-atomic migration.
with atomic(schema_editor.connection.alias):
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
else:
# Normal behaviour
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
return project_state
def unapply(self, project_state, schema_editor, collect_sql=False):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
in a reverse order.
The backwards migration process consists of two phases:
1. The intermediate states from right before the first until right
after the last operation inside this migration are preserved.
2. The operations are applied in reverse order using the states
recorded in step 1.
"""
# Construct all the intermediate states we need for a reverse migration
to_run = []
new_state = project_state
# Phase 1
for operation in self.operations:
# If it's irreversible, error out
if not operation.reversible:
raise IrreversibleError("Operation %s in %s is not reversible" % (operation, self))
# Preserve new state from previous run to not tamper the same state
# over all operations
new_state = new_state.clone()
old_state = new_state.clone()
operation.state_forwards(self.app_label, new_state)
to_run.insert(0, (operation, old_state, new_state))
# Phase 2
for operation, to_state, from_state in to_run:
if collect_sql:
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:"
)
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
continue
if not schema_editor.connection.features.can_rollback_ddl and operation.atomic:
# We're forcing a transaction on a non-transactional-DDL backend
with atomic(schema_editor.connection.alias):
operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
else:
# Normal behaviour
operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
return project_state
class SwappableTuple(tuple):
"""
Subclass of tuple so Django can tell this was originally a swappable
dependency when it reads the migration file.
"""
def __new__(cls, value, setting):
self = tuple.__new__(cls, value)
self.setting = setting
return self
def swappable_dependency(value):
"""
Turns a setting value into a dependency.
"""
return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
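# Example (editor's sketch, hypothetical app): the class attributes documented
# above as they typically appear in a generated migration module.
#
#     class Migration(migrations.Migration):
#         initial = True
#         dependencies = [swappable_dependency(settings.AUTH_USER_MODEL)]
#         operations = [
#             migrations.AddField("book", "author", models.IntegerField()),
#         ]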
| gpl-3.0 |
bmazin/ARCONS-pipeline | examples/Pal2014-J0337/hTestLimit.py | 1 | 8356 | #Filename: hTestLimit.py
#Author: Matt Strader
#
#This script opens a list of observed photon phases, runs the H-test on them,
#and runs Monte Carlo trials against a model pulse profile to estimate a
#pulsed-fraction limit.
import numpy as np
import tables
import numexpr
import matplotlib.pyplot as plt
import multiprocessing
import functools
import time
from kuiper.kuiper import kuiper,kuiper_FPP
from kuiper.htest import h_test,h_fpp,h_test2
from pulsarUtils import nSigma,plotPulseProfile
from histMetrics import kuiperFpp,hTestFpp
from inverseTransformSampling import inverseTransformSampler
def hTestTrial(iTrial,nPhotons,photonPulseFraction,pulseModel,pulseModelQueryPoints):
np.random.seed(int((time.time()+iTrial)*1e6))
modelSampler = inverseTransformSampler(pdf=pulseModel,queryPoints=pulseModelQueryPoints)
nPulsePhotons = int(np.floor(photonPulseFraction*nPhotons))
nBackgroundPhotons = int(np.ceil((1.-photonPulseFraction) * nPhotons))
simPulsePhotons = modelSampler(nPulsePhotons)
#background photons come from a uniform distribution
simBackgroundPhotons = np.random.random(nBackgroundPhotons)
simPhases = np.append(simPulsePhotons,simBackgroundPhotons)
simHDict = h_test2(simPhases)
simH,simM,simPval,simFourierCoeffs = simHDict['H'],simHDict['M'],simHDict['fpp'],simHDict['cs']
    print '{} - H,M,fpp:'.format(iTrial),simH,simM,simPval
return {'H':simH,'M':simM,'fpp':simPval}
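# Example (editor's sketch): a single trial under the null hypothesis; with
# photonPulseFraction=0 every photon is drawn uniform in phase, so H should
# follow its null distribution.
#
#     qp = np.linspace(0.01, 1., 100)
#     res = hTestTrial(0, nPhotons=10000, photonPulseFraction=0.,
#                      pulseModel=np.ones(100), pulseModelQueryPoints=qp)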
if __name__=='__main__':
path = '/Scratch/dataProcessing/J0337/masterPhotons3.h5'
wvlStart = 4000.
wvlEnd = 5500.
bLoadFromPl = True
nPhaseBins = 20
hTestPath = '/Scratch/dataProcessing/J0337/hTestResults_withProfiles_{}-{}.npz'.format(wvlStart,wvlEnd)
phaseBinEdges = np.linspace(0.,1.,nPhaseBins+1)
if bLoadFromPl:
photFile = tables.openFile(path,'r')
photTable = photFile.root.photons.photTable
phases = photTable.readWhere('(wvlStart < wavelength) & (wavelength < wvlEnd)')['phase']
photFile.close()
print 'cut wavelengths to range ({},{})'.format(wvlStart,wvlEnd)
nPhotons = len(phases)
print nPhotons,'real photons read'
observedProfile,_ = np.histogram(phases,bins=phaseBinEdges)
observedProfile = 1.0*observedProfile
observedProfileErrors = np.sqrt(observedProfile)
#Do H-test
hDict = h_test2(phases)
H,M,pval,fourierCoeffs = hDict['H'],hDict['M'],hDict['fpp'],hDict['cs']
print 'h-test on real data'
print 'H,M,fpp:',H,M,pval
print nSigma(1-pval),'sigmas'
#h_test2 calculates all fourierCoeffs out to 20, but for the fourier model, we only want the ones out to order M, which optimizes the Zm^2 metric
truncatedFourierCoeffs = fourierCoeffs[0:M]
print 'fourier coeffs:',truncatedFourierCoeffs
    #for the model, we want the negative modes as well as positive, so add them
modelFourierCoeffs = np.concatenate([truncatedFourierCoeffs[::-1],[1.],np.conj(truncatedFourierCoeffs)])
#make array of mode numbers
modes = np.arange(-len(truncatedFourierCoeffs),len(truncatedFourierCoeffs)+1)
#save so next time we can set bLoadFromPl=False
np.savez(hTestPath,H=H,M=M,pval=pval,fourierCoeffs=fourierCoeffs,nPhotons=nPhotons,wvlRange=(wvlStart,wvlEnd),modelFourierCoeffs=modelFourierCoeffs,modes=modes,observedProfile=observedProfile,observedProfileErrors=observedProfileErrors,phaseBinEdges=phaseBinEdges)
else:
#Load values from previous run, when we had bLoadFromPl=True
hTestDict = np.load(hTestPath)
H,M,pval,fourierCoeffs,nPhotons,modelFourierCoeffs,modes = hTestDict['H'],hTestDict['M'],hTestDict['pval'],hTestDict['fourierCoeffs'],hTestDict['nPhotons'],hTestDict['modelFourierCoeffs'],hTestDict['modes']
observedProfile,observedProfileErrors,phaseBinEdges = hTestDict['observedProfile'],hTestDict['observedProfileErrors'],hTestDict['phaseBinEdges']
print 'h-test on real data'
print 'H,M,fpp:',H,M,pval
print nSigma(1-pval),'sigmas'
#Plot the observed profile
fig,ax = plt.subplots(1,1)
plotPulseProfile(phaseBinEdges,observedProfile,profileErrors=observedProfileErrors,color='k',plotDoublePulse=False,label='observed',ax=ax)
ax.set_ylabel('counts')
ax.set_xlabel('phase')
ax.set_title('Observed Folded Light Curve {}-{} nm'.format(wvlStart/10.,wvlEnd/10.))
#make as set of x points for the pulse model we'll make
#Do NOT include x=0, or the inverted function will have a jump that causes an excess of samples
#at phase=0
nSmoothPlotPoints=1000
pulseModelQueryPoints = np.linspace(1./nSmoothPlotPoints,1,nSmoothPlotPoints)
def modelProfile(thetas):
return np.sum( modelFourierCoeffs * np.exp(2.j*np.pi*modes*thetas[:,np.newaxis]),axis=1)
lightCurveModel = np.abs(modelProfile(pulseModelQueryPoints))
#for this test we only want the model to be the pulsed component. We will add a DC offset later
pulseModel = lightCurveModel - np.min(lightCurveModel)
#initialPhotonPulseFraction = 1.*np.sum(pulseModel) / np.sum(lightCurveModel)
photonPulseFraction=15400./nPhotons #skip to previously determined answer
print 'photon fraction',photonPulseFraction
#get samples with distribution of the modelProfile
#modelSampler = inverseTransformSampler(pdf=lightCurveModel,queryPoints=pulseModelQueryPoints)
modelSampler = inverseTransformSampler(pdf=pulseModel,queryPoints=pulseModelQueryPoints)
nTrials = 1
#for each trial run the h test on a set of photon phases with our model profile, and with the pulse fraction specified
#we want to make a distribution of H values for this pulse fraction, model, and number of photons
#make a function that only takes the trial number (as an identifier)
mappableHTestTrial = functools.partial(hTestTrial,pulseModel=pulseModel,
pulseModelQueryPoints=pulseModelQueryPoints,nPhotons=nPhotons,
photonPulseFraction=photonPulseFraction)
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()-3)#leave a few processors for other people
outDicts = pool.map(mappableHTestTrial,np.arange(nTrials))
simHs = np.array([out['H'] for out in outDicts])
simPvals = np.array([out['fpp'] for out in outDicts])
#save the resulting list of H vals
np.savez('sim3-h-{}.npz'.format(nTrials),simHs=simHs,simPvals=simPvals,pval=pval,H=H,photonPulseFraction=photonPulseFraction,nPhotons=nPhotons)
#make a model profile once more for a plot
modelSampler = inverseTransformSampler(pdf=pulseModel,queryPoints=pulseModelQueryPoints)
nPulsePhotons = int(np.floor(photonPulseFraction*nPhotons))
nBackgroundPhotons = int(np.ceil((1.-photonPulseFraction) * nPhotons))
simPulsePhotons = modelSampler(nPulsePhotons)
#background photons come from a uniform distribution
simBackgroundPhotons = np.random.random(nBackgroundPhotons)
#put them together for the full profile
simPhases = np.append(simPulsePhotons,simBackgroundPhotons)
#make a binned phase profile to plot
simProfile,_ = np.histogram(simPhases,bins=phaseBinEdges)
simProfileErrors = np.sqrt(simProfile)#assume Poisson errors
meanLevel = np.mean(simProfile)
fig,ax = plt.subplots(1,1)
ax.plot(pulseModelQueryPoints,meanLevel*lightCurveModel,color='r')
plotPulseProfile(phaseBinEdges,simProfile,profileErrors=simProfileErrors,color='b',plotDoublePulse=False,label='sim',ax=ax)
ax.set_title('Simulated profile')
#
#plt.show()
print '{} trials'.format(len(simHs))
print 'observed fpp:',pval
frac = 1.*np.sum(simPvals<pval)/len(simPvals)
print 'fraction of trials with H below observed fpp:',frac
#hHist,hBinEdges = np.histogram(simHs,bins=100,density=True)
fppHist,fppBinEdges = np.histogram(simPvals,bins=100,density=True)
if nTrials > 1:
fig,ax = plt.subplots(1,1)
ax.plot(fppBinEdges[0:-1],fppHist,drawstyle='steps-post',color='k')
ax.axvline(pval,color='r')
ax.set_xlabel('fpp')
ax.set_ylabel('frequency')
ax.set_title('Distribution of H for model profile')
magG = 17.93
sineMagDiff = -2.5*np.log10(photonPulseFraction)
print 'SDSS magnitude g: {:.2f}'.format(magG)
print 'magnitude difference: {:.2f}'.format(sineMagDiff)
print 'limiting g mag: {:.2f}'.format(magG+sineMagDiff)
plt.show()
| gpl-2.0 |
alanquillin/ryu | ryu/contrib/tinyrpc/transports/__init__.py | 43 | 1789 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class ServerTransport(object):
"""Base class for all server transports."""
def receive_message(self):
"""Receive a message from the transport.
Blocks until another message has been received. May return a context
opaque to clients that should be passed on
:py:func:`~tinyrpc.transport.Transport.send_reply` to identify the
client later on.
:return: A tuple consisting of ``(context, message)``.
"""
raise NotImplementedError()
def send_reply(self, context, reply):
"""Sends a reply to a client.
The client is usually identified by passing ``context`` as returned
from the original
:py:func:`~tinyrpc.transport.Transport.receive_message` call.
Messages must be strings, it is up to the sender to convert the
beforehand. A non-string value raises a :py:exc:`TypeError`.
:param context: A context returned by
:py:func:`~tinyrpc.transport.Transport.receive_message`.
:param reply: A string to send back as the reply.
"""
raise NotImplementedError
class ClientTransport(object):
"""Base class for all client transports."""
def send_message(self, message, expect_reply=True):
"""Send a message to the server and possibly receive a reply.
Sends a message to the connected server.
Messages must be strings, it is up to the sender to convert the
beforehand. A non-string value raises a :py:exc:`TypeError`.
This function will block until one reply has been received.
:param message: A string to send.
:return: A string containing the server reply.
"""
raise NotImplementedError
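# Example (editor's sketch, hypothetical subclass): the minimal contract a
# concrete client transport must honor -- strings in, string reply out,
# TypeError on non-string input.
#
#     class LoopbackTransport(ClientTransport):
#         def send_message(self, message, expect_reply=True):
#             if not isinstance(message, str):
#                 raise TypeError('message must be a string')
#             return message if expect_reply else None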
| apache-2.0 |
ImmobilienScout24/moto | tests/test_kms/test_kms.py | 9 | 11046 | from __future__ import unicode_literals
import re
import boto.kms
from boto.exception import JSONResponseError
from boto.kms.exceptions import AlreadyExistsException, NotFoundException
import sure # noqa
from moto import mock_kms
from nose.tools import assert_raises
@mock_kms
def test_create_key():
conn = boto.kms.connect_to_region("us-west-2")
key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT')
key['KeyMetadata']['Description'].should.equal("my key")
key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT")
key['KeyMetadata']['Enabled'].should.equal(True)
@mock_kms
def test_describe_key():
conn = boto.kms.connect_to_region("us-west-2")
key = conn.create_key(policy="my policy", description="my key", key_usage='ENCRYPT_DECRYPT')
key_id = key['KeyMetadata']['KeyId']
key = conn.describe_key(key_id)
key['KeyMetadata']['Description'].should.equal("my key")
key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT")
@mock_kms
def test_describe_missing_key():
conn = boto.kms.connect_to_region("us-west-2")
conn.describe_key.when.called_with("not-a-key").should.throw(JSONResponseError)
@mock_kms
def test_list_keys():
conn = boto.kms.connect_to_region("us-west-2")
conn.create_key(policy="my policy", description="my key1", key_usage='ENCRYPT_DECRYPT')
conn.create_key(policy="my policy", description="my key2", key_usage='ENCRYPT_DECRYPT')
keys = conn.list_keys()
keys['Keys'].should.have.length_of(2)
@mock_kms
def test__create_alias__returns_none_if_correct():
kms = boto.connect_kms()
create_resp = kms.create_key()
key_id = create_resp['KeyMetadata']['KeyId']
resp = kms.create_alias('alias/my-alias', key_id)
resp.should.be.none
@mock_kms
def test__create_alias__raises_if_reserved_alias():
kms = boto.connect_kms()
create_resp = kms.create_key()
key_id = create_resp['KeyMetadata']['KeyId']
reserved_aliases = [
'alias/aws/ebs',
'alias/aws/s3',
'alias/aws/redshift',
'alias/aws/rds',
]
for alias_name in reserved_aliases:
with assert_raises(JSONResponseError) as err:
kms.create_alias(alias_name, key_id)
ex = err.exception
ex.error_message.should.be.none
ex.error_code.should.equal('NotAuthorizedException')
ex.body.should.equal({'__type': 'NotAuthorizedException'})
ex.reason.should.equal('Bad Request')
ex.status.should.equal(400)
@mock_kms
def test__create_alias__can_create_multiple_aliases_for_same_key_id():
kms = boto.connect_kms()
create_resp = kms.create_key()
key_id = create_resp['KeyMetadata']['KeyId']
kms.create_alias('alias/my-alias3', key_id).should.be.none
kms.create_alias('alias/my-alias4', key_id).should.be.none
kms.create_alias('alias/my-alias5', key_id).should.be.none
@mock_kms
def test__create_alias__raises_if_wrong_prefix():
kms = boto.connect_kms()
create_resp = kms.create_key()
key_id = create_resp['KeyMetadata']['KeyId']
with assert_raises(JSONResponseError) as err:
kms.create_alias('wrongprefix/my-alias', key_id)
ex = err.exception
ex.error_message.should.equal('Invalid identifier')
ex.error_code.should.equal('ValidationException')
ex.body.should.equal({'message': 'Invalid identifier', '__type': 'ValidationException'})
ex.reason.should.equal('Bad Request')
ex.status.should.equal(400)
@mock_kms
def test__create_alias__raises_if_duplicate():
region = 'us-west-2'
kms = boto.kms.connect_to_region(region)
create_resp = kms.create_key()
key_id = create_resp['KeyMetadata']['KeyId']
alias = 'alias/my-alias'
kms.create_alias(alias, key_id)
with assert_raises(AlreadyExistsException) as err:
kms.create_alias(alias, key_id)
ex = err.exception
ex.error_message.should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists'
.format(**locals()))
ex.error_code.should.be.none
ex.box_usage.should.be.none
ex.request_id.should.be.none
ex.body['message'].should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists'
.format(**locals()))
ex.body['__type'].should.equal('AlreadyExistsException')
ex.reason.should.equal('Bad Request')
ex.status.should.equal(400)
@mock_kms
def test__create_alias__raises_if_alias_has_restricted_characters():
kms = boto.connect_kms()
create_resp = kms.create_key()
key_id = create_resp['KeyMetadata']['KeyId']
alias_names_with_restricted_characters = [
'alias/my-alias!',
'alias/my-alias$',
'alias/my-alias@',
]
for alias_name in alias_names_with_restricted_characters:
with assert_raises(JSONResponseError) as err:
kms.create_alias(alias_name, key_id)
ex = err.exception
ex.body['__type'].should.equal('ValidationException')
ex.body['message'].should.equal("1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals()))
ex.error_code.should.equal('ValidationException')
ex.message.should.equal("1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals()))
ex.reason.should.equal('Bad Request')
ex.status.should.equal(400)
@mock_kms
def test__create_alias__raises_if_alias_has_colon_character():
# For some reason, colons are not accepted for an alias, even though they are accepted by regex ^[a-zA-Z0-9:/_-]+$
kms = boto.connect_kms()
create_resp = kms.create_key()
key_id = create_resp['KeyMetadata']['KeyId']
alias_names_with_restricted_characters = [
'alias/my:alias',
]
for alias_name in alias_names_with_restricted_characters:
with assert_raises(JSONResponseError) as err:
kms.create_alias(alias_name, key_id)
ex = err.exception
ex.body['__type'].should.equal('ValidationException')
ex.body['message'].should.equal("{alias_name} contains invalid characters for an alias".format(**locals()))
ex.error_code.should.equal('ValidationException')
ex.message.should.equal("{alias_name} contains invalid characters for an alias".format(**locals()))
ex.reason.should.equal('Bad Request')
ex.status.should.equal(400)
@mock_kms
def test__create_alias__accepted_characters():
kms = boto.connect_kms()
create_resp = kms.create_key()
key_id = create_resp['KeyMetadata']['KeyId']
alias_names_with_accepted_characters = [
'alias/my-alias_/',
'alias/my_alias-/',
]
for alias_name in alias_names_with_accepted_characters:
kms.create_alias(alias_name, key_id)
@mock_kms
def test__create_alias__raises_if_target_key_id_is_existing_alias():
kms = boto.connect_kms()
create_resp = kms.create_key()
key_id = create_resp['KeyMetadata']['KeyId']
alias = 'alias/my-alias'
kms.create_alias(alias, key_id)
with assert_raises(JSONResponseError) as err:
kms.create_alias(alias, alias)
ex = err.exception
ex.body['__type'].should.equal('ValidationException')
ex.body['message'].should.equal('Aliases must refer to keys. Not aliases')
ex.error_code.should.equal('ValidationException')
ex.message.should.equal('Aliases must refer to keys. Not aliases')
ex.reason.should.equal('Bad Request')
ex.status.should.equal(400)
@mock_kms
def test__delete_alias():
kms = boto.connect_kms()
create_resp = kms.create_key()
key_id = create_resp['KeyMetadata']['KeyId']
alias = 'alias/my-alias'
kms.create_alias(alias, key_id)
resp = kms.delete_alias(alias)
resp.should.be.none
# we can create the alias again, since it has been deleted
kms.create_alias(alias, key_id)
@mock_kms
def test__delete_alias__raises_if_wrong_prefix():
kms = boto.connect_kms()
with assert_raises(JSONResponseError) as err:
kms.delete_alias('wrongprefix/my-alias')
ex = err.exception
ex.body['__type'].should.equal('ValidationException')
ex.body['message'].should.equal('Invalid identifier')
ex.error_code.should.equal('ValidationException')
ex.message.should.equal('Invalid identifier')
ex.reason.should.equal('Bad Request')
ex.status.should.equal(400)
@mock_kms
def test__delete_alias__raises_if_alias_is_not_found():
region = 'us-west-2'
kms = boto.kms.connect_to_region(region)
alias_name = 'alias/unexisting-alias'
with assert_raises(NotFoundException) as err:
kms.delete_alias(alias_name)
ex = err.exception
ex.body['__type'].should.equal('NotFoundException')
ex.body['message'].should.match(r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals()))
ex.box_usage.should.be.none
ex.error_code.should.be.none
ex.message.should.match(r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals()))
ex.reason.should.equal('Bad Request')
ex.request_id.should.be.none
ex.status.should.equal(400)
@mock_kms
def test__list_aliases():
region = "eu-west-1"
kms = boto.kms.connect_to_region(region)
create_resp = kms.create_key()
key_id = create_resp['KeyMetadata']['KeyId']
kms.create_alias('alias/my-alias1', key_id)
kms.create_alias('alias/my-alias2', key_id)
kms.create_alias('alias/my-alias3', key_id)
resp = kms.list_aliases()
resp['Truncated'].should.be.false
aliases = resp['Aliases']
def has_correct_arn(alias_obj):
alias_name = alias_obj['AliasName']
alias_arn = alias_obj['AliasArn']
return re.match(r'arn:aws:kms:{region}:\d{{12}}:{alias_name}'.format(region=region, alias_name=alias_name),
alias_arn)
len([alias for alias in aliases if
has_correct_arn(alias) and 'alias/aws/ebs' == alias['AliasName']]).should.equal(1)
len([alias for alias in aliases if
has_correct_arn(alias) and 'alias/aws/rds' == alias['AliasName']]).should.equal(1)
len([alias for alias in aliases if
has_correct_arn(alias) and 'alias/aws/redshift' == alias['AliasName']]).should.equal(1)
len([alias for alias in aliases if
has_correct_arn(alias) and 'alias/aws/s3' == alias['AliasName']]).should.equal(1)
len([alias for alias in aliases if
has_correct_arn(alias) and 'alias/my-alias1' == alias['AliasName']]).should.equal(1)
len([alias for alias in aliases if
has_correct_arn(alias) and 'alias/my-alias2' == alias['AliasName']]).should.equal(1)
len([alias for alias in aliases if 'TargetKeyId' in alias and key_id == alias['TargetKeyId']]).should.equal(3)
len(aliases).should.equal(7)
| apache-2.0 |
DESHRAJ/fjord | vendor/packages/translate-toolkit/translate/misc/multistring.py | 29 | 3583 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Supports a hybrid Unicode string that can also have a list of alternate
strings in the strings attribute."""
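# A short behavioural sketch (not part of the original module): constructed
# from a list, the first element acts as the main string and the others are
# kept as alternates, e.g.
#
#     ms = multistring([u"cow", u"cows"])
#     ms == u"cow"    # True: comparisons use the main string
#     ms.strings      # the main string plus the alternate u"cows"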
from translate.misc import autoencode
class multistring(autoencode.autoencode):
def __new__(newtype, string=u"", encoding=None, errors=None):
if isinstance(string, list):
if not string:
raise ValueError("multistring must contain at least one string")
mainstring = string[0]
newstring = multistring.__new__(newtype, string[0],
encoding, errors)
newstring.strings = [newstring] + [autoencode.autoencode.__new__(autoencode.autoencode, altstring, encoding, errors) for altstring in string[1:]]
else:
newstring = autoencode.autoencode.__new__(newtype, string,
encoding, errors)
newstring.strings = [newstring]
return newstring
def __init__(self, *args, **kwargs):
super(multistring, self).__init__()
if not hasattr(self, "strings"):
self.strings = []
def __cmp__(self, otherstring):
if isinstance(otherstring, multistring):
parentcompare = cmp(autoencode.autoencode(self), otherstring)
if parentcompare:
return parentcompare
else:
return cmp(self.strings[1:], otherstring.strings[1:])
elif isinstance(otherstring, autoencode.autoencode):
return cmp(autoencode.autoencode(self), otherstring)
elif isinstance(otherstring, unicode):
return cmp(unicode(self), otherstring)
elif isinstance(otherstring, str):
return cmp(str(self), otherstring)
elif isinstance(otherstring, list) and otherstring:
return cmp(self, multistring(otherstring))
else:
return cmp(type(self), type(otherstring))
def __ne__(self, otherstring):
return self.__cmp__(otherstring) != 0
def __eq__(self, otherstring):
return self.__cmp__(otherstring) == 0
def __repr__(self):
parts = [autoencode.autoencode.__repr__(self)] + \
[repr(a) for a in self.strings[1:]]
return "multistring([" + ",".join(parts) + "])"
def replace(self, old, new, count=None):
if count is None:
newstr = multistring(super(multistring, self) \
.replace(old, new), self.encoding)
else:
newstr = multistring(super(multistring, self) \
.replace(old, new, count), self.encoding)
for s in self.strings[1:]:
if count is None:
newstr.strings.append(s.replace(old, new))
else:
newstr.strings.append(s.replace(old, new, count))
return newstr
| bsd-3-clause |
Saevon/webdnd | shared/utils/debug_toolbars.py | 1 | 1502 | import django
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import DebugPanel
import sys
class VersionDebugPanel(DebugPanel):
'''
Panel that displays the Django version.
'''
name = 'Version'
has_content = True
def nav_title(self):
return _('Versions')
def nav_subtitle(self):
return 'Django %s' % django.get_version()
def url(self):
return ''
def title(self):
return _('Versions')
def content(self):
versions = {}
versions['Web D&D'] = settings.VERSION
versions['Syncrae'] = settings.SYNCRAE_VERSION
context = self.context.copy()
context.update({
'versions': versions,
'paths': sys.path,
})
return render_to_string('debug_toolbar/panels/versions.html', context)
class SyncraeSpyDebugPanel(DebugPanel):
'''
Panel that shows Syncrae Messages
'''
name = 'Syncrae'
has_content = True
def nav_title(self):
return _('Syncrae')
def nav_subtitle(self):
return ''
def url(self):
return ''
def title(self):
return _('Syncrae')
def content(self):
return render_to_string('debug_syncrae.html', self.context)
class DividerDebugPanel(DebugPanel):
name = 'Divider'
has_content = False
def nav_title(self):
return ' '
| mit |
stefco/geco_data | geco_irig_plot.py | 1 | 5662 | #!/usr/bin/env python
# (c) Stefan Countryman, 2016-2017
DESC="""Plot an IRIG-B signal read from stdin. Assumes that the timeseries
is a sequence of newline-delimited float literals."""
FAST_CHANNEL_BITRATE = 16384 # for IRIG-B, DuoTone, etc.
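# Example invocation (sketch; assumes irigb.txt holds one second of data,
# i.e. 16384 newline-delimited floats at the fast-channel bitrate; the
# detector name and file names are placeholders):
#
#     cat irigb.txt | ./geco_irig_plot.py --detector H1 -O irigb.png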
# THE REST OF THE IMPORTS ARE AFTER THIS IF STATEMENT.
# Quits immediately on --help or -h flags to skip slow imports when you just
# want to read the help documentation.
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=DESC)
# TODO: make this -i and --ifo instead of detector.
parser.add_argument("--detector",
help=("the detector; used in the title of the output "
"plot"))
parser.add_argument("-O", "--outfile",
help="the filename of the generated plot")
parser.add_argument("-T", "--timeseries",
help="copy from stdin to stdout while reading",
action="store_true")
parser.add_argument("-A", "--actualtime",
help=("actual time signal was recorded "
"(appears in title)"))
args = parser.parse_args()
# Force matplotlib to not use any Xwindows backend. NECESSARY ON CLUSTER.
import matplotlib
matplotlib.use('Agg')
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import geco_irig_decode
def read_timeseries_stdin(num_lines, cat_to_stdout=False):
"""Read in newline-delimited numerical data from stdin; don't read more
than a second worth of data. If cat_to_stdout is True, print data that
has been read in back to stdout (useful for piped commands)."""
timeseries = np.zeros(num_lines)
line = ""
i = 0
while i < num_lines:
line = float(sys.stdin.readline())
timeseries[i] = line
if cat_to_stdout:
print(line)
i += 1
return timeseries
def irigb_decoded_title(timeseries, IFO=None, actual_time=None):
"""Get a title for an IRIG-B timeseries plot that includes the decoded
time in the timeseries itself."""
# get the detector name
if IFO is None:
detector_suffix = ""
else:
detector_suffix = " at " + IFO
# get the actual time of recording, if provided
if actual_time is None:
actual_time_str = ""
else:
actual_time_str = "\nActual Time: {}".format(actual_time)
# add title and so on
try:
decoded_time = geco_irig_decode.get_date_from_timeseries(timeseries)
decoded_time_str = decoded_time.strftime('%a %b %d %X %Y')
except ValueError as e:
decoded_time_str = "COULD NOT DECODE TIME"
fmt = "One Second of IRIG-B Signal{}\nDecoded Time: {}{}"
return fmt.format(detector_suffix, decoded_time_str, actual_time_str)
def irigb_output_filename(outfile=None):
"""Get the output filename for an IRIG-B plot."""
if outfile is None:
output_filename = "irigb-plot-made-at-" + str(time.time()) + ".png"
else:
output_filename = outfile
# append .png if not already there
if output_filename.split(".")[-1] != "png":
output_filename += ".png"
return output_filename
def plot_with_zoomed_views(timeseries, title, num_subdivs=5, dt=1.,
output_filename=None, overlay=False, linewidth=1):
"""Plot a timeseries and produce num_subdivs subplots that show equal-sized
subdivisions of the full timeseries data to show details (good for
high-bitrate timeseries). If you want to keep plotting data to the same
figure, set 'overlay=True', and the current figure will be plotted to."""
bitrate = int(len(timeseries) / float(dt))
times = np.linspace(0, 1, num=bitrate, endpoint=False)
# find max and min values in timeseries; use these to set plot boundaries
yrange = timeseries.max() - timeseries.min()
ymax = timeseries.max() + 0.1*yrange
ymin = timeseries.min() - 0.1*yrange
if not overlay:
plt.figure()
# print("making plot")
plt.gcf().set_figwidth(7)
plt.gcf().set_figheight(4+1.2*num_subdivs) # ~1.2in height per zoomed plot
# plot the full second on the first row; lines should be black ('k' option).
plt.subplot(num_subdivs + 1, 1, 1)
plt.ylim(ymin, ymax)
plt.plot(times, timeseries, 'k', linewidth=linewidth)
plt.tick_params(axis='y', labelsize='small')
# make num_subdivs subplots to better show the full second
for i in range(num_subdivs):
# print("making plot " + str(i))
plt.subplot(num_subdivs+1, 1, i+2)
plt.ylim(ymin, ymax)
plt.xlim(float(i)/num_subdivs, (float(i)+1)/num_subdivs)
start = bitrate*i // num_subdivs
end = bitrate*(i+1) // num_subdivs
plt.plot(times[start:end], timeseries[start:end], 'k',
linewidth=linewidth)
plt.tick_params(axis='y', labelsize='small')
plt.suptitle(title)
plt.xlabel("Time since start of second [$s$]")
# print("saving plot")
plt.subplots_adjust(left=0.125, right=0.9, bottom=0.1, top=0.9, wspace=0.2,
hspace=0.5)
if not (output_filename is None):
plt.savefig(output_filename)
return plt
if __name__ == '__main__':
timeseries = read_timeseries_stdin(FAST_CHANNEL_BITRATE,
cat_to_stdout=args.timeseries)
title = irigb_decoded_title(timeseries, args.detector, args.actualtime)
output_filename = irigb_output_filename(args.outfile)
plot_with_zoomed_views(timeseries, title, num_subdivs=5, dt=1.,
output_filename=output_filename)
| mit |
SnappleCap/oh-mainline | vendor/packages/twisted/doc/conch/examples/sshsimpleserver.py | 18 | 3772 | #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.cred import portal, checkers
from twisted.conch import error, avatar
from twisted.conch.checkers import SSHPublicKeyDatabase
from twisted.conch.ssh import factory, userauth, connection, keys, session
from twisted.internet import reactor, protocol, defer
from twisted.python import log
from zope.interface import implements
import sys
log.startLogging(sys.stderr)
"""
Example of running another protocol over an SSH channel.
Log in with username "user" and password "password".
"""
class ExampleAvatar(avatar.ConchUser):
def __init__(self, username):
avatar.ConchUser.__init__(self)
self.username = username
self.channelLookup.update({'session':session.SSHSession})
class ExampleRealm:
implements(portal.IRealm)
def requestAvatar(self, avatarId, mind, *interfaces):
return interfaces[0], ExampleAvatar(avatarId), lambda: None
class EchoProtocol(protocol.Protocol):
"""this is our example protocol that we will run over SSH
"""
def dataReceived(self, data):
if data == '\r':
data = '\r\n'
elif data == '\x03': #^C
self.transport.loseConnection()
return
self.transport.write(data)
publicKey = 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYLh5KmRpslkYHRivcJSkbh/C+BR3utDS555mV'
privateKey = """-----BEGIN RSA PRIVATE KEY-----
MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW
4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw
vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb
Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1
xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8
PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2
gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu
DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML
pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP
EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg==
-----END RSA PRIVATE KEY-----"""
class InMemoryPublicKeyChecker(SSHPublicKeyDatabase):
def checkKey(self, credentials):
return credentials.username == 'user' and \
keys.Key.fromString(data=publicKey).blob() == credentials.blob
class ExampleSession:
def __init__(self, avatar):
"""
We don't use it, but the adapter is passed the avatar as its first
argument.
"""
def getPty(self, term, windowSize, attrs):
pass
def execCommand(self, proto, cmd):
raise Exception("no executing commands")
def openShell(self, trans):
ep = EchoProtocol()
ep.makeConnection(trans)
trans.makeConnection(session.wrapProtocol(ep))
def eofReceived(self):
pass
def closed(self):
pass
from twisted.python import components
components.registerAdapter(ExampleSession, ExampleAvatar, session.ISession)
class ExampleFactory(factory.SSHFactory):
publicKeys = {
'ssh-rsa': keys.Key.fromString(data=publicKey)
}
privateKeys = {
'ssh-rsa': keys.Key.fromString(data=privateKey)
}
services = {
'ssh-userauth': userauth.SSHUserAuthServer,
'ssh-connection': connection.SSHConnection
}
portal = portal.Portal(ExampleRealm())
passwdDB = checkers.InMemoryUsernamePasswordDatabaseDontUse()
passwdDB.addUser('user', 'password')
portal.registerChecker(passwdDB)
portal.registerChecker(InMemoryPublicKeyChecker())
ExampleFactory.portal = portal
if __name__ == '__main__':
reactor.listenTCP(5022, ExampleFactory())
reactor.run()
| agpl-3.0 |
TNick/pyl2extra | pyl2extra/datasets/images.py | 1 | 13590 | """
Dataset for images and related functionality.
This module does not have dependencies inside pyl2extra package, so you
can just copy-paste it inside your source tree.
To use this dataset prepare a .csv file with targets (integers or real numbers)
on first column and file paths on the second column:
.. code::
0,file1.png
1,file2.png
Image file paths are relative to current directory (``os.getcwd()``). The
images need not be square and can be in any format recognized by the
``Image`` module. Internally, the images are converted to RGB and are made
square for you.
Use it in a .yaml file like so:
.. code::
dataset: &trndataset !obj:pyl2extra.datasets.images.Images {
source: 'train.csv',
image_size: 128
}
The ``image_size`` can be skipped, in which case the size of the images is
derived from first image that is provided.
By default the class assumes a classification problem (targets are integers).
If you need to uset it in a regression problem create it like so:
.. code::
dataset: &trndataset !obj:pyl2extra.datasets.images.Images {
source: 'train.csv',
image_size: 128,
regression: True
}
As the dataset simply wraps the ``DenseDesignMatrix``, parameters like
``rng`` (random number generator), ``preprocessor`` and ``fit_preprocessor``
can be used and will be passed to the ``DenseDesignMatrix`` superclass.
"""
__authors__ = "Nicu Tofan"
__copyright__ = "Copyright 2015, Nicu Tofan"
__credits__ = ["Nicu Tofan"]
__license__ = "3-clause BSD"
__maintainer__ = "Nicu Tofan"
__email__ = "[email protected]"
import csv
import numpy
import os
from PIL import Image
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
import theano
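# A minimal Python-side usage sketch (assumes a train.csv laid out as in the
# module docstring; shapes follow from the DenseDesignMatrix design-matrix
# view):
#
#     ds = Images(source='train.csv', image_size=128, classes=10)
#     print ds.X.shape   # (num_examples, 128 * 128 * 3)
#     print ds.y.shape   # (num_examples, 1)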
class Images(DenseDesignMatrix):
"""
A pylearn2 dataset that loads the images from a list or csv file.
Please note that - if you use this dataset and your model has a
final Softmax layer you should construct it like so (YAML syntax):
.. code::
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
irange: .0,
n_classes: %(classes)d,
binary_target_dim: 1
}
where ``classes`` is the same number of classes passed to ``Images``
constructor. ``binary_target_dim`` is important and failing to set it
constructs the wrong architecture, causing errors like:
ValueError: VectorSpace(dim=1, dtype=float32) with total dimension 1
can't format a batch into VectorSpace(dim=X, dtype=float32) because
its total dimension is X.
Parameters
----------
source: OrderedDict, dict, str, tuple, list
This argument provides the input images and (optionally)
associated categories. The meaning of the argument depends
on the data type:
- if ``source`` is a string, it is interpreted to be the
path towards a csv file; the file must NOT have a header,
first column must contain the targets (classes or values) and
second column must contain the paths for the image files;
- if ``source`` is a dictionary, the keys must be the
paths for image files, ``Image`` instances or numpy arrays and
the values must be the classes or values (None or empty
string if this instance does not provide the labels);
- a tuple or list must have exactly one or two
members: first one must be a list or tuple of image paths or
Images or numpy arrays, while second one (optional)
has the targets (classes as integers or real values).
image_size: int, optional
The size of the images in the final dataset. All images
will be resized to be ``image_size`` x ``image_size``
pixels.
classes: int, optional
If this is a classification problem the parameter should be
used to indicate the total number of classes and targets are
expected to be integers in the range ``[0; classes-1]``.
If this is a regression problem the parameter should be ``None`` and
targets are expected to be real numbers.
rng: object, optional
A random number generator used for picking random
indices into the design matrix when choosing minibatches.
preprocessor: Preprocessor, optional
Preprocessor to apply to images.
fit_preprocessor: bool, optional
Whether preprocessor can fit parameters when applied to training
data.
"""
def __init__(self, source, image_size=None, classes=None,
rng=None, preprocessor=None, fit_preprocessor=False):
#: preserve original argument for future reference
self.source = source
#: Number of classes (None for regression)
self.classes = classes
# all images are loaded in ``ind`` variable
ind = _init_input(source)
# DenseDesignMatrix expects us to provide a numpy array
# we choose to have number of examples on first axis ('b'),
# then rows and columns of the image, then the channels
# always 3 in our case
self.axes = ('b', 0, 1, 'c')
if image_size is None:
dense_x = None
else:
dense_x = numpy.zeros(shape=(len(ind), image_size, image_size, 3),
dtype='uint8')
categories = []
has_targets = False
for i, (img, ctg) in enumerate(ind):
if isinstance(img, Image.Image):
img = numpy.array(img)
width = img.shape[1]
height = img.shape[0]
largest = max(width, height)
if image_size is None:
# if the user did not specify an image size we determine
# the size using the first image that we encounter; this is
# useful if all images are already of the required size,
# for example
image_size = largest
dense_x = numpy.zeros(shape=(len(ind), image_size,
image_size, 3),
dtype='uint8')
imgin = img
# do we need to enlarge / shrink the image?
elif largest != image_size:
wpercent = image_size / float(largest)
width = int(width * wpercent)
height = int(height * wpercent)
largest = max(width, height)
# inefficient? could use scipy.ndimage.zoom.
img_tmp = Image.fromarray(img)
img_tmp = img_tmp.resize((width, height), Image.ANTIALIAS)
imgin = numpy.array(img_tmp)
else:
imgin = img
delta_x = (largest - width) / 2
delta_y = (largest - height) / 2
delta_x2 = delta_x + width
delta_y2 = delta_y + height
#print delta_x, delta_y, delta_x2, delta_y2, width, height
dense_x[i, delta_y:delta_y2, delta_x:delta_x2, :] = imgin
categories.append(ctg)
if ctg != '':
has_targets = True
dense_x = numpy.cast[theano.config.floatX](dense_x)
# if we have categories / values convert them to proper format
if has_targets:
if classes is None:
# in regression we expect real values
dense_y = numpy.empty(shape=(len(ind), 1),
dtype=theano.config.floatX)
for i, ctg in enumerate(categories):
dense_y[i, 0] = float(ctg)
else:
# in classification we expect integers
dense_y = numpy.empty(shape=(len(ind), 1), dtype=int)
for i, ctg in enumerate(categories):
dense_y[i, 0] = int(ctg)
else:
dense_y = None
if rng is None:
rng = DenseDesignMatrix._default_seed
# everything else is handled by the DenseDesignMatrix superclass
super(Images, self).__init__(topo_view=dense_x,
y=dense_y,
axes=self.axes,
view_converter=None,
preprocessor=preprocessor,
fit_preprocessor=fit_preprocessor,
X_labels=None,
y_labels=classes if has_targets else None)
def _init_input(source):
"""
Homogenize sources.
"""
if isinstance(source, basestring):
# this is a csv file that we're going to read
result = _load_list(_load_csv(source))
elif isinstance(source, dict):
# keys are file names, values are classes
result = _load_list(source.items())
elif isinstance(source, (list, tuple)):
# one item lists the files, the other lists the classes
if len(source) == 1:
result = _load_list([(src, None) for src in source[0]])
elif len(source) == 2:
if len(source[0]) == len(source[1]):
result = _load_list(zip(source[0], source[1]))
else:
raise ValueError("Lists/tuples provded to Images class "
"constructor are expected to have "
"same length (%d != %d)" %
(len(source[0]), len(source[1])))
else:
raise ValueError("Lists/tuples provided to Images class "
"constructor are expected to have one "
"(images only) or two members (images"
" and classes); the input has %d members." %
len(source))
else:
raise ValueError("Images class expects for its `source` argument "
"a file path (string), a dictionary of "
"file:class pairs, or a pair of lists (tuples); "
"%s is not supported" % str(source.__class__))
return result
def _load_csv(csv_path):
"""
Internal function for loading the content from a .csv file.
Parameters
----------
csv_path: str
The path towards the .csv file to read.
Returns
-------
result: list of tuples
The method creates a list of tuples that should be passed to
`_load_list()`.
"""
# we're going to accumulate files and categories here
result = []
# compute absolute path of the source csv file
csv_path = os.path.abspath(csv_path)
with open(csv_path, 'rt') as fhand:
# the reader is flexible, allowing delimiters
# other than comma; quotation can also be customized
csvr = csv.reader(fhand,
delimiter=',',
quotechar='"')
# the reader will give us a list for each row of
# the source file
for row in csvr:
# we're going to skip empty rows without warning
if len(row) == 0:
continue
# we could skip the header here, if present; we
# could even detect the column index from its
# name; but we try to keep the things simple
# class/value is always first, file path second
result.append((row[1], row[0]))
return result
def _load_list(srclist):
"""
Internal function for loading the content from a list.
Image files are converted to `numpy.ndarray`;
empty classes are normalized to a string of length 0.
Parameters
----------
srclist: list of tuples
A list of tuples, the first entry in each tuple being
a string, an Image or a `numpy.ndarray` instance and the
second being the class (None for no class).
Returns
-------
result: list of tuples
The method creates a list of tuples, with first entry in tuple being
`numpy.ndarray` instances and second being targets (None for no
target) - integer classes (classification) or real values
(regression).
"""
# we're going to accumulate Images and categories here
result = []
for img, cls in srclist:
if isinstance(img, basestring):
imgin = Image.open(img)
elif isinstance(img, numpy.ndarray):
imgin = Image.fromarray(img)
elif isinstance(img, Image.Image):
imgin = img
elif Image.isImageType(img):
imgin = img
else:
raise ValueError("Valid input for images are strings (a "
"path towards a file), pil images "
"and numpy arrays; %s is not supported" %
str(img.__class__))
if cls is None:
cls = ''
imgin = imgin.convert('RGB')
result.append((numpy.array(imgin), cls))
return result
def one_image(image, image_size=None, classes=None,
rng=None, preprocessor=None, fit_preprocessor=False):
"""
Convenience function that creates an Images dataset from a single image.
Parameters
----------
image: string, image or numpy.ndarray
The image to use as source.
See :class:`Images` for a description of other parameters.
"""
return Images(source=((image,),),
image_size=image_size, classes=classes,
rng=rng, preprocessor=preprocessor,
fit_preprocessor=fit_preprocessor)
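# e.g. (sketch): ds = one_image('cat.png', image_size=64) wraps a single
# file in a one-example dataset; 'cat.png' is a placeholder path.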
| bsd-3-clause |
shanglt/youtube-dl | youtube_dl/extractor/planetaplay.py | 113 | 1921 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class PlanetaPlayIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?planetaplay\.com/\?sng=(?P<id>[0-9]+)'
_API_URL = 'http://planetaplay.com/action/playlist/?sng={0:}'
_THUMBNAIL_URL = 'http://planetaplay.com/img/thumb/{thumb:}'
_TEST = {
'url': 'http://planetaplay.com/?sng=3586',
'md5': '9d569dceb7251a4e01355d5aea60f9db',
'info_dict': {
'id': '3586',
'ext': 'flv',
'title': 'md5:e829428ee28b1deed00de90de49d1da1',
},
'skip': 'Not accessible from Travis CI server',
}
_SONG_FORMATS = {
'lq': (0, 'http://www.planetaplay.com/videoplayback/{med_hash:}'),
'hq': (1, 'http://www.planetaplay.com/videoplayback/hi/{med_hash:}'),
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
response = self._download_json(
self._API_URL.format(video_id), video_id)['response']
try:
data = response.get('data')[0]
except IndexError:
raise ExtractorError(
'%s: failed to get the playlist' % self.IE_NAME, expected=True)
title = '{song_artists:} - {sng_name:}'.format(**data)
thumbnail = self._THUMBNAIL_URL.format(**data)
formats = []
for format_id, (quality, url_template) in self._SONG_FORMATS.items():
formats.append({
'format_id': format_id,
'url': url_template.format(**data),
'quality': quality,
'ext': 'flv',
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
}
| unlicense |
ampax/edx-platform-backup | common/djangoapps/external_auth/migrations/0001_initial.py | 114 | 6388 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ExternalAuthMap'
db.create_table('external_auth_externalauthmap', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('external_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('external_domain', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('external_credentials', self.gf('django.db.models.fields.TextField')(blank=True)),
('external_email', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('external_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, blank=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True, null=True)),
('internal_password', self.gf('django.db.models.fields.CharField')(max_length=31, blank=True)),
('dtcreated', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('dtsignup', self.gf('django.db.models.fields.DateTimeField')(null=True)),
))
db.send_create_signal('external_auth', ['ExternalAuthMap'])
# Adding unique constraint on 'ExternalAuthMap', fields ['external_id', 'external_domain']
db.create_unique('external_auth_externalauthmap', ['external_id', 'external_domain'])
def backwards(self, orm):
# Removing unique constraint on 'ExternalAuthMap', fields ['external_id', 'external_domain']
db.delete_unique('external_auth_externalauthmap', ['external_id', 'external_domain'])
# Deleting model 'ExternalAuthMap'
db.delete_table('external_auth_externalauthmap')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'external_auth.externalauthmap': {
'Meta': {'unique_together': "(('external_id', 'external_domain'),)", 'object_name': 'ExternalAuthMap'},
'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dtsignup': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'external_credentials': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'external_domain': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'external_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'external_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_password': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True'})
}
}
complete_apps = ['external_auth']
| agpl-3.0 |
prasanna08/oppia | scripts/linters/test_files/invalid_urlencode.py | 4 | 1489 | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python file with invalid syntax, used by scripts/linters/
python_linter_test. This file uses urlencode, which is not allowed.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import urllib
import python_utils
class FakeClass(python_utils.OBJECT):
"""This is a fake docstring for invalid syntax purposes."""
def __init__(self, fake_arg):
self.fake_arg = fake_arg
def fake_method(self, source_url, doseq):
"""This doesn't do anything.
Args:
source_url: str. The URL.
doseq: bool. Boolean value.
Returns:
urlencode(object): Returns urlencode object.
"""
# Use of urlencode is not allowed.
return urllib.urlencode(source_url, doseq=doseq)
| apache-2.0 |
mahak/cloudify-cli | cloudify_cli/commands/users.py | 1 | 9023 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
from .. import env
from ..cli import cfy
from ..table import print_data, print_single
from ..utils import handle_client_error
USER_COLUMNS = ['username', 'groups', 'role', 'group_system_roles', 'active',
'last_login_at', 'is_locked']
GET_DATA_COLUMNS = ['user_tenants', 'group_tenants']
NO_GET_DATA_COLUMNS = ['tenants']
USER_LABELS = {'role': 'system wide role',
'group_system_roles': 'system wide roles via groups'}
def _format_user(user):
user_tenants = dict(
(str(tenant), str(user.user_tenants[tenant]))
for tenant in user.user_tenants
)
group_tenants = dict(
(str(tenant),
dict(
(str(role),
[str(group) for group in user.group_tenants[tenant][role]])
for role in user.group_tenants[tenant]
))
for tenant in user.group_tenants
)
user['user_tenants'] = str(user_tenants)[1:-1]
user['group_tenants'] = str(group_tenants)[1:-1]
return user
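# Illustration (sketch): _format_user flattens the nested role mappings,
# e.g. user_tenants == {'default_tenant': 'user'} becomes the printable
# cell "'default_tenant': 'user'" (the surrounding braces are stripped).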
def _format_group_system_roles(user):
group_system_roles = dict(
(str(role),
[str(user_group) for user_group in user['group_system_roles'][role]])
for role in user['group_system_roles']
)
user['group_system_roles'] = str(group_system_roles).strip('{}')
return user
@cfy.group(name='users')
@cfy.options.common_options
def users():
"""Handle Cloudify users
"""
if not env.is_initialized():
env.raise_uninitialized()
@users.command(name='list', short_help='List users [manager only]')
@cfy.options.sort_by('username')
@cfy.options.descending
@cfy.options.common_options
@cfy.options.get_data
@cfy.options.search
@cfy.options.pagination_offset
@cfy.options.pagination_size
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def list(sort_by,
descending,
get_data,
search,
pagination_offset,
pagination_size,
logger,
client):
"""List all users
"""
logger.info('Listing all users...')
users_list = client.users.list(
sort=sort_by,
is_descending=descending,
_get_data=get_data,
_search=search,
_offset=pagination_offset,
_size=pagination_size
)
total = users_list.metadata.pagination.total
# copy list
columns = [] + USER_COLUMNS
users_list = [_format_group_system_roles(user) for user in users_list]
if get_data:
users_list = [_format_user(user) for user in users_list]
columns += GET_DATA_COLUMNS
else:
columns += NO_GET_DATA_COLUMNS
print_data(columns, users_list, 'Users:', labels=USER_LABELS)
logger.info('Showing {0} of {1} users'.format(len(users_list), total))
@users.command(name='create', short_help='Create a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.common_options
@cfy.options.security_role
@cfy.options.password
@cfy.options.tenant_name(required=False)
@cfy.options.user_tenant_role(required=False,
options_flags=['-l', '--user-tenant-role'])
@cfy.assert_manager_active()
@cfy.pass_client(use_tenant_in_header=False)
@cfy.pass_logger
def create(username,
security_role,
password,
tenant_name,
user_tenant_role,
logger,
client):
"""Create a new user on the manager
`USERNAME` is the username of the user
"""
client.users.create(username, password, security_role)
logger.info('User `{0}` created with `{1}` security role'.format(
username, security_role))
if tenant_name and user_tenant_role:
client.tenants.add_user(username, tenant_name, user_tenant_role)
logger.info(
'User `{0}` added successfully to tenant `{1}` with `{2}` role'
.format(username, tenant_name, user_tenant_role))
@users.command(name='set-password',
short_help='Set a new password for a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.password
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def set_password(username, password, logger, client):
"""Set a new password for a user
`USERNAME` is the username of the user
"""
logger.info('Setting new password for user {0}...'.format(username))
client.users.set_password(username, password)
logger.info('New password set')
@users.command(name='set-role',
short_help='Set a new role for a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.security_role
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def set_role(username, security_role, logger, client):
"""Set a new role for a user
`USERNAME` is the username of the user
"""
logger.info('Setting new role for user {0}...'.format(username))
client.users.set_role(username, security_role)
logger.info('New role `{0}` set'.format(security_role))
@users.command(name='get',
short_help='Get details for a single user [manager only]')
@cfy.argument(
'username', callback=cfy.validate_name, default=env.get_username())
@cfy.options.common_options
@cfy.options.get_data
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def get(username, get_data, logger, client):
"""Get details for a single user
`USERNAME` is the username of the user. (default: current user)
"""
logger.info('Getting info for user `{0}`...'.format(username))
if username == env.get_username():
user_details = client.users.get_self(_get_data=get_data)
else:
user_details = client.users.get(username, _get_data=get_data)
# copy list
columns = [] + USER_COLUMNS
if get_data:
_format_user(user_details)
columns += GET_DATA_COLUMNS
else:
columns += NO_GET_DATA_COLUMNS
print_single(columns,
user_details,
'Requested user info:',
labels=USER_LABELS)
@users.command(name='delete',
short_help='Delete a user [manager only]')
@cfy.argument('username', callback=cfy.validate_name)
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def delete(username, logger, client):
"""Delete a user
`USERNAME` is the username of the user
"""
logger.info('Deleting user `{0}`...'.format(username))
client.users.delete(username)
logger.info('User removed')
@users.command(name='activate',
short_help='Make an inactive user active [manager only]')
@cfy.argument('username')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def activate(username, logger, client):
"""Activate a user
`USERNAME` is the username of the user
"""
graceful_msg = 'User `{0}` is already active'.format(username)
logger.info('Activating user `{0}`...'.format(username))
with handle_client_error(409, graceful_msg, logger):
client.users.activate(username)
logger.info('User activated')
@users.command(name='deactivate',
short_help='Make an active user inactive [manager only]')
@cfy.argument('username')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def deactivate(username, logger, client):
"""Deactivate a user
`USERNAME` is the username of the user
"""
graceful_msg = 'User `{0}` is already inactive'.format(username)
logger.info('Deactivating user `{0}`...'.format(username))
with handle_client_error(409, graceful_msg, logger):
client.users.deactivate(username)
logger.info('User deactivated')
@users.command(name='unlock',
short_help='Unlock a locked user [manager only]')
@cfy.argument('username')
@cfy.options.common_options
@cfy.assert_manager_active()
@cfy.pass_client()
@cfy.pass_logger
def unlock(username, logger, client):
"""Unlock a locked user
`USERNAME` is the username of the user
"""
graceful_msg = 'User `{0}` is already unlocked'.format(username)
logger.info('Unlocking user `{0}`...'.format(username))
with handle_client_error(409, graceful_msg, logger):
client.users.unlock(username)
logger.info('User unlocked')
| apache-2.0 |
Krossom/python-for-android | python3-alpha/python3-src/Lib/encodings/aliases.py | 58 | 15133 | """ Encoding Aliases Support
This module is used by the encodings package search function to
map encodings names to module names.
Note that the search function normalizes the encoding names before
doing the lookup, so the mapping will have to map normalized
encoding names to module names.
Contents:
The following aliases dictionary contains mappings of all IANA
character set names for which the Python core library provides
codecs. In addition to these, a few Python specific codec
aliases have also been added.
"""
aliases = {
# Please keep this list sorted alphabetically by value !
# ascii codec
'646' : 'ascii',
'ansi_x3.4_1968' : 'ascii',
'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name
'ansi_x3.4_1986' : 'ascii',
'cp367' : 'ascii',
'csascii' : 'ascii',
'ibm367' : 'ascii',
'iso646_us' : 'ascii',
'iso_646.irv_1991' : 'ascii',
'iso_ir_6' : 'ascii',
'us' : 'ascii',
'us_ascii' : 'ascii',
## base64_codec codec
#'base64' : 'base64_codec',
#'base_64' : 'base64_codec',
# big5 codec
'big5_tw' : 'big5',
'csbig5' : 'big5',
# big5hkscs codec
'big5_hkscs' : 'big5hkscs',
'hkscs' : 'big5hkscs',
## bz2_codec codec
#'bz2' : 'bz2_codec',
# cp037 codec
'037' : 'cp037',
'csibm037' : 'cp037',
'ebcdic_cp_ca' : 'cp037',
'ebcdic_cp_nl' : 'cp037',
'ebcdic_cp_us' : 'cp037',
'ebcdic_cp_wt' : 'cp037',
'ibm037' : 'cp037',
'ibm039' : 'cp037',
# cp1026 codec
'1026' : 'cp1026',
'csibm1026' : 'cp1026',
'ibm1026' : 'cp1026',
# cp1140 codec
'1140' : 'cp1140',
'ibm1140' : 'cp1140',
# cp1250 codec
'1250' : 'cp1250',
'windows_1250' : 'cp1250',
# cp1251 codec
'1251' : 'cp1251',
'windows_1251' : 'cp1251',
# cp1252 codec
'1252' : 'cp1252',
'windows_1252' : 'cp1252',
# cp1253 codec
'1253' : 'cp1253',
'windows_1253' : 'cp1253',
# cp1254 codec
'1254' : 'cp1254',
'windows_1254' : 'cp1254',
# cp1255 codec
'1255' : 'cp1255',
'windows_1255' : 'cp1255',
# cp1256 codec
'1256' : 'cp1256',
'windows_1256' : 'cp1256',
# cp1257 codec
'1257' : 'cp1257',
'windows_1257' : 'cp1257',
# cp1258 codec
'1258' : 'cp1258',
'windows_1258' : 'cp1258',
# cp424 codec
'424' : 'cp424',
'csibm424' : 'cp424',
'ebcdic_cp_he' : 'cp424',
'ibm424' : 'cp424',
# cp437 codec
'437' : 'cp437',
'cspc8codepage437' : 'cp437',
'ibm437' : 'cp437',
# cp500 codec
'500' : 'cp500',
'csibm500' : 'cp500',
'ebcdic_cp_be' : 'cp500',
'ebcdic_cp_ch' : 'cp500',
'ibm500' : 'cp500',
# cp775 codec
'775' : 'cp775',
'cspc775baltic' : 'cp775',
'ibm775' : 'cp775',
# cp850 codec
'850' : 'cp850',
'cspc850multilingual' : 'cp850',
'ibm850' : 'cp850',
# cp852 codec
'852' : 'cp852',
'cspcp852' : 'cp852',
'ibm852' : 'cp852',
# cp855 codec
'855' : 'cp855',
'csibm855' : 'cp855',
'ibm855' : 'cp855',
# cp857 codec
'857' : 'cp857',
'csibm857' : 'cp857',
'ibm857' : 'cp857',
# cp858 codec
'858' : 'cp858',
'csibm858' : 'cp858',
'ibm858' : 'cp858',
# cp860 codec
'860' : 'cp860',
'csibm860' : 'cp860',
'ibm860' : 'cp860',
# cp861 codec
'861' : 'cp861',
'cp_is' : 'cp861',
'csibm861' : 'cp861',
'ibm861' : 'cp861',
# cp862 codec
'862' : 'cp862',
'cspc862latinhebrew' : 'cp862',
'ibm862' : 'cp862',
# cp863 codec
'863' : 'cp863',
'csibm863' : 'cp863',
'ibm863' : 'cp863',
# cp864 codec
'864' : 'cp864',
'csibm864' : 'cp864',
'ibm864' : 'cp864',
# cp865 codec
'865' : 'cp865',
'csibm865' : 'cp865',
'ibm865' : 'cp865',
# cp866 codec
'866' : 'cp866',
'csibm866' : 'cp866',
'ibm866' : 'cp866',
# cp869 codec
'869' : 'cp869',
'cp_gr' : 'cp869',
'csibm869' : 'cp869',
'ibm869' : 'cp869',
# cp932 codec
'932' : 'cp932',
'ms932' : 'cp932',
'mskanji' : 'cp932',
'ms_kanji' : 'cp932',
# cp949 codec
'949' : 'cp949',
'ms949' : 'cp949',
'uhc' : 'cp949',
# cp950 codec
'950' : 'cp950',
'ms950' : 'cp950',
# euc_jis_2004 codec
'jisx0213' : 'euc_jis_2004',
'eucjis2004' : 'euc_jis_2004',
'euc_jis2004' : 'euc_jis_2004',
# euc_jisx0213 codec
'eucjisx0213' : 'euc_jisx0213',
# euc_jp codec
'eucjp' : 'euc_jp',
'ujis' : 'euc_jp',
'u_jis' : 'euc_jp',
# euc_kr codec
'euckr' : 'euc_kr',
'korean' : 'euc_kr',
'ksc5601' : 'euc_kr',
'ks_c_5601' : 'euc_kr',
'ks_c_5601_1987' : 'euc_kr',
'ksx1001' : 'euc_kr',
'ks_x_1001' : 'euc_kr',
# gb18030 codec
'gb18030_2000' : 'gb18030',
# gb2312 codec
'chinese' : 'gb2312',
'csiso58gb231280' : 'gb2312',
'euc_cn' : 'gb2312',
'euccn' : 'gb2312',
'eucgb2312_cn' : 'gb2312',
'gb2312_1980' : 'gb2312',
'gb2312_80' : 'gb2312',
'iso_ir_58' : 'gb2312',
# gbk codec
'936' : 'gbk',
'cp936' : 'gbk',
'ms936' : 'gbk',
## hex_codec codec
#'hex' : 'hex_codec',
# hp_roman8 codec
'roman8' : 'hp_roman8',
'r8' : 'hp_roman8',
'csHPRoman8' : 'hp_roman8',
# hz codec
'hzgb' : 'hz',
'hz_gb' : 'hz',
'hz_gb_2312' : 'hz',
# iso2022_jp codec
'csiso2022jp' : 'iso2022_jp',
'iso2022jp' : 'iso2022_jp',
'iso_2022_jp' : 'iso2022_jp',
# iso2022_jp_1 codec
'iso2022jp_1' : 'iso2022_jp_1',
'iso_2022_jp_1' : 'iso2022_jp_1',
# iso2022_jp_2 codec
'iso2022jp_2' : 'iso2022_jp_2',
'iso_2022_jp_2' : 'iso2022_jp_2',
# iso2022_jp_2004 codec
'iso_2022_jp_2004' : 'iso2022_jp_2004',
'iso2022jp_2004' : 'iso2022_jp_2004',
# iso2022_jp_3 codec
'iso2022jp_3' : 'iso2022_jp_3',
'iso_2022_jp_3' : 'iso2022_jp_3',
# iso2022_jp_ext codec
'iso2022jp_ext' : 'iso2022_jp_ext',
'iso_2022_jp_ext' : 'iso2022_jp_ext',
# iso2022_kr codec
'csiso2022kr' : 'iso2022_kr',
'iso2022kr' : 'iso2022_kr',
'iso_2022_kr' : 'iso2022_kr',
# iso8859_10 codec
'csisolatin6' : 'iso8859_10',
'iso_8859_10' : 'iso8859_10',
'iso_8859_10_1992' : 'iso8859_10',
'iso_ir_157' : 'iso8859_10',
'l6' : 'iso8859_10',
'latin6' : 'iso8859_10',
# iso8859_11 codec
'thai' : 'iso8859_11',
'iso_8859_11' : 'iso8859_11',
'iso_8859_11_2001' : 'iso8859_11',
# iso8859_13 codec
'iso_8859_13' : 'iso8859_13',
'l7' : 'iso8859_13',
'latin7' : 'iso8859_13',
# iso8859_14 codec
'iso_8859_14' : 'iso8859_14',
'iso_8859_14_1998' : 'iso8859_14',
'iso_celtic' : 'iso8859_14',
'iso_ir_199' : 'iso8859_14',
'l8' : 'iso8859_14',
'latin8' : 'iso8859_14',
# iso8859_15 codec
'iso_8859_15' : 'iso8859_15',
'l9' : 'iso8859_15',
'latin9' : 'iso8859_15',
# iso8859_16 codec
'iso_8859_16' : 'iso8859_16',
'iso_8859_16_2001' : 'iso8859_16',
'iso_ir_226' : 'iso8859_16',
'l10' : 'iso8859_16',
'latin10' : 'iso8859_16',
# iso8859_2 codec
'csisolatin2' : 'iso8859_2',
'iso_8859_2' : 'iso8859_2',
'iso_8859_2_1987' : 'iso8859_2',
'iso_ir_101' : 'iso8859_2',
'l2' : 'iso8859_2',
'latin2' : 'iso8859_2',
# iso8859_3 codec
'csisolatin3' : 'iso8859_3',
'iso_8859_3' : 'iso8859_3',
'iso_8859_3_1988' : 'iso8859_3',
'iso_ir_109' : 'iso8859_3',
'l3' : 'iso8859_3',
'latin3' : 'iso8859_3',
# iso8859_4 codec
'csisolatin4' : 'iso8859_4',
'iso_8859_4' : 'iso8859_4',
'iso_8859_4_1988' : 'iso8859_4',
'iso_ir_110' : 'iso8859_4',
'l4' : 'iso8859_4',
'latin4' : 'iso8859_4',
# iso8859_5 codec
'csisolatincyrillic' : 'iso8859_5',
'cyrillic' : 'iso8859_5',
'iso_8859_5' : 'iso8859_5',
'iso_8859_5_1988' : 'iso8859_5',
'iso_ir_144' : 'iso8859_5',
# iso8859_6 codec
'arabic' : 'iso8859_6',
'asmo_708' : 'iso8859_6',
'csisolatinarabic' : 'iso8859_6',
'ecma_114' : 'iso8859_6',
'iso_8859_6' : 'iso8859_6',
'iso_8859_6_1987' : 'iso8859_6',
'iso_ir_127' : 'iso8859_6',
# iso8859_7 codec
'csisolatingreek' : 'iso8859_7',
'ecma_118' : 'iso8859_7',
'elot_928' : 'iso8859_7',
'greek' : 'iso8859_7',
'greek8' : 'iso8859_7',
'iso_8859_7' : 'iso8859_7',
'iso_8859_7_1987' : 'iso8859_7',
'iso_ir_126' : 'iso8859_7',
# iso8859_8 codec
'csisolatinhebrew' : 'iso8859_8',
'hebrew' : 'iso8859_8',
'iso_8859_8' : 'iso8859_8',
'iso_8859_8_1988' : 'iso8859_8',
'iso_ir_138' : 'iso8859_8',
# iso8859_9 codec
'csisolatin5' : 'iso8859_9',
'iso_8859_9' : 'iso8859_9',
'iso_8859_9_1989' : 'iso8859_9',
'iso_ir_148' : 'iso8859_9',
'l5' : 'iso8859_9',
'latin5' : 'iso8859_9',
# johab codec
'cp1361' : 'johab',
'ms1361' : 'johab',
# koi8_r codec
'cskoi8r' : 'koi8_r',
# latin_1 codec
#
# Note that the latin_1 codec is implemented internally in C and a
# lot faster than the charmap codec iso8859_1 which uses the same
# encoding. This is why we discourage the use of the iso8859_1
# codec and alias it to latin_1 instead.
#
'8859' : 'latin_1',
'cp819' : 'latin_1',
'csisolatin1' : 'latin_1',
'ibm819' : 'latin_1',
'iso8859' : 'latin_1',
'iso8859_1' : 'latin_1',
'iso_8859_1' : 'latin_1',
'iso_8859_1_1987' : 'latin_1',
'iso_ir_100' : 'latin_1',
'l1' : 'latin_1',
'latin' : 'latin_1',
'latin1' : 'latin_1',
# mac_cyrillic codec
'maccyrillic' : 'mac_cyrillic',
# mac_greek codec
'macgreek' : 'mac_greek',
# mac_iceland codec
'maciceland' : 'mac_iceland',
# mac_latin2 codec
'maccentraleurope' : 'mac_latin2',
'maclatin2' : 'mac_latin2',
# mac_roman codec
'macintosh' : 'mac_roman',
'macroman' : 'mac_roman',
# mac_turkish codec
'macturkish' : 'mac_turkish',
# mbcs codec
'dbcs' : 'mbcs',
# ptcp154 codec
'csptcp154' : 'ptcp154',
'pt154' : 'ptcp154',
'cp154' : 'ptcp154',
'cyrillic_asian' : 'ptcp154',
## quopri_codec codec
#'quopri' : 'quopri_codec',
#'quoted_printable' : 'quopri_codec',
#'quotedprintable' : 'quopri_codec',
## rot_13 codec
#'rot13' : 'rot_13',
# shift_jis codec
'csshiftjis' : 'shift_jis',
'shiftjis' : 'shift_jis',
'sjis' : 'shift_jis',
's_jis' : 'shift_jis',
# shift_jis_2004 codec
'shiftjis2004' : 'shift_jis_2004',
'sjis_2004' : 'shift_jis_2004',
's_jis_2004' : 'shift_jis_2004',
# shift_jisx0213 codec
'shiftjisx0213' : 'shift_jisx0213',
'sjisx0213' : 'shift_jisx0213',
's_jisx0213' : 'shift_jisx0213',
# tactis codec
'tis260' : 'tactis',
# tis_620 codec
'tis620' : 'tis_620',
'tis_620_0' : 'tis_620',
'tis_620_2529_0' : 'tis_620',
'tis_620_2529_1' : 'tis_620',
'iso_ir_166' : 'tis_620',
# utf_16 codec
'u16' : 'utf_16',
'utf16' : 'utf_16',
# utf_16_be codec
'unicodebigunmarked' : 'utf_16_be',
'utf_16be' : 'utf_16_be',
# utf_16_le codec
'unicodelittleunmarked' : 'utf_16_le',
'utf_16le' : 'utf_16_le',
# utf_32 codec
'u32' : 'utf_32',
'utf32' : 'utf_32',
# utf_32_be codec
'utf_32be' : 'utf_32_be',
# utf_32_le codec
'utf_32le' : 'utf_32_le',
# utf_7 codec
'u7' : 'utf_7',
'utf7' : 'utf_7',
'unicode_1_1_utf_7' : 'utf_7',
# utf_8 codec
'u8' : 'utf_8',
'utf' : 'utf_8',
'utf8' : 'utf_8',
'utf8_ucs2' : 'utf_8',
'utf8_ucs4' : 'utf_8',
## uu_codec codec
#'uu' : 'uu_codec',
## zlib_codec codec
#'zip' : 'zlib_codec',
#'zlib' : 'zlib_codec',
# temporary mac CJK aliases, will be replaced by proper codecs in 3.1
'x_mac_japanese' : 'shift_jis',
'x_mac_korean' : 'euc_kr',
'x_mac_simp_chinese' : 'gb2312',
'x_mac_trad_chinese' : 'big5',
}
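# A hedged lookup sketch: codecs.lookup() consults this alias table after
# normalizing the requested name, so (if uncommented) the following holds:
#
#   import codecs
#   codecs.lookup('sjis').name      # -> 'shift_jis'
#   codecs.lookup('u8').name        # -> 'utf-8'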
| apache-2.0 |
tony/flask | tests/test_blueprints.py | 143 | 18147 | # -*- coding: utf-8 -*-
"""
tests.blueprints
~~~~~~~~~~~~~~~~
Blueprints (and currently modules)
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
from flask._compat import text_type
from werkzeug.http import parse_cache_control_header
from jinja2 import TemplateNotFound
def test_blueprint_specific_error_handling():
frontend = flask.Blueprint('frontend', __name__)
backend = flask.Blueprint('backend', __name__)
sideend = flask.Blueprint('sideend', __name__)
@frontend.errorhandler(403)
def frontend_forbidden(e):
return 'frontend says no', 403
@frontend.route('/frontend-no')
def frontend_no():
flask.abort(403)
@backend.errorhandler(403)
def backend_forbidden(e):
return 'backend says no', 403
@backend.route('/backend-no')
def backend_no():
flask.abort(403)
@sideend.route('/what-is-a-sideend')
def sideend_no():
flask.abort(403)
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
app.register_blueprint(sideend)
@app.errorhandler(403)
def app_forbidden(e):
return 'application itself says no', 403
c = app.test_client()
assert c.get('/frontend-no').data == b'frontend says no'
assert c.get('/backend-no').data == b'backend says no'
assert c.get('/what-is-a-sideend').data == b'application itself says no'
def test_blueprint_specific_user_error_handling():
class MyDecoratorException(Exception):
pass
class MyFunctionException(Exception):
pass
blue = flask.Blueprint('blue', __name__)
@blue.errorhandler(MyDecoratorException)
def my_decorator_exception_handler(e):
assert isinstance(e, MyDecoratorException)
return 'boom'
def my_function_exception_handler(e):
assert isinstance(e, MyFunctionException)
return 'bam'
blue.register_error_handler(MyFunctionException, my_function_exception_handler)
@blue.route('/decorator')
def blue_deco_test():
raise MyDecoratorException()
@blue.route('/function')
def blue_func_test():
raise MyFunctionException()
app = flask.Flask(__name__)
app.register_blueprint(blue)
c = app.test_client()
assert c.get('/decorator').data == b'boom'
assert c.get('/function').data == b'bam'
def test_blueprint_url_definitions():
bp = flask.Blueprint('test', __name__)
@bp.route('/foo', defaults={'baz': 42})
def foo(bar, baz):
return '%s/%d' % (bar, baz)
@bp.route('/bar')
def bar(bar):
return text_type(bar)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/1', url_defaults={'bar': 23})
app.register_blueprint(bp, url_prefix='/2', url_defaults={'bar': 19})
c = app.test_client()
assert c.get('/1/foo').data == b'23/42'
assert c.get('/2/foo').data == b'19/42'
assert c.get('/1/bar').data == b'23'
assert c.get('/2/bar').data == b'19'
def test_blueprint_url_processors():
bp = flask.Blueprint('frontend', __name__, url_prefix='/<lang_code>')
@bp.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', flask.g.lang_code)
@bp.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code')
@bp.route('/')
def index():
return flask.url_for('.about')
@bp.route('/about')
def about():
return flask.url_for('.index')
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
assert c.get('/de/').data == b'/de/about'
assert c.get('/de/about').data == b'/de/'
def test_templates_and_static(test_apps):
from blueprintapp import app
c = app.test_client()
rv = c.get('/')
assert rv.data == b'Hello from the Frontend'
rv = c.get('/admin/')
assert rv.data == b'Hello from the Admin'
rv = c.get('/admin/index2')
assert rv.data == b'Hello from the Admin'
rv = c.get('/admin/static/test.txt')
assert rv.data.strip() == b'Admin File'
rv.close()
rv = c.get('/admin/static/css/test.css')
assert rv.data.strip() == b'/* nested file */'
rv.close()
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
expected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == expected_max_age:
expected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = expected_max_age
rv = c.get('/admin/static/css/test.css')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == expected_max_age
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
with app.test_request_context():
assert flask.url_for('admin.static', filename='test.txt') == '/admin/static/test.txt'
with app.test_request_context():
try:
flask.render_template('missing.html')
except TemplateNotFound as e:
assert e.name == 'missing.html'
else:
assert 0, 'expected exception'
with flask.Flask(__name__).test_request_context():
assert flask.render_template('nested/nested.txt') == 'I\'m nested'
def test_default_static_cache_timeout():
app = flask.Flask(__name__)
class MyBlueprint(flask.Blueprint):
def get_send_file_max_age(self, filename):
return 100
blueprint = MyBlueprint('blueprint', __name__, static_folder='static')
app.register_blueprint(blueprint)
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
with app.test_request_context():
unexpected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == unexpected_max_age:
unexpected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = unexpected_max_age
rv = blueprint.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 100
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
def test_templates_list(test_apps):
from blueprintapp import app
templates = sorted(app.jinja_env.list_templates())
assert templates == ['admin/index.html', 'frontend/index.html']
def test_dotted_names():
frontend = flask.Blueprint('myapp.frontend', __name__)
backend = flask.Blueprint('myapp.backend', __name__)
@frontend.route('/fe')
def frontend_index():
return flask.url_for('myapp.backend.backend_index')
@frontend.route('/fe2')
def frontend_page2():
return flask.url_for('.frontend_index')
@backend.route('/be')
def backend_index():
return flask.url_for('myapp.frontend.frontend_index')
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
c = app.test_client()
assert c.get('/fe').data.strip() == b'/be'
assert c.get('/fe2').data.strip() == b'/fe'
assert c.get('/be').data.strip() == b'/fe'
def test_dotted_names_from_app():
app = flask.Flask(__name__)
app.testing = True
test = flask.Blueprint('test', __name__)
@app.route('/')
def app_index():
return flask.url_for('test.index')
@test.route('/test/')
def index():
return flask.url_for('app_index')
app.register_blueprint(test)
with app.test_client() as c:
rv = c.get('/')
assert rv.data == b'/test/'
def test_empty_url_defaults():
bp = flask.Blueprint('bp', __name__)
@bp.route('/', defaults={'page': 1})
@bp.route('/page/<int:page>')
def something(page):
return str(page)
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
assert c.get('/').data == b'1'
assert c.get('/page/2').data == b'2'
def test_route_decorator_custom_endpoint():
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
@bp.route('/bar', endpoint='bar')
def foo_bar():
return flask.request.endpoint
@bp.route('/bar/123', endpoint='123')
def foo_bar_foo():
return flask.request.endpoint
@bp.route('/bar/foo')
def bar_foo():
return flask.request.endpoint
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.request.endpoint
c = app.test_client()
assert c.get('/').data == b'index'
assert c.get('/py/foo').data == b'bp.foo'
assert c.get('/py/bar').data == b'bp.bar'
assert c.get('/py/bar/123').data == b'bp.123'
assert c.get('/py/bar/foo').data == b'bp.bar_foo'
def test_route_decorator_custom_endpoint_with_dots():
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
try:
@bp.route('/bar', endpoint='bar.bar')
def foo_bar():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
try:
@bp.route('/bar/123', endpoint='bar.123')
def foo_bar_foo():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
def foo_foo_foo():
pass
pytest.raises(
AssertionError,
lambda: bp.add_url_rule(
'/bar/123', endpoint='bar.123', view_func=foo_foo_foo
)
)
pytest.raises(
AssertionError,
bp.route('/bar/123', endpoint='bar.123'),
lambda: None
)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
c = app.test_client()
assert c.get('/py/foo').data == b'bp.foo'
    # The rules didn't actually make it through
rv = c.get('/py/bar')
assert rv.status_code == 404
rv = c.get('/py/bar/123')
assert rv.status_code == 404
def test_template_filter():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_add_template_filter():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_template_filter_with_name():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('strrev')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_add_template_filter_with_name():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'strrev')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_template_filter_with_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_after_route_with_template():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_template():
bp = flask.Blueprint('bp', __name__)
def super_reverse(s):
return s[::-1]
bp.add_app_template_filter(super_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'super_reverse')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_test():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'is_boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['is_boolean'] == is_boolean
assert app.jinja_env.tests['is_boolean'](False)
def test_add_template_test():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'is_boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['is_boolean'] == is_boolean
assert app.jinja_env.tests['is_boolean'](False)
def test_template_test_with_name():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_add_template_test_with_name():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_after_route_with_template():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_template():
bp = flask.Blueprint('bp', __name__)
def boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
| bsd-3-clause |
jasonseminara/OpenSourceFinal | lib/python3.5/site-packages/pip/_vendor/distlib/markers.py | 1261 | 6282 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Parser for the environment markers micro-language defined in PEP 345."""
import ast
import os
import sys
import platform
from .compat import python_implementation, string_types
from .util import in_venv
__all__ = ['interpret']
class Evaluator(object):
"""
A limited evaluator for Python expressions.
"""
operators = {
'eq': lambda x, y: x == y,
'gt': lambda x, y: x > y,
'gte': lambda x, y: x >= y,
'in': lambda x, y: x in y,
'lt': lambda x, y: x < y,
'lte': lambda x, y: x <= y,
'not': lambda x: not x,
'noteq': lambda x, y: x != y,
'notin': lambda x, y: x not in y,
}
allowed_values = {
'sys_platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
        # parsing sys.version is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os_name': os.name,
'platform_in_venv': str(in_venv()),
'platform_release': platform.release(),
'platform_version': platform.version(),
'platform_machine': platform.machine(),
'platform_python_implementation': python_implementation(),
}
def __init__(self, context=None):
"""
Initialise an instance.
:param context: If specified, names are looked up in this mapping.
"""
self.context = context or {}
self.source = None
def get_fragment(self, offset):
"""
Get the part of the source which is causing a problem.
"""
fragment_len = 10
s = '%r' % (self.source[offset:offset + fragment_len])
if offset + fragment_len < len(self.source):
s += '...'
return s
def get_handler(self, node_type):
"""
Get a handler for the specified AST node type.
"""
return getattr(self, 'do_%s' % node_type, None)
def evaluate(self, node, filename=None):
"""
Evaluate a source string or node, using ``filename`` when
displaying errors.
"""
if isinstance(node, string_types):
self.source = node
kwargs = {'mode': 'eval'}
if filename:
kwargs['filename'] = filename
try:
node = ast.parse(node, **kwargs)
except SyntaxError as e:
s = self.get_fragment(e.offset)
raise SyntaxError('syntax error %s' % s)
node_type = node.__class__.__name__.lower()
handler = self.get_handler(node_type)
if handler is None:
if self.source is None:
s = '(source not available)'
else:
s = self.get_fragment(node.col_offset)
raise SyntaxError("don't know how to evaluate %r %s" % (
node_type, s))
return handler(node)
def get_attr_key(self, node):
assert isinstance(node, ast.Attribute), 'attribute node expected'
return '%s.%s' % (node.value.id, node.attr)
def do_attribute(self, node):
if not isinstance(node.value, ast.Name):
valid = False
else:
key = self.get_attr_key(node)
valid = key in self.context or key in self.allowed_values
if not valid:
raise SyntaxError('invalid expression: %s' % key)
if key in self.context:
result = self.context[key]
else:
result = self.allowed_values[key]
return result
def do_boolop(self, node):
result = self.evaluate(node.values[0])
is_or = node.op.__class__ is ast.Or
is_and = node.op.__class__ is ast.And
assert is_or or is_and
if (is_and and result) or (is_or and not result):
for n in node.values[1:]:
result = self.evaluate(n)
if (is_or and result) or (is_and and not result):
break
return result
def do_compare(self, node):
def sanity_check(lhsnode, rhsnode):
valid = True
if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str):
valid = False
#elif (isinstance(lhsnode, ast.Attribute)
# and isinstance(rhsnode, ast.Attribute)):
# klhs = self.get_attr_key(lhsnode)
# krhs = self.get_attr_key(rhsnode)
# valid = klhs != krhs
if not valid:
s = self.get_fragment(node.col_offset)
raise SyntaxError('Invalid comparison: %s' % s)
lhsnode = node.left
lhs = self.evaluate(lhsnode)
result = True
for op, rhsnode in zip(node.ops, node.comparators):
sanity_check(lhsnode, rhsnode)
op = op.__class__.__name__.lower()
if op not in self.operators:
raise SyntaxError('unsupported operation: %r' % op)
rhs = self.evaluate(rhsnode)
result = self.operators[op](lhs, rhs)
if not result:
break
lhs = rhs
lhsnode = rhsnode
return result
def do_expression(self, node):
return self.evaluate(node.body)
def do_name(self, node):
valid = False
if node.id in self.context:
valid = True
result = self.context[node.id]
elif node.id in self.allowed_values:
valid = True
result = self.allowed_values[node.id]
if not valid:
raise SyntaxError('invalid expression: %s' % node.id)
return result
def do_str(self, node):
return node.s
def interpret(marker, execution_context=None):
"""
Interpret a marker and return a result depending on environment.
:param marker: The marker to interpret.
:type marker: str
:param execution_context: The context used for name lookup.
:type execution_context: mapping
"""
return Evaluator(execution_context).evaluate(marker.strip())
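# A minimal usage sketch (the marker strings are illustrative; interpret()
# returns the boolean result of evaluating the marker):
#
#   interpret('python_version >= "2.6"')
#   interpret("os_name == 'posix' and 'linux' in sys_platform")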
| mit |
bclau/nova | tools/patch_tox_venv.py | 11 | 1659 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import install_venv_common as install_venv # noqa
def first_file(file_list):
for candidate in file_list:
if os.path.exists(candidate):
return candidate
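# e.g. first_file(['requirements.txt', 'tools/pip-requires']) returns the
# first of those paths that exists on disk, or None if neither does.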
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
venv = os.environ['VIRTUAL_ENV']
pip_requires = first_file([
os.path.join(root, 'requirements.txt'),
os.path.join(root, 'tools', 'pip-requires'),
])
test_requires = first_file([
os.path.join(root, 'test-requirements.txt'),
os.path.join(root, 'tools', 'test-requires'),
])
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'nova'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
    # NOTE(dprince): For Tox we only run post_process (which patches files, etc)
install.post_process()
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
nebril/fuel-web | tasklib/tasklib/tests/functional/test_run_exec.py | 4 | 2108 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tasklib.tests import base
from tasklib.utils import STATUS
class TestFunctionalExecTasks(base.BaseFunctionalTest):
"""Each test will follow next pattern:
1. Run test with provided name - taskcmd -c conf.yaml run test/test
2. check status of task
"""
def test_simple_run(self):
exit_code, out, err = self.execute(['run', 'exec/simple'])
self.assertEqual(exit_code, 0)
exit_code, out, err = self.execute(['status', 'exec/simple'])
self.assertEqual(out.strip('\n'), STATUS.end.name)
self.assertEqual(exit_code, 0)
def test_failed_run(self):
exit_code, out, err = self.execute(['run', 'exec/fail'])
self.assertEqual(exit_code, 2)
exit_code, out, err = self.execute(['status', 'exec/fail'])
self.assertEqual(out.strip('\n'), STATUS.failed.name)
self.assertEqual(exit_code, 2)
def test_error(self):
exit_code, out, err = self.execute(['run', 'exec/error'])
self.assertEqual(exit_code, 3)
exit_code, out, err = self.execute(['status', 'exec/error'])
self.assertEqual(out.strip('\n'), STATUS.error.name)
self.assertEqual(exit_code, 3)
def test_notfound(self):
exit_code, out, err = self.execute(['run', 'exec/notfound'])
self.assertEqual(exit_code, 4)
exit_code, out, err = self.execute(['status', 'exec/notfound'])
self.assertEqual(out.strip('\n'), STATUS.notfound.name)
self.assertEqual(exit_code, 4)
| apache-2.0 |
rednach/krill | modules/dummy_poller/module.py | 18 | 4992 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This class is an example of a Scheduler module,
# here for both the configuration phase AND the running one
import sys
import signal
import time
from Queue import Empty
from shinken.basemodule import BaseModule
from shinken.log import logger
properties = {
'daemons': ['poller'],
'type': 'dummy_poller',
'external': False,
# To be a real worker module, you must set this
'worker_capable': True,
}
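# A hedged example of how this module might be declared in the poller's
# configuration (the module_name value is illustrative):
#
#   define module {
#       module_name     Dummy
#       module_type     dummy_poller
#   }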
# Called by the plugin manager to get an instance of this module
def get_instance(mod_conf):
logger.info("[Dummy Poller] Get a Dummy poller module for plugin %s", mod_conf.get_name())
instance = Dummy_poller(mod_conf)
return instance
# A dummy poller module that fakes the execution of checks
class Dummy_poller(BaseModule):
def __init__(self, mod_conf):
BaseModule.__init__(self, mod_conf)
# Called by poller to say 'let's prepare yourself guy'
def init(self):
logger.info("[Dummy Poller] Initialization of the dummy poller module")
self.i_am_dying = False
    # Get new checks if we have fewer than nb_checks_max.
    # If no new checks arrive and none are queued,
    # sleep for 1 sec
# REF: doc/shinken-action-queues.png (3)
def get_new_checks(self):
try:
            while True:
logger.debug("[Dummy Poller] I %d wait for a message", self.id)
msg = self.s.get(block=False)
if msg is not None:
self.checks.append(msg.get_data())
logger.debug("[Dummy Poller] I, %d, got a message!", self.id)
except Empty, exp:
if len(self.checks) == 0:
time.sleep(1)
    # Launch checks that are in 'queue' status
# REF: doc/shinken-action-queues.png (4)
def launch_new_checks(self):
# queue
for chk in self.checks:
if chk.status == 'queue':
logger.warning("[Dummy Poller] Dummy (bad) check for %s", str(chk.command))
chk.exit_status = 2
chk.get_outputs('All is NOT SO well', 8012)
chk.status = 'done'
chk.execution_time = 0.1
# Check the status of checks
# if done, return message finished :)
# REF: doc/shinken-action-queues.png (5)
def manage_finished_checks(self):
to_del = []
for action in self.checks:
to_del.append(action)
try:
self.returns_queue.put(action)
except IOError, exp:
logger.info("[Dummy Poller] %d exiting: %s", self.id, exp)
sys.exit(2)
for chk in to_del:
self.checks.remove(chk)
    # id = id of the worker
    # s = global queue Master->Slave
    # returns_queue = queue managed by the manager (Slave->Master)
    # c = control queue for the worker
def work(self, s, returns_queue, c):
logger.info("[Dummy Poller] Module Dummy started!")
## restore default signal handler for the workers:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
timeout = 1.0
self.checks = []
self.returns_queue = returns_queue
self.s = s
self.t_each_loop = time.time()
while True:
begin = time.time()
msg = None
cmsg = None
# If we are dying (big problem!) we do not
# take new jobs, we just finished the current one
if not self.i_am_dying:
# REF: doc/shinken-action-queues.png (3)
self.get_new_checks()
# REF: doc/shinken-action-queues.png (4)
self.launch_new_checks()
# REF: doc/shinken-action-queues.png (5)
self.manage_finished_checks()
# Now get order from master
try:
cmsg = c.get(block=False)
if cmsg.get_type() == 'Die':
logger.info("[Dummy Poller] %d : Dad say we are dying...", self.id)
break
except Exception:
pass
timeout -= time.time() - begin
if timeout < 0:
timeout = 1.0
| agpl-3.0 |
nicoboss/Floatmotion | OpenGL/GL/ARB/texture_env_add.py | 9 | 1124 | '''OpenGL extension ARB.texture_env_add
This module customises the behaviour of the
OpenGL.raw.GL.ARB.texture_env_add to provide a more
Python-friendly API
Overview (from the spec)
New texture environment function ADD is supported with the following
equation:
Cv = min(1, Cf + Ct)
The new function may be specified by calling TexEnv with the ADD token.
One possible application is to add a specular highlight texture to
a Gouraud-shaded primitive to emulate Phong shading, in a single
pass.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/texture_env_add.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_env_add import *
from OpenGL.raw.GL.ARB.texture_env_add import _EXTENSION_NAME
def glInitTextureEnvAddARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
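# A hedged usage sketch (standard OpenGL texture environment calls; only
# glInitTextureEnvAddARB comes from this module):
#
#   from OpenGL.GL import glTexEnvf, GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_ADD
#   if glInitTextureEnvAddARB():
#       glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_ADD)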
### END AUTOGENERATED SECTION | agpl-3.0 |
openstack/heat | heat/engine/update.py | 1 | 12695 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from heat.common import exception
from heat.engine import dependencies
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stk_defn
from heat.objects import resource as resource_objects
LOG = logging.getLogger(__name__)
class StackUpdate(object):
"""A Task to perform the update of an existing stack to a new template."""
def __init__(self, existing_stack, new_stack, previous_stack,
rollback=False):
"""Initialise with the existing stack and the new stack."""
self.existing_stack = existing_stack
self.new_stack = new_stack
self.previous_stack = previous_stack
self.rollback = rollback
self.existing_snippets = dict((n, r.frozen_definition())
for n, r in self.existing_stack.items()
if n in self.new_stack)
def __repr__(self):
if self.rollback:
return '%s Rollback' % str(self.existing_stack)
else:
return '%s Update' % str(self.existing_stack)
def __call__(self):
"""Return a co-routine that updates the stack."""
cleanup_prev = scheduler.DependencyTaskGroup(
self.previous_stack.dependencies,
self._remove_backup_resource,
reverse=True)
def get_error_wait_time(resource):
return resource.cancel_grace_period()
updater = scheduler.DependencyTaskGroup(
self.dependencies(),
self._resource_update,
error_wait_time=get_error_wait_time)
if not self.rollback:
yield from cleanup_prev()
try:
yield from updater()
finally:
self.previous_stack.reset_dependencies()
def _resource_update(self, res):
if res.name in self.new_stack and self.new_stack[res.name] is res:
return self._process_new_resource_update(res)
else:
return self._process_existing_resource_update(res)
def _remove_backup_resource(self, prev_res):
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
LOG.debug("Deleting backup resource %s", prev_res.name)
yield from prev_res.destroy()
@staticmethod
def _exchange_stacks(existing_res, prev_res):
resource_objects.Resource.exchange_stacks(existing_res.stack.context,
existing_res.id, prev_res.id)
prev_stack, existing_stack = prev_res.stack, existing_res.stack
prev_stack.add_resource(existing_res)
existing_stack.add_resource(prev_res)
def _create_resource(self, new_res):
res_name = new_res.name
# Clean up previous resource
if res_name in self.previous_stack:
prev_res = self.previous_stack[res_name]
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
# Swap in the backup resource if it is in a valid state,
# instead of creating a new resource
if prev_res.status == prev_res.COMPLETE:
LOG.debug("Swapping in backup Resource %s", res_name)
self._exchange_stacks(self.existing_stack[res_name],
prev_res)
return
LOG.debug("Deleting backup Resource %s", res_name)
yield from prev_res.destroy()
# Back up existing resource
if res_name in self.existing_stack:
LOG.debug("Backing up existing Resource %s", res_name)
existing_res = self.existing_stack[res_name]
self.previous_stack.add_resource(existing_res)
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)
self.existing_stack.add_resource(new_res)
        # Save the new resource definition to the backup stack if it is not
        # already present in the backup stack template; this allows resolving
        # all dependencies that the existing resource may have if it was
        # copied to the backup stack
if (res_name not in
self.previous_stack.t[self.previous_stack.t.RESOURCES]):
LOG.debug("Storing definition of new Resource %s", res_name)
self.previous_stack.t.add_resource(new_res.t)
self.previous_stack.t.store(self.previous_stack.context)
yield from new_res.create()
self._update_resource_data(new_res)
def _check_replace_restricted(self, res):
registry = res.stack.env.registry
restricted_actions = registry.get_rsrc_restricted_actions(res.name)
existing_res = self.existing_stack[res.name]
if 'replace' in restricted_actions:
ex = exception.ResourceActionRestricted(action='replace')
failure = exception.ResourceFailure(ex, existing_res,
existing_res.UPDATE)
existing_res._add_event(existing_res.UPDATE, existing_res.FAILED,
str(ex))
raise failure
def _update_resource_data(self, resource):
# Use the *new* template to determine the attrs to cache
node_data = resource.node_data(self.new_stack.defn)
stk_defn.update_resource_data(self.existing_stack.defn,
resource.name, node_data)
# Also update the new stack's definition with the data, so that
# following resources can calculate dep_attr values correctly (e.g. if
# the actual attribute name in a get_attr function also comes from a
# get_attr function.)
stk_defn.update_resource_data(self.new_stack.defn,
resource.name, node_data)
def _process_new_resource_update(self, new_res):
res_name = new_res.name
if res_name in self.existing_stack:
existing_res = self.existing_stack[res_name]
is_substituted = existing_res.check_is_substituted(type(new_res))
if type(existing_res) is type(new_res) or is_substituted:
try:
yield from self._update_in_place(existing_res,
new_res,
is_substituted)
except resource.UpdateReplace:
pass
else:
                    # Save the updated resource definition to the backup
                    # stack, because it allows the backup stack resources
                    # to be kept synchronized
LOG.debug("Storing definition of updated Resource %s",
res_name)
self.previous_stack.t.add_resource(new_res.t)
self.previous_stack.t.store(self.previous_stack.context)
self.existing_stack.t.add_resource(new_res.t)
self.existing_stack.t.store(self.existing_stack.context)
LOG.info("Resource %(res_name)s for stack "
"%(stack_name)s updated",
{'res_name': res_name,
'stack_name': self.existing_stack.name})
self._update_resource_data(existing_res)
return
else:
self._check_replace_restricted(new_res)
yield from self._create_resource(new_res)
def _update_in_place(self, existing_res, new_res, is_substituted=False):
existing_snippet = self.existing_snippets[existing_res.name]
prev_res = self.previous_stack.get(new_res.name)
# Note the new resource snippet is resolved in the context
# of the existing stack (which is the stack being updated)
# but with the template of the new stack (in case the update
# is switching template implementations)
new_snippet = new_res.t.reparse(self.existing_stack.defn,
self.new_stack.t)
if is_substituted:
substitute = type(new_res)(existing_res.name,
existing_res.t,
existing_res.stack)
existing_res.stack.resources[existing_res.name] = substitute
existing_res = substitute
existing_res.converge = self.new_stack.converge
yield from existing_res.update(new_snippet, existing_snippet,
prev_resource=prev_res)
def _process_existing_resource_update(self, existing_res):
res_name = existing_res.name
if res_name in self.previous_stack:
backup_res = self.previous_stack[res_name]
yield from self._remove_backup_resource(backup_res)
if res_name in self.new_stack:
new_res = self.new_stack[res_name]
if new_res.state == (new_res.INIT, new_res.COMPLETE):
# Already updated in-place
return
if existing_res.stack is not self.previous_stack:
yield from existing_res.destroy()
if res_name not in self.new_stack:
self.existing_stack.remove_resource(res_name)
def dependencies(self):
"""Return the Dependencies graph for the update.
Returns a Dependencies object representing the dependencies between
update operations to move from an existing stack definition to a new
one.
"""
existing_deps = self.existing_stack.dependencies
new_deps = self.new_stack.dependencies
def edges():
# Create/update the new stack's resources in create order
for e in new_deps.graph().edges():
yield e
# Destroy/cleanup the old stack's resources in delete order
for e in existing_deps.graph(reverse=True).edges():
yield e
# Don't cleanup old resources until after they have been replaced
for name, res in self.existing_stack.items():
if name in self.new_stack:
yield (res, self.new_stack[name])
return dependencies.Dependencies(edges())
def preview(self):
upd_keys = set(self.new_stack.resources.keys())
cur_keys = set(self.existing_stack.resources.keys())
common_keys = cur_keys.intersection(upd_keys)
deleted_keys = cur_keys.difference(upd_keys)
added_keys = upd_keys.difference(cur_keys)
updated_keys = []
replaced_keys = []
for key in common_keys:
current_res = self.existing_stack.resources[key]
updated_res = self.new_stack.resources[key]
current_props = current_res.frozen_definition().properties(
current_res.properties_schema, current_res.context)
updated_props = updated_res.frozen_definition().properties(
updated_res.properties_schema, updated_res.context)
# type comparison must match that in _process_new_resource_update
if type(current_res) is not type(updated_res):
replaced_keys.append(key)
continue
try:
if current_res.preview_update(updated_res.frozen_definition(),
current_res.frozen_definition(),
updated_props, current_props,
None):
updated_keys.append(key)
except resource.UpdateReplace:
replaced_keys.append(key)
return {
'unchanged': list(set(common_keys).difference(
set(updated_keys + replaced_keys))),
'updated': updated_keys,
'replaced': replaced_keys,
'added': list(added_keys),
'deleted': list(deleted_keys),
}
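# A hedged sketch of the mapping preview() returns (the resource names are
# illustrative):
#
#   {'unchanged': ['server_group'],
#    'updated': ['web_server'],
#    'replaced': ['database_volume'],
#    'added': ['load_balancer'],
#    'deleted': ['old_subnet']}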
| apache-2.0 |
ally24k/autocomplete | lib/flask/ctx.py | 776 | 14266 | # -*- coding: utf-8 -*-
"""
flask.ctx
~~~~~~~~~
Implements the objects required to keep the context.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .module import blueprint_is_module
from .signals import appcontext_pushed, appcontext_popped
class _AppCtxGlobals(object):
"""A plain object."""
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __contains__(self, item):
return item in self.__dict__
def __iter__(self):
return iter(self.__dict__)
def __repr__(self):
top = _app_ctx_stack.top
if top is not None:
return '<flask.g of %r>' % top.app.name
return object.__repr__(self)
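# A minimal sketch of the application-context globals in use (get_db and
# connect_to_database are hypothetical helpers):
#
#   from flask import g
#
#   def get_db():
#       db = g.get('db')
#       if db is None:
#           db = g.db = connect_to_database()
#       return db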
def after_this_request(f):
"""Executes a function after this request. This is useful to modify
response objects. The function is passed the response object and has
to return the same or a new one.
Example::
@app.route('/')
def index():
@after_this_request
def add_header(response):
response.headers['X-Foo'] = 'Parachute'
return response
return 'Hello World!'
This is more useful if a function other than the view function wants to
modify a response. For instance think of a decorator that wants to add
some headers without converting the return value into a response object.
.. versionadded:: 0.9
"""
_request_ctx_stack.top._after_request_functions.append(f)
return f
def copy_current_request_context(f):
"""A helper function that decorates a function to retain the current
request context. This is useful when working with greenlets. The moment
the function is decorated a copy of the request context is created and
then pushed when the function is called.
Example::
import gevent
from flask import copy_current_request_context
@app.route('/')
def index():
@copy_current_request_context
def do_some_work():
# do some work here, it can access flask.request like you
# would otherwise in the view function.
...
gevent.spawn(do_some_work)
return 'Regular response'
.. versionadded:: 0.10
"""
top = _request_ctx_stack.top
if top is None:
raise RuntimeError('This decorator can only be used at local scopes '
'when a request context is on the stack. For instance within '
'view functions.')
reqctx = top.copy()
def wrapper(*args, **kwargs):
with reqctx:
return f(*args, **kwargs)
return update_wrapper(wrapper, f)
def has_request_context():
"""If you have code that wants to test if a request context is there or
not this function can be used. For instance, you may want to take advantage
of request information if the request object is available, but fail
silently if it is unavailable.
::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and has_request_context():
remote_addr = request.remote_addr
self.remote_addr = remote_addr
Alternatively you can also just test any of the context bound objects
    (such as :class:`request` or :class:`g` for truthiness)::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and request:
remote_addr = request.remote_addr
self.remote_addr = remote_addr
.. versionadded:: 0.7
"""
return _request_ctx_stack.top is not None
def has_app_context():
"""Works like :func:`has_request_context` but for the application
context. You can also just do a boolean check on the
:data:`current_app` object instead.
.. versionadded:: 0.9
"""
return _app_ctx_stack.top is not None
class AppContext(object):
"""The application context binds an application object implicitly
to the current thread or greenlet, similar to how the
:class:`RequestContext` binds request information. The application
context is also implicitly created if a request context is created
but the application is not on top of the individual application
context.
"""
def __init__(self, app):
self.app = app
self.url_adapter = app.create_url_adapter(None)
self.g = app.app_ctx_globals_class()
# Like request context, app contexts can be pushed multiple times
# but there a basic "refcount" is enough to track them.
self._refcnt = 0
def push(self):
"""Binds the app context to the current context."""
self._refcnt += 1
_app_ctx_stack.push(self)
appcontext_pushed.send(self.app)
def pop(self, exc=None):
"""Pops the app context."""
self._refcnt -= 1
if self._refcnt <= 0:
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_appcontext(exc)
rv = _app_ctx_stack.pop()
assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
% (rv, self)
appcontext_popped.send(self.app)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop(exc_value)
class RequestContext(object):
"""The request context contains all request relevant information. It is
created at the beginning of the request and pushed to the
`_request_ctx_stack` and removed at the end of it. It will create the
URL adapter and request object for the WSGI environment provided.
Do not attempt to use this class directly, instead use
:meth:`~flask.Flask.test_request_context` and
:meth:`~flask.Flask.request_context` to create this object.
When the request context is popped, it will evaluate all the
functions registered on the application for teardown execution
(:meth:`~flask.Flask.teardown_request`).
The request context is automatically popped at the end of the request
for you. In debug mode the request context is kept around if
exceptions happen so that interactive debuggers have a chance to
introspect the data. With 0.4 this can also be forced for requests
that did not fail and outside of `DEBUG` mode. By setting
``'flask._preserve_context'`` to `True` on the WSGI environment the
context will not pop itself at the end of the request. This is used by
the :meth:`~flask.Flask.test_client` for example to implement the
deferred cleanup functionality.
    You might find this helpful for unit tests where you need the
    context local information around for a little longer. Make
sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
that situation, otherwise your unittests will leak memory.
"""
def __init__(self, app, environ, request=None):
self.app = app
if request is None:
request = app.request_class(environ)
self.request = request
self.url_adapter = app.create_url_adapter(self.request)
self.flashes = None
self.session = None
# Request contexts can be pushed multiple times and interleaved with
        # other request contexts. Only when the last level is popped do we
        # get rid of them. Additionally, if an application context is missing,
        # one is created implicitly, so for each level we add this information
self._implicit_app_ctx_stack = []
# indicator if the context was preserved. Next time another context
# is pushed the preserved context is popped.
self.preserved = False
# remembers the exception for pop if there is one in case the context
# preservation kicks in.
self._preserved_exc = None
# Functions that should be executed after the request on the response
# object. These will be called before the regular "after_request"
# functions.
self._after_request_functions = []
self.match_request()
# XXX: Support for deprecated functionality. This is going away with
# Flask 1.0
blueprint = self.request.blueprint
if blueprint is not None:
# better safe than sorry, we don't want to break code that
# already worked
bp = app.blueprints.get(blueprint)
if bp is not None and blueprint_is_module(bp):
self.request._is_old_module = True
def _get_g(self):
return _app_ctx_stack.top.g
def _set_g(self, value):
_app_ctx_stack.top.g = value
g = property(_get_g, _set_g)
del _get_g, _set_g
def copy(self):
"""Creates a copy of this request context with the same request object.
This can be used to move a request context to a different greenlet.
Because the actual request object is the same this cannot be used to
move a request context to a different thread unless access to the
request object is locked.
.. versionadded:: 0.10
"""
return self.__class__(self.app,
environ=self.request.environ,
request=self.request
)
def match_request(self):
"""Can be overridden by a subclass to hook into the matching
of the request.
"""
try:
url_rule, self.request.view_args = \
self.url_adapter.match(return_rule=True)
self.request.url_rule = url_rule
except HTTPException as e:
self.request.routing_exception = e
def push(self):
"""Binds the request context to the current context."""
# If an exception occurs in debug mode or if context preservation is
# activated under exception situations exactly one context stays
# on the stack. The rationale is that you want to access that
# information under debug situations. However if someone forgets to
# pop that context again we want to make sure that on the next push
        # it's invalidated, otherwise we run the risk that something leaks
        # memory. This is usually only a problem in the test suite since this
# functionality is not active in production environments.
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop(top._preserved_exc)
# Before we push the request context we have to ensure that there
# is an application context.
app_ctx = _app_ctx_stack.top
if app_ctx is None or app_ctx.app != self.app:
app_ctx = self.app.app_context()
app_ctx.push()
self._implicit_app_ctx_stack.append(app_ctx)
else:
self._implicit_app_ctx_stack.append(None)
_request_ctx_stack.push(self)
# Open the session at the moment that the request context is
# available. This allows a custom open_session method to use the
# request context (e.g. code that access database information
# stored on `g` instead of the appcontext).
self.session = self.app.open_session(self.request)
if self.session is None:
self.session = self.app.make_null_session()
def pop(self, exc=None):
"""Pops the request context and unbinds it by doing that. This will
also trigger the execution of functions registered by the
:meth:`~flask.Flask.teardown_request` decorator.
.. versionchanged:: 0.9
Added the `exc` argument.
"""
app_ctx = self._implicit_app_ctx_stack.pop()
clear_request = False
if not self._implicit_app_ctx_stack:
self.preserved = False
self._preserved_exc = None
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_request(exc)
# If this interpreter supports clearing the exception information
# we do that now. This will only go into effect on Python 2.x,
# on 3.x it disappears automatically at the end of the exception
# stack.
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
request_close = getattr(self.request, 'close', None)
if request_close is not None:
request_close()
clear_request = True
rv = _request_ctx_stack.pop()
assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
% (rv, self)
# get rid of circular dependencies at the end of the request
# so that we don't require the GC to be active.
if clear_request:
rv.request.environ['werkzeug.request'] = None
# Get rid of the app as well if necessary.
if app_ctx is not None:
app_ctx.pop(exc)
def auto_pop(self, exc):
if self.request.environ.get('flask._preserve_context') or \
(exc is not None and self.app.preserve_context_on_exception):
self.preserved = True
self._preserved_exc = exc
else:
self.pop(exc)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
# do not pop the request stack if we are in debug mode and an
# exception happened. This will allow the debugger to still
# access the request object in the interactive shell. Furthermore
# the context can be force kept alive for the test client.
# See flask.testing for how this works.
self.auto_pop(exc_value)
def __repr__(self):
return '<%s \'%s\' [%s] of %s>' % (
self.__class__.__name__,
self.request.url,
self.request.method,
self.app.name,
)
| apache-2.0 |
ray-project/ray | rllib/env/wrappers/tests/test_exception_wrapper.py | 2 | 1736 | import random
import unittest
import gym
from ray.rllib.env.wrappers.exception_wrapper import ResetOnExceptionWrapper, \
TooManyResetAttemptsException
class TestResetOnExceptionWrapper(unittest.TestCase):
def test_unstable_env(self):
class UnstableEnv(gym.Env):
observation_space = gym.spaces.Discrete(2)
action_space = gym.spaces.Discrete(2)
def step(self, action):
if random.choice([True, False]):
raise ValueError("An error from a unstable environment.")
return self.observation_space.sample(), 0.0, False, {}
def reset(self):
return self.observation_space.sample()
env = UnstableEnv()
env = ResetOnExceptionWrapper(env)
try:
self._run_for_100_steps(env)
except Exception:
self.fail()
def test_very_unstable_env(self):
class VeryUnstableEnv(gym.Env):
observation_space = gym.spaces.Discrete(2)
action_space = gym.spaces.Discrete(2)
def step(self, action):
return self.observation_space.sample(), 0.0, False, {}
def reset(self):
raise ValueError("An error from a very unstable environment.")
env = VeryUnstableEnv()
env = ResetOnExceptionWrapper(env)
self.assertRaises(TooManyResetAttemptsException,
lambda: self._run_for_100_steps(env))
@staticmethod
def _run_for_100_steps(env):
env.reset()
for _ in range(100):
env.step(env.action_space.sample())
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
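# A hedged usage sketch outside the tests (the wrapped environment is
# illustrative):
#
#   env = ResetOnExceptionWrapper(gym.make("CartPole-v1"))
#   obs = env.reset()  # retries the underlying reset instead of raising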
| apache-2.0 |
diefenbach/django-lfs | lfs/marketing/models.py | 1 | 1821 | # django imports
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
# lfs imports
from lfs.catalog.models import Product
from lfs.order.models import Order
class Topseller(models.Model):
"""Selected products are in any case among topsellers.
"""
product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product"))
position = models.PositiveSmallIntegerField(_(u"Position"), default=1)
class Meta:
ordering = ["position"]
app_label = 'marketing'
def __str__(self):
return u"%s (%s)" % (self.product.name, self.position)
class ProductSales(models.Model):
"""Stores totals sales per product.
"""
product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product"))
sales = models.IntegerField(_(u"sales"), default=0)
class Meta:
app_label = 'marketing'
class FeaturedProduct(models.Model):
"""Featured products are manually selected by the shop owner
"""
product = models.ForeignKey(Product, models.CASCADE, verbose_name=_(u"Product"))
position = models.PositiveSmallIntegerField(_(u"Position"), default=1)
active = models.BooleanField(_(u"Active"), default=True)
class Meta:
ordering = ["position"]
app_label = 'marketing'
def __str__(self):
return u"%s (%s)" % (self.product.name, self.position)
class OrderRatingMail(models.Model):
"""Saves whether and when a rating mail has been send for an order.
"""
order = models.ForeignKey(Order, models.CASCADE, verbose_name=_(u"Order"))
send_date = models.DateTimeField(auto_now=True)
def __str__(self):
return u"%s (%s)" % (self.order.id, self.send_date.strftime(ugettext('DATE_FORMAT')))
class Meta:
app_label = 'marketing'
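# A hedged ORM usage sketch (standard Django queryset calls; Meta.ordering
# already sorts Topseller and FeaturedProduct by position):
#
#   Topseller.objects.all()
#   FeaturedProduct.objects.filter(active=True)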
| bsd-3-clause |
JCROM-Android/jcrom_external_chromium_org | third_party/tlslite/tlslite/X509CertChain.py | 76 | 9052 | """Class representing an X.509 certificate chain."""
from utils import cryptomath
from X509 import X509
class X509CertChain:
"""This class represents a chain of X.509 certificates.
@type x509List: list
@ivar x509List: A list of L{tlslite.X509.X509} instances,
starting with the end-entity certificate and with every
subsequent certificate certifying the previous.
"""
def __init__(self, x509List=None):
"""Create a new X509CertChain.
@type x509List: list
@param x509List: A list of L{tlslite.X509.X509} instances,
starting with the end-entity certificate and with every
subsequent certificate certifying the previous.
"""
if x509List:
self.x509List = x509List
else:
self.x509List = []
def parseChain(self, s):
"""Parse a PEM-encoded X.509 certificate file chain file.
@type s: str
        @param s: A PEM-encoded (e.g. Base64) X.509 certificate file, with every
certificate wrapped within "-----BEGIN CERTIFICATE-----" and
"-----END CERTIFICATE-----" tags). Extraneous data outside such tags,
such as human readable representations, will be ignored.
"""
class PEMIterator(object):
"""Simple iterator over PEM-encoded certificates within a string.
@type data: string
@ivar data: A string containing PEM-encoded (Base64) certificates,
with every certificate wrapped within "-----BEGIN CERTIFICATE-----"
and "-----END CERTIFICATE-----" tags). Extraneous data outside such
tags, such as human readable representations, will be ignored.
@type index: integer
@ivar index: The current offset within data to begin iterating from.
"""
_CERTIFICATE_HEADER = "-----BEGIN CERTIFICATE-----"
"""The PEM encoding block header for X.509 certificates."""
_CERTIFICATE_FOOTER = "-----END CERTIFICATE-----"
"""The PEM encoding block footer for X.509 certificates."""
def __init__(self, s):
self.data = s
self.index = 0
def __iter__(self):
return self
def next(self):
"""Iterates and returns the next L{tlslite.X509.X509}
certificate in data.
                @rtype: tlslite.X509.X509
"""
self.index = self.data.find(self._CERTIFICATE_HEADER,
self.index)
if self.index == -1:
raise StopIteration
end = self.data.find(self._CERTIFICATE_FOOTER, self.index)
if end == -1:
raise StopIteration
certStr = self.data[self.index+len(self._CERTIFICATE_HEADER) :
end]
self.index = end + len(self._CERTIFICATE_FOOTER)
bytes = cryptomath.base64ToBytes(certStr)
return X509().parseBinary(bytes)
self.x509List = list(PEMIterator(s))
return self
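# Illustrative only (`pem_data` is a hypothetical string of PEM certificates):
# chain = X509CertChain().parseChain(pem_data)
# print chain.getNumCerts(), chain.getFingerprint()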
def getNumCerts(self):
"""Get the number of certificates in this chain.
@rtype: int
"""
return len(self.x509List)
def getEndEntityPublicKey(self):
"""Get the public key from the end-entity certificate.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].publicKey
def getFingerprint(self):
"""Get the hex-encoded fingerprint of the end-entity certificate.
@rtype: str
@return: A hex-encoded fingerprint.
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].getFingerprint()
def getCommonName(self):
"""Get the Subject's Common Name from the end-entity certificate.
The cryptlib_py module must be installed in order to use this
function.
@rtype: str or None
@return: The CN component of the certificate's subject DN, if
present.
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].getCommonName()
def validate(self, x509TrustList):
"""Check the validity of the certificate chain.
This checks that every certificate in the chain validates with
the subsequent one, until some certificate validates with (or
is identical to) one of the passed-in root certificates.
The cryptlib_py module must be installed in order to use this
function.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
certificate chain must extend to one of these certificates to
be considered valid.
"""
import cryptlib_py
c1 = None
c2 = None
lastC = None
rootC = None
try:
rootFingerprints = [c.getFingerprint() for c in x509TrustList]
#Check that every certificate in the chain validates with the
#next one
for cert1, cert2 in zip(self.x509List, self.x509List[1:]):
#If we come upon a root certificate, we're done.
if cert1.getFingerprint() in rootFingerprints:
return True
c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
try:
cryptlib_py.cryptCheckCert(c1, c2)
except:
return False
cryptlib_py.cryptDestroyCert(c1)
c1 = None
cryptlib_py.cryptDestroyCert(c2)
c2 = None
#If the last certificate is one of the root certificates, we're
#done.
if self.x509List[-1].getFingerprint() in rootFingerprints:
return True
#Otherwise, find a root certificate that the last certificate
#chains to, and validate them.
lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(),
cryptlib_py.CRYPT_UNUSED)
for rootCert in x509TrustList:
rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
if self._checkChaining(lastC, rootC):
try:
cryptlib_py.cryptCheckCert(lastC, rootC)
return True
except:
return False
return False
finally:
if not (c1 is None):
cryptlib_py.cryptDestroyCert(c1)
if not (c2 is None):
cryptlib_py.cryptDestroyCert(c2)
if not (lastC is None):
cryptlib_py.cryptDestroyCert(lastC)
if not (rootC is None):
cryptlib_py.cryptDestroyCert(rootC)
def _checkChaining(self, lastC, rootC):
import cryptlib_py
import array
def compareNames(name):
try:
length = cryptlib_py.cryptGetAttributeString(lastC, name, None)
lastName = array.array('B', [0] * length)
cryptlib_py.cryptGetAttributeString(lastC, name, lastName)
lastName = lastName.tostring()
except cryptlib_py.CryptException, e:
if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
lastName = None
try:
length = cryptlib_py.cryptGetAttributeString(rootC, name, None)
rootName = array.array('B', [0] * length)
cryptlib_py.cryptGetAttributeString(rootC, name, rootName)
rootName = rootName.tostring()
except cryptlib_py.CryptException, e:
if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
rootName = None
return lastName == rootName
cryptlib_py.cryptSetAttribute(lastC,
cryptlib_py.CRYPT_CERTINFO_ISSUERNAME,
cryptlib_py.CRYPT_UNUSED)
if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME):
return False
return True
| bsd-3-clause |
olologin/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
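# Illustrative check, not part of the demo: the fitted estimators expose
# their learned parameters, e.g.
# print(lin_svc.coef_.shape) # (3, 2): one one-vs-rest hyperplane per class
# print(svc.support_vectors_.shape) # support vectors kept by the kernel SVC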
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
40223137/w1717 | static/Brython3.1.0-20150301-090019/Lib/unittest/mock.py | 739 | 71473 | # mock.py
# Test tools for mocking and patching.
# Maintained by Michael Foord
# Backport for other versions of Python available from
# http://pypi.python.org/pypi/mock
__all__ = (
'Mock',
'MagicMock',
'patch',
'sentinel',
'DEFAULT',
'ANY',
'call',
'create_autospec',
'FILTER_DIR',
'NonCallableMock',
'NonCallableMagicMock',
'mock_open',
'PropertyMock',
)
__version__ = '1.0'
import inspect
import pprint
import sys
from functools import wraps
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
# jython
import java
BaseExceptions = (BaseException, java.lang.Throwable)
FILTER_DIR = True
# Workaround for issue #12370
# Without this, the __class__ properties wouldn't be set correctly
_safe_super = super
def _is_instance_mock(obj):
# can't use isinstance on Mock objects because they override __class__
# The base class for all mocks is NonCallableMock
return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
return (
isinstance(obj, BaseExceptions) or
isinstance(obj, type) and issubclass(obj, BaseExceptions)
)
class _slotted(object):
__slots__ = ['a']
DescriptorTypes = (
type(_slotted.a),
property,
)
def _getsignature(func, skipfirst, instance=False):
if isinstance(func, type) and not instance:
try:
func = func.__init__
except AttributeError:
return
skipfirst = True
elif not isinstance(func, FunctionTypes):
# for classes where instance is True we end up here too
try:
func = func.__call__
except AttributeError:
return
try:
argspec = inspect.getfullargspec(func)
except TypeError:
# C function / method, possibly inherited object().__init__
return
regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec
# instance methods and classmethods need to lose the self argument
if getattr(func, '__self__', None) is not None:
regargs = regargs[1:]
if skipfirst:
# this condition and the above one are never both True - why?
regargs = regargs[1:]
signature = inspect.formatargspec(
regargs, varargs, varkw, defaults,
kwonly, kwonlydef, ann, formatvalue=lambda value: "")
return signature[1:-1], func
def _check_signature(func, mock, skipfirst, instance=False):
if not _callable(func):
return
result = _getsignature(func, skipfirst, instance)
if result is None:
return
signature, func = result
# can't use self because "self" is common as an argument name
# unfortunately, and not only in the first position
src = "lambda _mock_self, %s: None" % signature
checksig = eval(src, {})
_copy_func_details(func, checksig)
type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
funcopy.__name__ = func.__name__
funcopy.__doc__ = func.__doc__
# we explicitly don't copy func.__dict__ into this copy as it would
# expose original attributes that should be mocked
funcopy.__module__ = func.__module__
funcopy.__defaults__ = func.__defaults__
funcopy.__kwdefaults__ = func.__kwdefaults__
def _callable(obj):
if isinstance(obj, type):
return True
if getattr(obj, '__call__', None) is not None:
return True
return False
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
"""Given an object, return True if the object is callable.
For classes, return True if instances would be callable."""
if not isinstance(obj, type):
# already an instance
return getattr(obj, '__call__', None) is not None
# *could* be broken by a class overriding __mro__ or __dict__ via
# a metaclass
for base in (obj,) + obj.__mro__:
if base.__dict__.get('__call__') is not None:
return True
return False
def _set_signature(mock, original, instance=False):
# creates a function with signature (*args, **kwargs) that delegates to a
# mock. It still does signature checking by calling a lambda with the same
# signature as the original.
if not _callable(original):
return
skipfirst = isinstance(original, type)
result = _getsignature(original, skipfirst, instance)
if result is None:
# was a C function (e.g. object().__init__ ) that can't be mocked
return
signature, func = result
src = "lambda %s: None" % signature
checksig = eval(src, {})
_copy_func_details(func, checksig)
name = original.__name__
if not name.isidentifier():
name = 'funcopy'
context = {'_checksig_': checksig, 'mock': mock}
src = """def %s(*args, **kwargs):
_checksig_(*args, **kwargs)
return mock(*args, **kwargs)""" % name
exec (src, context)
funcopy = context[name]
_setup_func(funcopy, mock)
return funcopy
def _setup_func(funcopy, mock):
funcopy.mock = mock
# can't use isinstance with mocks
if not _is_instance_mock(mock):
return
def assert_called_with(*args, **kwargs):
return mock.assert_called_with(*args, **kwargs)
def assert_called_once_with(*args, **kwargs):
return mock.assert_called_once_with(*args, **kwargs)
def assert_has_calls(*args, **kwargs):
return mock.assert_has_calls(*args, **kwargs)
def assert_any_call(*args, **kwargs):
return mock.assert_any_call(*args, **kwargs)
def reset_mock():
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
mock.reset_mock()
ret = funcopy.return_value
if _is_instance_mock(ret) and ret is not mock:
ret.reset_mock()
funcopy.called = False
funcopy.call_count = 0
funcopy.call_args = None
funcopy.call_args_list = _CallList()
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
funcopy.return_value = mock.return_value
funcopy.side_effect = mock.side_effect
funcopy._mock_children = mock._mock_children
funcopy.assert_called_with = assert_called_with
funcopy.assert_called_once_with = assert_called_once_with
funcopy.assert_has_calls = assert_has_calls
funcopy.assert_any_call = assert_any_call
funcopy.reset_mock = reset_mock
mock._mock_delegate = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
class _Sentinel(object):
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
if name == '__bases__':
# Without this help(unittest.mock) raises an exception
raise AttributeError
return self._sentinels.setdefault(name, _SentinelObject(name))
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
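# Named sentinels are created on first access and then reused, e.g.:
# sentinel.missing is sentinel.missing # -> True: a unique, stable marker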
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
_allowed_names = set(
[
'return_value', '_mock_return_value', 'side_effect',
'_mock_side_effect', '_mock_parent', '_mock_new_parent',
'_mock_name', '_mock_new_name'
]
)
def _delegating_property(name):
_allowed_names.add(name)
_the_name = '_mock_' + name
def _get(self, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
return getattr(self, _the_name)
return getattr(sig, name)
def _set(self, value, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
self.__dict__[_the_name] = value
else:
setattr(sig, name, value)
return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
if not _is_instance_mock(value):
return False
if ((value._mock_name or value._mock_new_name) or
(value._mock_parent is not None) or
(value._mock_new_parent is not None)):
return False
_parent = parent
while _parent is not None:
# setting a mock (value) as a child or return value of itself
# should not modify the mock
if _parent is value:
return False
_parent = _parent._mock_new_parent
if new_name:
value._mock_new_parent = parent
value._mock_new_name = new_name
if name:
value._mock_parent = parent
value._mock_name = name
return True
class Base(object):
_mock_return_value = DEFAULT
_mock_side_effect = None
def __init__(self, *args, **kwargs):
pass
class NonCallableMock(Base):
"""A non-callable version of `Mock`"""
def __new__(cls, *args, **kw):
# every instance has its own class
# so we can create magic methods on the
# class without stomping on other mocks
new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
instance = object.__new__(new)
return instance
def __init__(
self, spec=None, wraps=None, name=None, spec_set=None,
parent=None, _spec_state=None, _new_name='', _new_parent=None,
**kwargs
):
if _new_parent is None:
_new_parent = parent
__dict__ = self.__dict__
__dict__['_mock_parent'] = parent
__dict__['_mock_name'] = name
__dict__['_mock_new_name'] = _new_name
__dict__['_mock_new_parent'] = _new_parent
if spec_set is not None:
spec = spec_set
spec_set = True
self._mock_add_spec(spec, spec_set)
__dict__['_mock_children'] = {}
__dict__['_mock_wraps'] = wraps
__dict__['_mock_delegate'] = None
__dict__['_mock_called'] = False
__dict__['_mock_call_args'] = None
__dict__['_mock_call_count'] = 0
__dict__['_mock_call_args_list'] = _CallList()
__dict__['_mock_mock_calls'] = _CallList()
__dict__['method_calls'] = _CallList()
if kwargs:
self.configure_mock(**kwargs)
_safe_super(NonCallableMock, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state
)
def attach_mock(self, mock, attribute):
"""
Attach a mock as an attribute of this one, replacing its name and
parent. Calls to the attached mock will be recorded in the
`method_calls` and `mock_calls` attributes of this one."""
mock._mock_parent = None
mock._mock_new_parent = None
mock._mock_name = ''
mock._mock_new_name = None
setattr(self, attribute, mock)
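# e.g. (sketch): calls to an attached mock are recorded on the parent:
# parent, child = Mock(), Mock()
# parent.attach_mock(child, 'child')
# child(1)
# parent.mock_calls # -> [call.child(1)]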
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
def _mock_add_spec(self, spec, spec_set):
_spec_class = None
if spec is not None and not _is_list(spec):
if isinstance(spec, type):
_spec_class = spec
else:
_spec_class = _get_class(spec)
spec = dir(spec)
__dict__ = self.__dict__
__dict__['_spec_class'] = _spec_class
__dict__['_spec_set'] = spec_set
__dict__['_mock_methods'] = spec
def __get_return_value(self):
ret = self._mock_return_value
if self._mock_delegate is not None:
ret = self._mock_delegate.return_value
if ret is DEFAULT:
ret = self._get_child_mock(
_new_parent=self, _new_name='()'
)
self.return_value = ret
return ret
def __set_return_value(self, value):
if self._mock_delegate is not None:
self._mock_delegate.return_value = value
else:
self._mock_return_value = value
_check_and_set_parent(self, value, None, '()')
__return_value_doc = "The value to be returned when the mock is called."
return_value = property(__get_return_value, __set_return_value,
__return_value_doc)
@property
def __class__(self):
if self._spec_class is None:
return type(self)
return self._spec_class
called = _delegating_property('called')
call_count = _delegating_property('call_count')
call_args = _delegating_property('call_args')
call_args_list = _delegating_property('call_args_list')
mock_calls = _delegating_property('mock_calls')
def __get_side_effect(self):
delegated = self._mock_delegate
if delegated is None:
return self._mock_side_effect
return delegated.side_effect
def __set_side_effect(self, value):
value = _try_iter(value)
delegated = self._mock_delegate
if delegated is None:
self._mock_side_effect = value
else:
delegated.side_effect = value
side_effect = property(__get_side_effect, __set_side_effect)
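# e.g. (sketch): an iterable side_effect drives successive call results:
# m = Mock(side_effect=[1, 2, KeyError('gone')])
# m() # -> 1
# m() # -> 2
# m() # raises KeyError('gone')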
def reset_mock(self):
"Restore the mock object to its initial state."
self.called = False
self.call_args = None
self.call_count = 0
self.mock_calls = _CallList()
self.call_args_list = _CallList()
self.method_calls = _CallList()
for child in self._mock_children.values():
if isinstance(child, _SpecState):
continue
child.reset_mock()
ret = self._mock_return_value
if _is_instance_mock(ret) and ret is not self:
ret.reset_mock()
def configure_mock(self, **kwargs):
"""Set attributes on the mock through keyword arguments.
Attributes plus return values and side effects can be set on child
mocks using standard dot notation and unpacking a dictionary in the
method call:
>>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
>>> mock.configure_mock(**attrs)"""
for arg, val in sorted(kwargs.items(),
# we sort on the number of dots so that
# attributes are set before we set attributes on
# attributes
key=lambda entry: entry[0].count('.')):
args = arg.split('.')
final = args.pop()
obj = self
for entry in args:
obj = getattr(obj, entry)
setattr(obj, final, val)
def __getattr__(self, name):
if name == '_mock_methods':
raise AttributeError(name)
elif self._mock_methods is not None:
if name not in self._mock_methods or name in _all_magics:
raise AttributeError("Mock object has no attribute %r" % name)
elif _is_magic(name):
raise AttributeError(name)
result = self._mock_children.get(name)
if result is _deleted:
raise AttributeError(name)
elif result is None:
wraps = None
if self._mock_wraps is not None:
# XXXX should we get the attribute without triggering code
# execution?
wraps = getattr(self._mock_wraps, name)
result = self._get_child_mock(
parent=self, name=name, wraps=wraps, _new_name=name,
_new_parent=self
)
self._mock_children[name] = result
elif isinstance(result, _SpecState):
result = create_autospec(
result.spec, result.spec_set, result.instance,
result.parent, result.name
)
self._mock_children[name] = result
return result
def __repr__(self):
_name_list = [self._mock_new_name]
_parent = self._mock_new_parent
last = self
dot = '.'
if _name_list == ['()']:
dot = ''
seen = set()
while _parent is not None:
last = _parent
_name_list.append(_parent._mock_new_name + dot)
dot = '.'
if _parent._mock_new_name == '()':
dot = ''
_parent = _parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
if id(_parent) in seen:
break
seen.add(id(_parent))
_name_list = list(reversed(_name_list))
_first = last._mock_name or 'mock'
if len(_name_list) > 1:
if _name_list[1] not in ('()', '().'):
_first += '.'
_name_list[0] = _first
name = ''.join(_name_list)
name_string = ''
if name not in ('mock', 'mock.'):
name_string = ' name=%r' % name
spec_string = ''
if self._spec_class is not None:
spec_string = ' spec=%r'
if self._spec_set:
spec_string = ' spec_set=%r'
spec_string = spec_string % self._spec_class.__name__
return "<%s%s%s id='%s'>" % (
type(self).__name__,
name_string,
spec_string,
id(self)
)
def __dir__(self):
"""Filter the output of `dir(mock)` to only useful members."""
if not FILTER_DIR:
return object.__dir__(self)
extras = self._mock_methods or []
from_type = dir(type(self))
from_dict = list(self.__dict__)
from_type = [e for e in from_type if not e.startswith('_')]
from_dict = [e for e in from_dict if not e.startswith('_') or
_is_magic(e)]
return sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
def __setattr__(self, name, value):
if name in _allowed_names:
# property setters go through here
return object.__setattr__(self, name, value)
elif (self._spec_set and self._mock_methods is not None and
name not in self._mock_methods and
name not in self.__dict__):
raise AttributeError("Mock object has no attribute '%s'" % name)
elif name in _unsupported_magics:
msg = 'Attempting to set unsupported magic method %r.' % name
raise AttributeError(msg)
elif name in _all_magics:
if self._mock_methods is not None and name not in self._mock_methods:
raise AttributeError("Mock object has no attribute '%s'" % name)
if not _is_instance_mock(value):
setattr(type(self), name, _get_method(name, value))
original = value
value = lambda *args, **kw: original(self, *args, **kw)
else:
# only set _new_name and not name so that mock_calls is tracked
# but not method calls
_check_and_set_parent(self, value, None, name)
setattr(type(self), name, value)
self._mock_children[name] = value
elif name == '__class__':
self._spec_class = value
return
else:
if _check_and_set_parent(self, value, name, name):
self._mock_children[name] = value
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in _all_magics and name in type(self).__dict__:
delattr(type(self), name)
if name not in self.__dict__:
# for magic methods that are still MagicProxy objects and
# not set on the instance itself
return
if name in self.__dict__:
object.__delattr__(self, name)
obj = self._mock_children.get(name, _missing)
if obj is _deleted:
raise AttributeError(name)
if obj is not _missing:
del self._mock_children[name]
self._mock_children[name] = _deleted
def _format_mock_call_signature(self, args, kwargs):
name = self._mock_name or 'mock'
return _format_call_signature(name, args, kwargs)
def _format_mock_failure_message(self, args, kwargs):
message = 'Expected call: %s\nActual call: %s'
expected_string = self._format_mock_call_signature(args, kwargs)
call_args = self.call_args
if len(call_args) == 3:
call_args = call_args[1:]
actual_string = self._format_mock_call_signature(*call_args)
return message % (expected_string, actual_string)
def assert_called_with(_mock_self, *args, **kwargs):
"""assert that the mock was called with the specified arguments.
Raises an AssertionError if the args and keyword args passed in are
different to the last call to the mock."""
self = _mock_self
if self.call_args is None:
expected = self._format_mock_call_signature(args, kwargs)
raise AssertionError('Expected call: %s\nNot called' % (expected,))
if self.call_args != (args, kwargs):
msg = self._format_mock_failure_message(args, kwargs)
raise AssertionError(msg)
def assert_called_once_with(_mock_self, *args, **kwargs):
"""assert that the mock was called exactly once and with the specified
arguments."""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected '%s' to be called once. Called %s times." %
(self._mock_name or 'mock', self.call_count))
raise AssertionError(msg)
return self.assert_called_with(*args, **kwargs)
def assert_has_calls(self, calls, any_order=False):
"""assert the mock has been called with the specified calls.
The `mock_calls` list is checked for the calls.
If `any_order` is False (the default) then the calls must be
sequential. There can be extra calls before or after the
specified calls.
If `any_order` is True then the calls can be in any order, but
they must all appear in `mock_calls`."""
if not any_order:
if calls not in self.mock_calls:
raise AssertionError(
'Calls not found.\nExpected: %r\n'
'Actual: %r' % (calls, self.mock_calls)
)
return
all_calls = list(self.mock_calls)
not_found = []
for kall in calls:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
raise AssertionError(
'%r not all found in call list' % (tuple(not_found),)
)
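# e.g. (sketch):
# m = Mock()
# m(1); m(2); m(3)
# m.assert_has_calls([call(2), call(3)]) # passes: a sequential run of calls
# m.assert_has_calls([call(3), call(1)], any_order=True) # also passes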
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one."""
kall = call(*args, **kwargs)
if kall not in self.call_args_list:
expected_string = self._format_mock_call_signature(args, kwargs)
raise AssertionError(
'%s call not found' % expected_string
)
def _get_child_mock(self, **kw):
"""Create the child mocks for attributes and return value.
By default child mocks will be the same type as the parent.
Subclasses of Mock may want to override this to customize the way
child mocks are made.
For non-callable mocks the callable variant will be used (rather than
any custom subclass)."""
_type = type(self)
if not issubclass(_type, CallableMixin):
if issubclass(_type, NonCallableMagicMock):
klass = MagicMock
elif issubclass(_type, NonCallableMock):
klass = Mock
else:
klass = _type.__mro__[1]
return klass(**kw)
def _try_iter(obj):
if obj is None:
return obj
if _is_exception(obj):
return obj
if _callable(obj):
return obj
try:
return iter(obj)
except TypeError:
# XXXX backwards compatibility
# but this will blow up on first call - so maybe we should fail early?
return obj
class CallableMixin(Base):
def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
wraps=None, name=None, spec_set=None, parent=None,
_spec_state=None, _new_name='', _new_parent=None, **kwargs):
self.__dict__['_mock_return_value'] = return_value
_safe_super(CallableMixin, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state, _new_name, _new_parent, **kwargs
)
self.side_effect = side_effect
def _mock_check_sig(self, *args, **kwargs):
# stub method that can be replaced with one with a specific signature
pass
def __call__(_mock_self, *args, **kwargs):
# can't use self in case a function / method we are mocking uses self
# in the signature
_mock_self._mock_check_sig(*args, **kwargs)
return _mock_self._mock_call(*args, **kwargs)
def _mock_call(_mock_self, *args, **kwargs):
self = _mock_self
self.called = True
self.call_count += 1
self.call_args = _Call((args, kwargs), two=True)
self.call_args_list.append(_Call((args, kwargs), two=True))
_new_name = self._mock_new_name
_new_parent = self._mock_new_parent
self.mock_calls.append(_Call(('', args, kwargs)))
seen = set()
skip_next_dot = _new_name == '()'
do_method_calls = self._mock_parent is not None
name = self._mock_name
while _new_parent is not None:
this_mock_call = _Call((_new_name, args, kwargs))
if _new_parent._mock_new_name:
dot = '.'
if skip_next_dot:
dot = ''
skip_next_dot = False
if _new_parent._mock_new_name == '()':
skip_next_dot = True
_new_name = _new_parent._mock_new_name + dot + _new_name
if do_method_calls:
if _new_name == name:
this_method_call = this_mock_call
else:
this_method_call = _Call((name, args, kwargs))
_new_parent.method_calls.append(this_method_call)
do_method_calls = _new_parent._mock_parent is not None
if do_method_calls:
name = _new_parent._mock_name + '.' + name
_new_parent.mock_calls.append(this_mock_call)
_new_parent = _new_parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
_new_parent_id = id(_new_parent)
if _new_parent_id in seen:
break
seen.add(_new_parent_id)
ret_val = DEFAULT
effect = self.side_effect
if effect is not None:
if _is_exception(effect):
raise effect
if not _callable(effect):
result = next(effect)
if _is_exception(result):
raise result
if result is DEFAULT:
result = self.return_value
return result
ret_val = effect(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
if (self._mock_wraps is not None and
self._mock_return_value is DEFAULT):
return self._mock_wraps(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
return ret_val
class Mock(CallableMixin, NonCallableMock):
"""
Create a new `Mock` object. `Mock` takes several optional arguments
that specify the behaviour of the Mock object:
* `spec`: This can be either a list of strings or an existing object (a
class or instance) that acts as the specification for the mock object. If
you pass in an object then a list of strings is formed by calling dir on
the object (excluding unsupported magic attributes and methods). Accessing
any attribute not in this list will raise an `AttributeError`.
If `spec` is an object (rather than a list of strings) then
`mock.__class__` returns the class of the spec object. This allows mocks
to pass `isinstance` tests.
* `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
or get an attribute on the mock that isn't on the object passed as
`spec_set` will raise an `AttributeError`.
* `side_effect`: A function to be called whenever the Mock is called. See
the `side_effect` attribute. Useful for raising exceptions or
dynamically changing return values. The function is called with the same
arguments as the mock, and unless it returns `DEFAULT`, the return
value of this function is used as the return value.
If `side_effect` is an iterable then each call to the mock will return
the next value from the iterable. If any of the members of the iterable
are exceptions they will be raised instead of returned.
* `return_value`: The value returned when the mock is called. By default
this is a new Mock (created on first access). See the
`return_value` attribute.
* `wraps`: Item for the mock object to wrap. If `wraps` is not None then
calling the Mock will pass the call through to the wrapped object
(returning the real result). Attribute access on the mock will return a
Mock object that wraps the corresponding attribute of the wrapped object
(so attempting to access an attribute that doesn't exist will raise an
`AttributeError`).
If the mock has an explicit `return_value` set then calls are not passed
to the wrapped object and the `return_value` is returned instead.
* `name`: If the mock has a name then it will be used in the repr of the
mock. This can be useful for debugging. The name is propagated to child
mocks.
Mocks can also be called with arbitrary keyword arguments. These will be
used to set attributes on the mock after it is created.
"""
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
return thing
def _is_started(patcher):
# XXXX horrible
return hasattr(patcher, 'is_local')
class _patch(object):
attribute_name = None
_active_patches = set()
def __init__(
self, getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
):
if new_callable is not None:
if new is not DEFAULT:
raise ValueError(
"Cannot use 'new' and 'new_callable' together"
)
if autospec is not None:
raise ValueError(
"Cannot use 'autospec' and 'new_callable' together"
)
self.getter = getter
self.attribute = attribute
self.new = new
self.new_callable = new_callable
self.spec = spec
self.create = create
self.has_local = False
self.spec_set = spec_set
self.autospec = autospec
self.kwargs = kwargs
self.additional_patchers = []
def copy(self):
patcher = _patch(
self.getter, self.attribute, self.new, self.spec,
self.create, self.spec_set,
self.autospec, self.new_callable, self.kwargs
)
patcher.attribute_name = self.attribute_name
patcher.additional_patchers = [
p.copy() for p in self.additional_patchers
]
return patcher
def __call__(self, func):
if isinstance(func, type):
return self.decorate_class(func)
return self.decorate_callable(func)
def decorate_class(self, klass):
for attr in dir(klass):
if not attr.startswith(patch.TEST_PREFIX):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
patcher = self.copy()
setattr(klass, attr, patcher(attr_value))
return klass
def decorate_callable(self, func):
if hasattr(func, 'patchings'):
func.patchings.append(self)
return func
@wraps(func)
def patched(*args, **keywargs):
extra_args = []
entered_patchers = []
exc_info = tuple()
try:
for patching in patched.patchings:
arg = patching.__enter__()
entered_patchers.append(patching)
if patching.attribute_name is not None:
keywargs.update(arg)
elif patching.new is DEFAULT:
extra_args.append(arg)
args += tuple(extra_args)
return func(*args, **keywargs)
except:
if (patching not in entered_patchers and
_is_started(patching)):
# the patcher may have been started, but an exception
# raised whilst entering one of its additional_patchers
entered_patchers.append(patching)
# Pass the exception to __exit__
exc_info = sys.exc_info()
# re-raise the exception
raise
finally:
for patching in reversed(entered_patchers):
patching.__exit__(*exc_info)
patched.patchings = [self]
return patched
def get_original(self):
target = self.getter()
name = self.attribute
original = DEFAULT
local = False
try:
original = target.__dict__[name]
except (AttributeError, KeyError):
original = getattr(target, name, DEFAULT)
else:
local = True
if not self.create and original is DEFAULT:
raise AttributeError(
"%s does not have the attribute %r" % (target, name)
)
return original, local
def __enter__(self):
"""Perform the patch."""
new, spec, spec_set = self.new, self.spec, self.spec_set
autospec, kwargs = self.autospec, self.kwargs
new_callable = self.new_callable
self.target = self.getter()
# normalise False to None
if spec is False:
spec = None
if spec_set is False:
spec_set = None
if autospec is False:
autospec = None
if spec is not None and autospec is not None:
raise TypeError("Can't specify spec and autospec")
if ((spec is not None or autospec is not None) and
spec_set not in (True, None)):
raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
original, local = self.get_original()
if new is DEFAULT and autospec is None:
inherit = False
if spec is True:
# set spec to the object we are replacing
spec = original
if spec_set is True:
spec_set = original
spec = None
elif spec is not None:
if spec_set is True:
spec_set = spec
spec = None
elif spec_set is True:
spec_set = original
if spec is not None or spec_set is not None:
if original is DEFAULT:
raise TypeError("Can't use 'spec' with create=True")
if isinstance(original, type):
# If we're patching out a class and there is a spec
inherit = True
Klass = MagicMock
_kwargs = {}
if new_callable is not None:
Klass = new_callable
elif spec is not None or spec_set is not None:
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if _is_list(this_spec):
not_callable = '__call__' not in this_spec
else:
not_callable = not callable(this_spec)
if not_callable:
Klass = NonCallableMagicMock
if spec is not None:
_kwargs['spec'] = spec
if spec_set is not None:
_kwargs['spec_set'] = spec_set
# add a name to mocks
if (isinstance(Klass, type) and
issubclass(Klass, NonCallableMock) and self.attribute):
_kwargs['name'] = self.attribute
_kwargs.update(kwargs)
new = Klass(**_kwargs)
if inherit and _is_instance_mock(new):
# we can only tell if the instance should be callable if the
# spec is not a list
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if (not _is_list(this_spec) and not
_instance_callable(this_spec)):
Klass = NonCallableMagicMock
_kwargs.pop('name')
new.return_value = Klass(_new_parent=new, _new_name='()',
**_kwargs)
elif autospec is not None:
# spec is ignored, new *must* be default, spec_set is treated
# as a boolean. Should we check spec is not None and that spec_set
# is a bool?
if new is not DEFAULT:
raise TypeError(
"autospec creates the mock for you. Can't specify "
"autospec and new."
)
if original is DEFAULT:
raise TypeError("Can't use 'autospec' with create=True")
spec_set = bool(spec_set)
if autospec is True:
autospec = original
new = create_autospec(autospec, spec_set=spec_set,
_name=self.attribute, **kwargs)
elif kwargs:
# can't set keyword args when we aren't creating the mock
# XXXX If new is a Mock we could call new.configure_mock(**kwargs)
raise TypeError("Can't pass kwargs to a mock we aren't creating")
new_attr = new
self.temp_original = original
self.is_local = local
setattr(self.target, self.attribute, new_attr)
if self.attribute_name is not None:
extra_args = {}
if self.new is DEFAULT:
extra_args[self.attribute_name] = new
for patching in self.additional_patchers:
arg = patching.__enter__()
if patching.new is DEFAULT:
extra_args.update(arg)
return extra_args
return new
def __exit__(self, *exc_info):
"""Undo the patch."""
if not _is_started(self):
raise RuntimeError('stop called on unstarted patcher')
if self.is_local and self.temp_original is not DEFAULT:
setattr(self.target, self.attribute, self.temp_original)
else:
delattr(self.target, self.attribute)
if not self.create and not hasattr(self.target, self.attribute):
# needed for proxy objects like django settings
setattr(self.target, self.attribute, self.temp_original)
del self.temp_original
del self.is_local
del self.target
for patcher in reversed(self.additional_patchers):
if _is_started(patcher):
patcher.__exit__(*exc_info)
def start(self):
"""Activate a patch, returning any created mock."""
result = self.__enter__()
self._active_patches.add(self)
return result
def stop(self):
"""Stop an active patch."""
self._active_patches.discard(self)
return self.__exit__()
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
target, attribute, new=DEFAULT, spec=None,
create=False, spec_set=None, autospec=None,
new_callable=None, **kwargs
):
"""
patch the named member (`attribute`) on an object (`target`) with a mock
object.
`patch.object` can be used as a decorator, class decorator or a context
manager. Arguments `new`, `spec`, `create`, `spec_set`,
`autospec` and `new_callable` have the same meaning as for `patch`. Like
`patch`, `patch.object` takes arbitrary keyword arguments for configuring
the mock object it creates.
When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
getter = lambda: target
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
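# e.g. (sketch, `SomeClass` is hypothetical):
# with patch.object(SomeClass, 'method', return_value=42) as mocked:
#     assert SomeClass().method() == 42
# mocked.assert_called_once_with()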
def _patch_multiple(target, spec=None, create=False, spec_set=None,
autospec=None, new_callable=None, **kwargs):
"""Perform multiple patches in a single call. It takes the object to be
patched (either as an object or a string to fetch the object by importing)
and keyword arguments for the patches::
with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
...
Use `DEFAULT` as the value if you want `patch.multiple` to create
mocks for you. In this case the created mocks are passed into a decorated
function by keyword, and a dictionary is returned when `patch.multiple` is
used as a context manager.
`patch.multiple` can be used as a decorator, class decorator or a context
manager. The arguments `spec`, `spec_set`, `create`,
`autospec` and `new_callable` have the same meaning as for `patch`. These
arguments will be applied to *all* patches done by `patch.multiple`.
When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
if type(target) is str:
getter = lambda: _importer(target)
else:
getter = lambda: target
if not kwargs:
raise ValueError(
'Must supply at least one keyword argument with patch.multiple'
)
# need to wrap in a list for python 3, where items is a view
items = list(kwargs.items())
attribute, new = items[0]
patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
patcher.attribute_name = attribute
for attribute, new in items[1:]:
this_patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
this_patcher.attribute_name = attribute
patcher.additional_patchers.append(this_patcher)
return patcher
def patch(
target, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs
):
"""
`patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock will be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common set of patches. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases.
"""
getter, attribute = _get_target(target)
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
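# The three usage forms described above, sketched against a hypothetical
# target 'mymodule.Thing':
#
# @patch('mymodule.Thing') # decorator: the mock is passed as an extra argument
# def test_decorated(mock_thing):
#     ...
#
# with patch('mymodule.Thing') as mock_thing: # context manager: mock returned
#     ...
#
# patcher = patch('mymodule.Thing') # manual control via start() / stop()
# mock_thing = patcher.start()
# patcher.stop()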
class _patch_dict(object):
"""
Patch a dictionary, or dictionary like object, and restore the dictionary
to its original state after the test.
`in_dict` can be a dictionary or a mapping like container. If it is a
mapping then it must at least support getting, setting and deleting items
plus iterating over keys.
`in_dict` can also be a string specifying the name of the dictionary, which
will then be fetched by importing it.
`values` can be a dictionary of values to set in the dictionary. `values`
can also be an iterable of `(key, value)` pairs.
If `clear` is True then the dictionary will be cleared before the new
values are set.
`patch.dict` can also be called with arbitrary keyword arguments to set
values in the dictionary::
with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
...
`patch.dict` can be used as a context manager, decorator or class
decorator. When used as a class decorator `patch.dict` honours
`patch.TEST_PREFIX` for choosing which methods to wrap.
"""
def __init__(self, in_dict, values=(), clear=False, **kwargs):
if isinstance(in_dict, str):
in_dict = _importer(in_dict)
self.in_dict = in_dict
# support any argument supported by dict(...) constructor
self.values = dict(values)
self.values.update(kwargs)
self.clear = clear
self._original = None
def __call__(self, f):
if isinstance(f, type):
return self.decorate_class(f)
@wraps(f)
def _inner(*args, **kw):
self._patch_dict()
try:
return f(*args, **kw)
finally:
self._unpatch_dict()
return _inner
def decorate_class(self, klass):
for attr in dir(klass):
attr_value = getattr(klass, attr)
if (attr.startswith(patch.TEST_PREFIX) and
hasattr(attr_value, "__call__")):
decorator = _patch_dict(self.in_dict, self.values, self.clear)
decorated = decorator(attr_value)
setattr(klass, attr, decorated)
return klass
def __enter__(self):
"""Patch the dict."""
self._patch_dict()
def _patch_dict(self):
values = self.values
in_dict = self.in_dict
clear = self.clear
try:
original = in_dict.copy()
except AttributeError:
# dict like object with no copy method
# must support iteration over keys
original = {}
for key in in_dict:
original[key] = in_dict[key]
self._original = original
if clear:
_clear_dict(in_dict)
try:
in_dict.update(values)
except AttributeError:
# dict like object with no update method
for key in values:
in_dict[key] = values[key]
def _unpatch_dict(self):
in_dict = self.in_dict
original = self._original
_clear_dict(in_dict)
try:
in_dict.update(original)
except AttributeError:
for key in original:
in_dict[key] = original[key]
def __exit__(self, *args):
"""Unpatch the dict."""
self._unpatch_dict()
return False
start = __enter__
stop = __exit__
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
def _patch_stopall():
"""Stop all active patches."""
for patch in list(_patch._active_patches):
patch.stop()
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.stopall = _patch_stopall
patch.TEST_PREFIX = 'test'
magic_methods = (
"lt le gt ge eq ne "
"getitem setitem delitem "
"len contains iter "
"hash str sizeof "
"enter exit "
"divmod neg pos abs invert "
"complex int float index "
"trunc floor ceil "
"bool next "
)
numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
_non_defaults = set('__%s__' % method for method in [
'get', 'set', 'delete', 'reversed', 'missing', 'reduce', 'reduce_ex',
'getinitargs', 'getnewargs', 'getstate', 'setstate', 'getformat',
'setformat', 'repr', 'dir', 'subclasses', 'format',
])
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
_magics = set(
'__%s__' % method for method in
' '.join([magic_methods, numerics, inplace, right]).split()
)
_all_magics = _magics | _non_defaults
_unsupported_magics = set([
'__getattr__', '__setattr__',
'__init__', '__new__', '__prepare__',
'__instancecheck__', '__subclasscheck__',
'__del__'
])
_calculate_return_value = {
'__hash__': lambda self: object.__hash__(self),
'__str__': lambda self: object.__str__(self),
'__sizeof__': lambda self: object.__sizeof__(self),
}
_return_values = {
'__lt__': NotImplemented,
'__gt__': NotImplemented,
'__le__': NotImplemented,
'__ge__': NotImplemented,
'__int__': 1,
'__contains__': False,
'__len__': 0,
'__exit__': False,
'__complex__': 1j,
'__float__': 1.0,
'__bool__': True,
'__index__': 1,
}
def _get_eq(self):
def __eq__(other):
ret_val = self.__eq__._mock_return_value
if ret_val is not DEFAULT:
return ret_val
return self is other
return __eq__
def _get_ne(self):
def __ne__(other):
# honour a configured return value (as __eq__ does) rather than
# returning the DEFAULT sentinel itself
ret_val = self.__ne__._mock_return_value
if ret_val is not DEFAULT:
return ret_val
return self is not other
return __ne__
def _get_iter(self):
def __iter__():
ret_val = self.__iter__._mock_return_value
if ret_val is DEFAULT:
return iter([])
# if ret_val was already an iterator, then calling iter on it should
# return the iterator unchanged
return iter(ret_val)
return __iter__
_side_effect_methods = {
'__eq__': _get_eq,
'__ne__': _get_ne,
'__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
fixed = _return_values.get(name, DEFAULT)
if fixed is not DEFAULT:
method.return_value = fixed
return
return_calculator = _calculate_return_value.get(name)
if return_calculator is not None:
try:
return_value = return_calculator(mock)
except AttributeError:
# XXXX why do we return AttributeError here?
# set it as a side_effect instead?
return_value = AttributeError(name)
method.return_value = return_value
return
side_effector = _side_effect_methods.get(name)
if side_effector is not None:
method.side_effect = side_effector(mock)
class MagicMixin(object):
def __init__(self, *args, **kw):
_safe_super(MagicMixin, self).__init__(*args, **kw)
self._mock_set_magics()
def _mock_set_magics(self):
these_magics = _magics
if self._mock_methods is not None:
these_magics = _magics.intersection(self._mock_methods)
remove_magics = set()
remove_magics = _magics - these_magics
for entry in remove_magics:
if entry in type(self).__dict__:
# remove unneeded magic methods
delattr(self, entry)
# don't overwrite existing attributes if called a second time
these_magics = these_magics - set(type(self).__dict__)
_type = type(self)
for entry in these_magics:
setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
"""A version of `MagicMock` that isn't callable."""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
"""
MagicMock is a subclass of Mock with default implementations
of most of the magic methods. You can use MagicMock without having to
configure the magic methods yourself.
If you use the `spec` or `spec_set` arguments then *only* magic
methods that exist in the spec will be created.
Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
"""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
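# The preconfigured magic methods give sensible defaults, e.g.:
# m = MagicMock()
# len(m) # -> 0
# int(m) # -> 1
# bool(m) # -> True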
class MagicProxy(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __call__(self, *args, **kwargs):
m = self.create_mock()
return m(*args, **kwargs)
def create_mock(self):
entry = self.name
parent = self.parent
m = parent._get_child_mock(name=entry, _new_name=entry,
_new_parent=parent)
setattr(parent, entry, m)
_set_return_value(parent, m, entry)
return m
def __get__(self, obj, _type=None):
return self.create_mock()
class _ANY(object):
"A helper object that compares equal to everything."
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
args_string = ', '.join([repr(arg) for arg in args])
kwargs_string = ', '.join([
'%s=%r' % (key, value) for key, value in kwargs.items()
])
if args_string:
formatted_args = args_string
if kwargs_string:
if formatted_args:
formatted_args += ', '
formatted_args += kwargs_string
return message % formatted_args
class _Call(tuple):
"""
A tuple for holding the results of a call to a mock, either in the form
`(args, kwargs)` or `(name, args, kwargs)`.
If args or kwargs are empty then a call tuple will compare equal to
a tuple without those values. This makes comparisons less verbose::
_Call(('name', (), {})) == ('name',)
_Call(('name', (1,), {})) == ('name', (1,))
_Call(((), {'a': 'b'})) == ({'a': 'b'},)
The `_Call` object provides a useful shortcut for comparing with call::
_Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
_Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
If the _Call has no name then it will match any name.
"""
def __new__(cls, value=(), name=None, parent=None, two=False,
from_kall=True):
name = ''
args = ()
kwargs = {}
_len = len(value)
if _len == 3:
name, args, kwargs = value
elif _len == 2:
first, second = value
if isinstance(first, str):
name = first
if isinstance(second, tuple):
args = second
else:
kwargs = second
else:
args, kwargs = first, second
elif _len == 1:
value, = value
if isinstance(value, str):
name = value
elif isinstance(value, tuple):
args = value
else:
kwargs = value
if two:
return tuple.__new__(cls, (args, kwargs))
return tuple.__new__(cls, (name, args, kwargs))
def __init__(self, value=(), name=None, parent=None, two=False,
from_kall=True):
self.name = name
self.parent = parent
self.from_kall = from_kall
def __eq__(self, other):
if other is ANY:
return True
try:
len_other = len(other)
except TypeError:
return False
self_name = ''
if len(self) == 2:
self_args, self_kwargs = self
else:
self_name, self_args, self_kwargs = self
other_name = ''
if len_other == 0:
other_args, other_kwargs = (), {}
elif len_other == 3:
other_name, other_args, other_kwargs = other
elif len_other == 1:
value, = other
if isinstance(value, tuple):
other_args = value
other_kwargs = {}
elif isinstance(value, str):
other_name = value
other_args, other_kwargs = (), {}
else:
other_args = ()
other_kwargs = value
else:
# len 2
# could be (name, args) or (name, kwargs) or (args, kwargs)
first, second = other
if isinstance(first, str):
other_name = first
if isinstance(second, tuple):
other_args, other_kwargs = second, {}
else:
other_args, other_kwargs = (), second
else:
other_args, other_kwargs = first, second
if self_name and other_name != self_name:
return False
# this order is important for ANY to work!
return (other_args, other_kwargs) == (self_args, self_kwargs)
def __ne__(self, other):
return not self.__eq__(other)
def __call__(self, *args, **kwargs):
if self.name is None:
return _Call(('', args, kwargs), name='()')
name = self.name + '()'
return _Call((self.name, args, kwargs), name=name, parent=self)
def __getattr__(self, attr):
if self.name is None:
return _Call(name=attr, from_kall=False)
name = '%s.%s' % (self.name, attr)
return _Call(name=name, parent=self, from_kall=False)
def __repr__(self):
if not self.from_kall:
name = self.name or 'call'
if name.startswith('()'):
name = 'call%s' % name
return name
if len(self) == 2:
name = 'call'
args, kwargs = self
else:
name, args, kwargs = self
if not name:
name = 'call'
elif not name.startswith('()'):
name = 'call.%s' % name
else:
name = 'call%s' % name
return _format_call_signature(name, args, kwargs)
def call_list(self):
"""For a call object that represents multiple calls, `call_list`
returns a list of all the intermediate calls as well as the
final call."""
vals = []
thing = self
while thing is not None:
if thing.from_kall:
vals.append(thing)
thing = thing.parent
return _CallList(reversed(vals))
call = _Call(from_kall=False)
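# Example sketch (not part of the original module): `call` records chained
# attribute access, and call_list() expands a chained call into all of its
# intermediate calls, matching what a MagicMock records in mock_calls.
def _example_call_list():
    m = MagicMock()
    m.factory(important=True).deliver()
    assert m.mock_calls == call.factory(important=True).deliver().call_list()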
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
_name=None, **kwargs):
"""Create a mock object using another object as a spec. Attributes on the
mock will use the corresponding attribute on the `spec` object as their
spec.
    Functions or methods being mocked will have their arguments checked
    to ensure that they are called with the correct signature.
If `spec_set` is True then attempting to set attributes that don't exist
on the spec object will raise an `AttributeError`.
If a class is used as a spec then the return value of the mock (the
instance of the class) will have the same spec. You can use a class as the
spec for an instance object by passing `instance=True`. The returned mock
will only be callable if instances of the mock are callable.
`create_autospec` also takes arbitrary keyword arguments that are passed to
the constructor of the created mock."""
if _is_list(spec):
# can't pass a list instance to the mock constructor as it will be
# interpreted as a list of strings
spec = type(spec)
is_type = isinstance(spec, type)
_kwargs = {'spec': spec}
if spec_set:
_kwargs = {'spec_set': spec}
elif spec is None:
# None we mock with a normal mock without a spec
_kwargs = {}
_kwargs.update(kwargs)
Klass = MagicMock
if type(spec) in DescriptorTypes:
# descriptors don't have a spec
# because we don't know what type they return
_kwargs = {}
elif not _callable(spec):
Klass = NonCallableMagicMock
elif is_type and instance and not _instance_callable(spec):
Klass = NonCallableMagicMock
_new_name = _name
if _parent is None:
# for a top level object no _new_name should be set
_new_name = ''
mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
name=_name, **_kwargs)
if isinstance(spec, FunctionTypes):
# should only happen at the top level because we don't
# recurse for functions
mock = _set_signature(mock, spec)
else:
_check_signature(spec, mock, is_type, instance)
if _parent is not None and not instance:
_parent._mock_children[_name] = mock
if is_type and not instance and 'return_value' not in kwargs:
mock.return_value = create_autospec(spec, spec_set, instance=True,
_name='()', _parent=mock)
for entry in dir(spec):
if _is_magic(entry):
# MagicMock already does the useful magic methods for us
continue
# XXXX do we need a better way of getting attributes without
# triggering code execution (?) Probably not - we need the actual
# object to mock it so we would rather trigger a property than mock
# the property descriptor. Likewise we want to mock out dynamically
# provided attributes.
# XXXX what about attributes that raise exceptions other than
# AttributeError on being fetched?
# we could be resilient against it, or catch and propagate the
# exception when the attribute is fetched from the mock
try:
original = getattr(spec, entry)
except AttributeError:
continue
kwargs = {'spec': original}
if spec_set:
kwargs = {'spec_set': original}
if not isinstance(original, FunctionTypes):
new = _SpecState(original, spec_set, mock, entry, instance)
mock._mock_children[entry] = new
else:
parent = mock
if isinstance(spec, FunctionTypes):
parent = mock.mock
new = MagicMock(parent=parent, name=entry, _new_name=entry,
_new_parent=parent, **kwargs)
mock._mock_children[entry] = new
skipfirst = _must_skip(spec, entry, is_type)
_check_signature(original, new, skipfirst=skipfirst)
# so functions created with _set_signature become instance attributes,
# *plus* their underlying mock exists in _mock_children of the parent
# mock. Adding to _mock_children may be unnecessary where we are also
# setting as an instance attribute?
if isinstance(new, FunctionTypes):
setattr(mock, entry, new)
return mock
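# Example sketch (not part of the original module): autospeccing a class.
# The mock's methods share the spec'd signatures, so calling with the wrong
# arguments raises TypeError instead of silently passing.
def _example_create_autospec():
    class Target(object):
        def method(self, a, b):
            return a + b
    mock = create_autospec(Target)
    instance = mock.return_value    # specced like a Target instance
    instance.method(1, 2)           # matches the real signature
    try:
        instance.method(1)          # wrong arity is rejected
    except TypeError:
        pass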
def _must_skip(spec, entry, is_type):
if not isinstance(spec, type):
if entry in getattr(spec, '__dict__', {}):
# instance attribute - shouldn't skip
return False
spec = spec.__class__
for klass in spec.__mro__:
result = klass.__dict__.get(entry, DEFAULT)
if result is DEFAULT:
continue
if isinstance(result, (staticmethod, classmethod)):
return False
return is_type
# shouldn't get here unless function is a dynamically provided attribute
# XXXX untested behaviour
return is_type
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# it is possible for objects to have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
FunctionTypes = (
# python function
type(create_autospec),
# instance method
type(ANY.__eq__),
)
file_spec = None
def mock_open(mock=None, read_data=''):
"""
A helper function to create a mock to replace the use of `open`. It works
for `open` called directly or used as a context manager.
The `mock` argument is the mock object to configure. If `None` (the
default) then a `MagicMock` will be created for you, with the API limited
to methods or attributes available on standard file handles.
`read_data` is a string for the `read` method of the file handle to return.
This is an empty string by default.
"""
global file_spec
if file_spec is None:
import _io
file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
if mock is None:
mock = MagicMock(name='open', spec=open)
handle = MagicMock(spec=file_spec)
handle.write.return_value = None
handle.__enter__.return_value = handle
handle.read.return_value = read_data
mock.return_value = handle
return mock
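# Example sketch (not part of the original module): replacing `open` both
# as a direct call and as a context manager.
def _example_mock_open():
    m = mock_open(read_data='some text')
    with m('any-file.txt') as handle:
        assert handle.read() == 'some text'
    m.assert_called_once_with('any-file.txt')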
class PropertyMock(Mock):
"""
A mock intended to be used as a property, or other descriptor, on a class.
`PropertyMock` provides `__get__` and `__set__` methods so you can specify
a return value when it is fetched.
Fetching a `PropertyMock` instance from an object calls the mock, with
no args. Setting it calls the mock with the value being set.
"""
def _get_child_mock(self, **kwargs):
return MagicMock(**kwargs)
def __get__(self, obj, obj_type):
return self()
def __set__(self, obj, val):
self(val)
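# Example sketch (not part of the original module): like any descriptor, a
# PropertyMock must be attached to the *type*, not the instance.
def _example_property_mock():
    class Thing(object):
        pass
    size = PropertyMock(return_value=3)
    Thing.size = size          # descriptors live on the type
    thing = Thing()
    assert thing.size == 3     # fetching calls the mock with no args
    thing.size = 5             # setting calls the mock with the value
    size.assert_called_with(5)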
| gpl-3.0 |
ehashman/oh-mainline | vendor/packages/Pygments/pygments/lexers/dalvik.py | 364 | 3442 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dalvik
~~~~~~~~~~~~~~~~~~~~~~
Pygments lexers for Dalvik VM-related languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Keyword, Text, Comment, Name, String, Number, \
Punctuation
__all__ = ['SmaliLexer']
class SmaliLexer(RegexLexer):
"""
For `Smali <http://code.google.com/p/smali/>`_ (Android/Dalvik) assembly
code.
*New in Pygments 1.6.*
"""
name = 'Smali'
aliases = ['smali']
filenames = ['*.smali']
mimetypes = ['text/smali']
tokens = {
'root': [
include('comment'),
include('label'),
include('field'),
include('method'),
include('class'),
include('directive'),
include('access-modifier'),
include('instruction'),
include('literal'),
include('punctuation'),
include('type'),
include('whitespace')
],
'directive': [
(r'^[ \t]*\.(class|super|implements|field|subannotation|annotation|'
r'enum|method|registers|locals|array-data|packed-switch|'
r'sparse-switch|catchall|catch|line|parameter|local|prologue|'
r'epilogue|source)', Keyword),
(r'^[ \t]*\.end (field|subannotation|annotation|method|array-data|'
'packed-switch|sparse-switch|parameter|local)', Keyword),
(r'^[ \t]*\.restart local', Keyword),
],
'access-modifier': [
(r'(public|private|protected|static|final|synchronized|bridge|'
r'varargs|native|abstract|strictfp|synthetic|constructor|'
r'declared-synchronized|interface|enum|annotation|volatile|'
r'transient)', Keyword),
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
],
'instruction': [
(r'\b[vp]\d+\b', Name.Builtin), # registers
(r'\b[a-z][A-Za-z0-9/-]+\s+', Text), # instructions
],
'literal': [
(r'".*"', String),
(r'0x[0-9A-Fa-f]+t?', Number.Hex),
(r'[0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+L?', Number.Integer),
],
'field': [
(r'(\$?\b)([A-Za-z0-9_$]*)(:)',
bygroups(Punctuation, Name.Variable, Punctuation)),
],
'method': [
(r'<(?:cl)?init>', Name.Function), # constructor
(r'(\$?\b)([A-Za-z0-9_$]*)(\()',
bygroups(Punctuation, Name.Function, Punctuation)),
],
'label': [
(r':[A-Za-z0-9_]+', Name.Label),
],
'class': [
# class names in the form Lcom/namespace/ClassName;
# I only want to color the ClassName part, so the namespace part is
# treated as 'Text'
(r'(L)((?:[A-Za-z0-9_$]+/)*)([A-Za-z0-9_$]+)(;)',
bygroups(Keyword.Type, Text, Name.Class, Text)),
],
'punctuation': [
(r'->', Punctuation),
(r'[{},\(\):=\.-]', Punctuation),
],
'type': [
(r'[ZBSCIJFDV\[]+', Keyword.Type),
],
'comment': [
(r'#.*?\n', Comment),
],
}
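# Example sketch (not part of the original module): highlighting a Smali
# snippet with this lexer through pygments' standard highlight API.
def _example_highlight():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    code = '.method public getName()Ljava/lang/String;'
    return highlight(code, SmaliLexer(), TerminalFormatter())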
| agpl-3.0 |
rain2o/collective.pfg.skiplogic | setup.py | 1 | 1050 | from setuptools import setup, find_packages
import os
version = '0.1'
setup(name='collective.pfg.skiplogic',
version=version,
description="Adds skip logic capabilities to ploneformgen forms",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='',
author='',
author_email='',
url='http://svn.plone.org/svn/collective/',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['collective', 'collective.pfg'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
| gpl-2.0 |
adviti/melange | thirdparty/google_appengine/lib/django_1_2/tests/regressiontests/middleware_exceptions/tests.py | 51 | 1441 | import sys
from django.test import TestCase
from django.core.signals import got_request_exception
class TestException(Exception):
pass
class TestMiddleware(object):
def process_request(self, request):
raise TestException('Test Exception')
class MiddlewareExceptionTest(TestCase):
def setUp(self):
self.exceptions = []
got_request_exception.connect(self._on_request_exception)
self.client.handler.load_middleware()
def tearDown(self):
got_request_exception.disconnect(self._on_request_exception)
self.exceptions = []
def _on_request_exception(self, sender, request, **kwargs):
self.exceptions.append(sys.exc_info())
def test_process_request(self):
self.client.handler._request_middleware.insert(0, TestMiddleware().process_request)
try:
response = self.client.get('/')
except TestException, e:
# Test client indefinitely re-raises any exceptions being raised
# during request handling. Hence actual testing that exception was
# properly handled is done by relying on got_request_exception
# signal being sent.
pass
except Exception, e:
self.fail("Unexpected exception: %s" % e)
self.assertEquals(len(self.exceptions), 1)
exception, value, tb = self.exceptions[0]
self.assertEquals(value.args, ('Test Exception', ))
| apache-2.0 |
yasserglez/tagfs | packages/tagfs/contrib/django/db/backends/postgresql/introspection.py | 9 | 3694 | from django.db.backends import BaseDatabaseIntrospection
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'IPAddressField',
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
}
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return cursor.description
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
cursor.execute("""
SELECT con.conkey, con.confkey, c2.relname
FROM pg_constraint con, pg_class c1, pg_class c2
WHERE c1.oid = con.conrelid
AND c2.oid = con.confrelid
AND c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
try:
# row[0] and row[1] are like "{2}", so strip the curly braces.
relations[int(row[0][1:-1]) - 1] = (int(row[1][1:-1]) - 1, row[2])
except ValueError:
continue
return relations
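    # For example, if the table's first column is a foreign key to the
    # first column of "other_table", this returns {0: (0, 'other_table')}.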
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute("""
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s""", [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
return indexes
| mit |
Enchufa2/ns-3-dev-git | examples/tutorial/first.py | 102 | 2128 | # /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
import ns.applications
import ns.core
import ns.internet
import ns.network
import ns.point_to_point
ns.core.LogComponentEnable("UdpEchoClientApplication", ns.core.LOG_LEVEL_INFO)
ns.core.LogComponentEnable("UdpEchoServerApplication", ns.core.LOG_LEVEL_INFO)
nodes = ns.network.NodeContainer()
nodes.Create(2)
pointToPoint = ns.point_to_point.PointToPointHelper()
pointToPoint.SetDeviceAttribute("DataRate", ns.core.StringValue("5Mbps"))
pointToPoint.SetChannelAttribute("Delay", ns.core.StringValue("2ms"))
devices = pointToPoint.Install(nodes)
stack = ns.internet.InternetStackHelper()
stack.Install(nodes)
address = ns.internet.Ipv4AddressHelper()
address.SetBase(ns.network.Ipv4Address("10.1.1.0"),
ns.network.Ipv4Mask("255.255.255.0"))
interfaces = address.Assign(devices)
echoServer = ns.applications.UdpEchoServerHelper(9)
serverApps = echoServer.Install(nodes.Get(1))
serverApps.Start(ns.core.Seconds(1.0))
serverApps.Stop(ns.core.Seconds(10.0))
echoClient = ns.applications.UdpEchoClientHelper(interfaces.GetAddress(1), 9)
echoClient.SetAttribute("MaxPackets", ns.core.UintegerValue(1))
echoClient.SetAttribute("Interval", ns.core.TimeValue(ns.core.Seconds(1.0)))
echoClient.SetAttribute("PacketSize", ns.core.UintegerValue(1024))
clientApps = echoClient.Install(nodes.Get(0))
clientApps.Start(ns.core.Seconds(2.0))
clientApps.Stop(ns.core.Seconds(10.0))
ns.core.Simulator.Run()
ns.core.Simulator.Destroy()
| gpl-2.0 |
kaushik94/boto | boto/sdb/db/test_db.py | 33 | 5431 | import logging
import time
from datetime import datetime
from boto.sdb.db.model import Model
from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty
from boto.sdb.db.property import DateTimeProperty, FloatProperty, ReferenceProperty
from boto.sdb.db.property import PasswordProperty, ListProperty, MapProperty
from boto.exception import SDBPersistenceError
logging.basicConfig()
log = logging.getLogger('test_db')
log.setLevel(logging.DEBUG)
_objects = {}
#
# This will eventually be moved to the boto.tests module and become a real unit test
# but for now it will live here. It shows examples of each of the Property types in
# use and tests the basic operations.
#
class TestBasic(Model):
name = StringProperty()
size = IntegerProperty()
foo = BooleanProperty()
date = DateTimeProperty()
class TestFloat(Model):
name = StringProperty()
value = FloatProperty()
class TestRequired(Model):
req = StringProperty(required=True, default='foo')
class TestReference(Model):
ref = ReferenceProperty(reference_class=TestBasic, collection_name='refs')
class TestSubClass(TestBasic):
answer = IntegerProperty()
class TestPassword(Model):
password = PasswordProperty()
class TestList(Model):
name = StringProperty()
nums = ListProperty(int)
class TestMap(Model):
name = StringProperty()
map = MapProperty()
class TestListReference(Model):
name = StringProperty()
basics = ListProperty(TestBasic)
class TestAutoNow(Model):
create_date = DateTimeProperty(auto_now_add=True)
modified_date = DateTimeProperty(auto_now=True)
class TestUnique(Model):
name = StringProperty(unique=True)
def test_basic():
global _objects
t = TestBasic()
t.name = 'simple'
t.size = -42
t.foo = True
t.date = datetime.now()
log.debug('saving object')
t.put()
_objects['test_basic_t'] = t
time.sleep(5)
log.debug('now try retrieving it')
tt = TestBasic.get_by_id(t.id)
_objects['test_basic_tt'] = tt
assert tt.id == t.id
l = TestBasic.get_by_id([t.id])
assert len(l) == 1
assert l[0].id == t.id
assert t.size == tt.size
assert t.foo == tt.foo
assert t.name == tt.name
#assert t.date == tt.date
return t
def test_float():
global _objects
t = TestFloat()
t.name = 'float object'
t.value = 98.6
log.debug('saving object')
t.save()
_objects['test_float_t'] = t
time.sleep(5)
log.debug('now try retrieving it')
tt = TestFloat.get_by_id(t.id)
_objects['test_float_tt'] = tt
assert tt.id == t.id
assert tt.name == t.name
assert tt.value == t.value
return t
def test_required():
global _objects
t = TestRequired()
_objects['test_required_t'] = t
t.put()
return t
def test_reference(t=None):
global _objects
if not t:
t = test_basic()
tt = TestReference()
tt.ref = t
tt.put()
time.sleep(10)
tt = TestReference.get_by_id(tt.id)
_objects['test_reference_tt'] = tt
assert tt.ref.id == t.id
for o in t.refs:
log.debug(o)
def test_subclass():
global _objects
t = TestSubClass()
_objects['test_subclass_t'] = t
t.name = 'a subclass'
t.size = -489
t.save()
def test_password():
global _objects
t = TestPassword()
_objects['test_password_t'] = t
t.password = "foo"
t.save()
time.sleep(5)
# Make sure it stored ok
tt = TestPassword.get_by_id(t.id)
_objects['test_password_tt'] = tt
#Testing password equality
assert tt.password == "foo"
#Testing password not stored as string
assert str(tt.password) != "foo"
def test_list():
global _objects
t = TestList()
_objects['test_list_t'] = t
t.name = 'a list of ints'
t.nums = [1, 2, 3, 4, 5]
t.put()
tt = TestList.get_by_id(t.id)
_objects['test_list_tt'] = tt
assert tt.name == t.name
for n in tt.nums:
assert isinstance(n, int)
def test_list_reference():
global _objects
t = TestBasic()
t.put()
_objects['test_list_ref_t'] = t
tt = TestListReference()
tt.name = "foo"
tt.basics = [t]
tt.put()
time.sleep(5)
_objects['test_list_ref_tt'] = tt
ttt = TestListReference.get_by_id(tt.id)
assert ttt.basics[0].id == t.id
def test_unique():
global _objects
t = TestUnique()
name = 'foo' + str(int(time.time()))
t.name = name
t.put()
_objects['test_unique_t'] = t
time.sleep(10)
tt = TestUnique()
_objects['test_unique_tt'] = tt
tt.name = name
try:
tt.put()
assert False
except(SDBPersistenceError):
pass
def test_datetime():
global _objects
t = TestAutoNow()
t.put()
_objects['test_datetime_t'] = t
time.sleep(5)
tt = TestAutoNow.get_by_id(t.id)
assert tt.create_date.timetuple() == t.create_date.timetuple()
def test():
log.info('test_basic')
t1 = test_basic()
log.info('test_required')
test_required()
log.info('test_reference')
test_reference(t1)
log.info('test_subclass')
test_subclass()
log.info('test_password')
test_password()
log.info('test_list')
test_list()
log.info('test_list_reference')
test_list_reference()
log.info("test_datetime")
test_datetime()
log.info('test_unique')
test_unique()
if __name__ == "__main__":
test()
| mit |
benregn/itu-courses | itu/pipelines.py | 1 | 1027 | import pymongo
from scrapy.exceptions import DropItem
from scrapy.conf import settings
from scrapy import log
class MongoDBPipeline(object):
def __init__(self):
connection = pymongo.Connection(
settings['MONGODB_SERVER'], settings['MONGODB_PORT'])
db = connection[settings['MONGODB_DB']]
self.collection = db[settings['MONGODB_COLLECTION']]
def process_item(self, item, spider):
valid = True
for data in item:
            # here we only check that each field has a non-empty value,
            # but we could do any crazy validation we want
            if not item.get(data):
                valid = False
                raise DropItem(
                    "Missing %s course from %s" % (data, item['url']))
if valid:
self.collection.insert(dict(item))
log.msg("Item written to MongoDB database %s/%s" %
(settings['MONGODB_DB'], settings['MONGODB_COLLECTION']),
level=log.DEBUG, spider=spider)
return item
| mit |
mnip91/proactive-component-monitoring | dev/scripts/perf/perf_graph.py | 12 | 2516 | #!/usr/bin/env python
import sys
import os
import string
import numpy as np
import matplotlib.pyplot as plt
import re
def main():
dir = sys.argv[1]
    if len(sys.argv) == 2:
        dict = create_dict(dir)
        draw_graph(dict, 'all')  # default title when no pattern is given
else:
for i in range(2, len(sys.argv)):
dict = create_dict(dir, sys.argv[i])
draw_graph(dict, sys.argv[i])
def create_dict(rootdir, match='.*'):
pattern = re.compile(match)
dict = {}
for branch in os.listdir(rootdir):
branch_dict = {}
for test in os.listdir(os.path.join(rootdir, branch)):
if pattern.match(test):
file = open(os.path.join(rootdir, branch, test))
str = file.readline()
str = str.strip()
start = str.find("=")
if start != -1:
branch_dict[test] = round(string.atof(str[start+1:]),2)
else:
branch_dict[test] = -1.
dict[branch] = branch_dict
return dict
def get_all_test_name(dict):
for branch in dict:
return dict[branch].keys()
def get_branches(dict):
return dict.keys()
def compare_by_branch(dict):
def local_print(test, d):
print test
for t in d:
print "\t" + t + "\t" + str(d[t])
print
for test in get_all_test_name(dict):
local_dict = {}
for branch in dict:
local_dict[branch] = dict[branch][test]
local_print(test, local_dict)
def short_test_name(long_name):
return long_name[long_name.rfind('.Test')+5:]
def draw_graph(dict, title):
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
ha='center', va='bottom')
def set_legend(bars, branches):
bs = ()
for bar in bars:
bs = bs + (bar[0],)
ax.legend( bs, branches)
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'b']
branches = get_branches(dict)
all_tests = get_all_test_name(dict)
N = len(all_tests)
ind = np.arange(N)
width = 0.35
fig = plt.figure()
ax = fig.add_subplot(111)
data_sets = []
for branch in branches:
data =()
for test in all_tests:
data = data + (dict[branch].get(test, 0),)
data_sets.append(data)
bars = []
counter = 0
for data in data_sets:
bar = ax.bar(ind + (counter*width), data, width, color=colors[counter])
bars.append(bar)
counter += 1
    # add axis labels, title, tick labels and legend
ax.set_ylabel('Perf')
ax.set_title('Branch perf comparison for ' + title)
ax.set_xticks(ind+width)
ax.set_xticklabels(map(short_test_name, all_tests))
set_legend(bars, branches)
for bar in bars:
autolabel(bar)
plt.savefig(title + ".png")
if __name__ == "__main__":
main()
| agpl-3.0 |
HonzaKral/django | django/apps/config.py | 121 | 8077 | import os
from importlib import import_module
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils._os import upath
from django.utils.module_loading import module_has_submodule
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
"""
Class representing a Django application and its configuration.
"""
def __init__(self, app_name, app_module):
# Full Python path to the application eg. 'django.contrib.admin'.
self.name = app_name
# Root module for the application eg. <module 'django.contrib.admin'
# from 'django/contrib/admin/__init__.pyc'>.
self.module = app_module
# The following attributes could be defined at the class level in a
# subclass, hence the test-and-set pattern.
# Last component of the Python path to the application eg. 'admin'.
# This value must be unique across a Django project.
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
# Human-readable name for the application eg. "Admin".
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
# Filesystem path to the application directory eg.
# u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
# Python 2 and a str on Python 3.
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
# Module containing models eg. <module 'django.contrib.admin.models'
# from 'django/contrib/admin/models.pyc'>. Set by import_models().
# None if the application doesn't have a models module.
self.models_module = None
# Mapping of lower case model names to model classes. Initially set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python 3.3 _NamespacePath does not
# support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return upath(paths[0])
@classmethod
def create(cls, entry):
"""
Factory that creates an app config from an entry in INSTALLED_APPS.
"""
try:
# If import_module succeeds, entry is a path to an app module,
# which may specify an app config class with default_app_config.
# Otherwise, entry is a path to an app config class or an error.
module = import_module(entry)
except ImportError:
# Track that importing as an app module failed. If importing as an
# app config class fails too, we'll trigger the ImportError again.
module = None
mod_path, _, cls_name = entry.rpartition('.')
# Raise the original exception when entry cannot be a path to an
# app config class.
if not mod_path:
raise
else:
try:
# If this works, the app module specifies an app config class.
entry = module.default_app_config
except AttributeError:
# Otherwise, it simply uses the default app config class.
return cls(entry, module)
else:
mod_path, _, cls_name = entry.rpartition('.')
# If we're reaching this point, we must attempt to load the app config
# class located at <mod_path>.<cls_name>
mod = import_module(mod_path)
try:
cls = getattr(mod, cls_name)
except AttributeError:
if module is None:
# If importing as an app module failed, that error probably
# contains the most informative traceback. Trigger it again.
import_module(entry)
else:
raise
# Check for obvious errors. (This check prevents duck typing, but
# it could be removed if it became a problem in practice.)
if not issubclass(cls, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
try:
app_name = cls.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry)
# Ensure app_name points to a valid module.
app_module = import_module(app_name)
# Entry is a path to an app config class.
return cls(app_name, app_module)
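    # For reference, create() accepts two entry forms from INSTALLED_APPS:
    #   'django.contrib.admin'                  -> an app module, which may
    #       name a default_app_config class
    #   'django.contrib.admin.apps.AdminConfig' -> an AppConfig subclass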
def check_models_ready(self):
"""
Raises an exception if models haven't been imported yet.
"""
if self.models is None:
raise AppRegistryNotReady(
"Models for app '%s' haven't been imported yet." % self.label)
def get_model(self, model_name):
"""
Returns the model with the given case-insensitive model_name.
Raises LookupError if no model exists with this name.
"""
self.check_models_ready()
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
"""
Returns an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
self.check_models_ready()
for model in self.models.values():
if model._deferred and not include_deferred:
continue
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self, all_models):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
# Injected as a parameter because it gets populated when models are
# imported, which might happen before populate() imports models.
self.models = all_models
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
| bsd-3-clause |
robertnishihara/ray | streaming/python/tests/test_word_count.py | 1 | 1689 | import os
import ray
from ray.streaming import StreamingContext
def test_word_count():
ray.init(_load_code_from_local=True)
ctx = StreamingContext.Builder() \
.build()
ctx.read_text_file(__file__) \
.set_parallelism(1) \
.flat_map(lambda x: x.split()) \
.map(lambda x: (x, 1)) \
.key_by(lambda x: x[0]) \
.reduce(lambda old_value, new_value:
(old_value[0], old_value[1] + new_value[1])) \
.filter(lambda x: "ray" not in x) \
.sink(lambda x: print("result", x))
ctx.submit("word_count")
import time
time.sleep(3)
ray.shutdown()
def test_simple_word_count():
ray.init(_load_code_from_local=True)
ctx = StreamingContext.Builder() \
.build()
sink_file = "/tmp/ray_streaming_test_simple_word_count.txt"
if os.path.exists(sink_file):
os.remove(sink_file)
def sink_func(x):
with open(sink_file, "a") as f:
line = "{}:{},".format(x[0], x[1])
print("sink_func", line)
f.write(line)
ctx.from_values("a", "b", "c") \
.set_parallelism(1) \
.flat_map(lambda x: [x, x]) \
.map(lambda x: (x, 1)) \
.key_by(lambda x: x[0]) \
.reduce(lambda old_value, new_value:
(old_value[0], old_value[1] + new_value[1])) \
.sink(sink_func)
ctx.submit("word_count")
import time
time.sleep(3)
ray.shutdown()
with open(sink_file, "r") as f:
result = f.read()
assert "a:2" in result
assert "b:2" in result
assert "c:2" in result
if __name__ == "__main__":
test_word_count()
test_simple_word_count()
| apache-2.0 |
letolab/airy | airy/utils/cache.py | 1 | 9676 | """
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import re
import time
from airy.core.conf import settings
from airy.core.cache import get_cache
from airy.utils.encoding import smart_str, iri_to_uri
from airy.utils.http import http_date
from airy.utils.hashcompat import md5_constructor
from airy.utils.translation import get_language
from airy.http import HttpRequest
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
This function patches the Cache-Control header by adding all
keyword arguments to it. The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return t[0] + '=' + smart_str(t[1])
if response.has_header('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict([dictitem(el) for el in cc])
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(cc['max-age'], kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join([dictvalue(el) for el in cc.items()])
response['Cache-Control'] = cc
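# For example, on a response carrying "Cache-Control: private, max-age=600",
# patch_cache_control(response, max_age=300, public=True) produces a header
# containing "public" and "max-age=300": the smaller max-age wins and
# `public` displaces `private`.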
def get_max_age(response):
"""
Returns the max-age from the response Cache-Control header as an integer
    (or ``None`` if it wasn't found or wasn't an integer).
"""
if not response.has_header('Cache-Control'):
return
cc = dict([_to_tuple(el) for el in
cc_delim_re.split(response['Cache-Control'])])
if 'max-age' in cc:
try:
return int(cc['max-age'])
except (ValueError, TypeError):
pass
def patch_response_headers(response, cache_timeout=None):
"""
Adds some useful headers to the given HttpResponse object:
ETag, Last-Modified, Expires and Cache-Control
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if settings.USE_ETAGS and not response.has_header('ETag'):
response['ETag'] = '"%s"' % md5_constructor(response.content).hexdigest()
if not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date()
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Adds headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set([header.lower() for header in vary_headers])
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
def has_vary_header(response, header_query):
"""
Checks to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = set([header.lower() for header in vary_headers])
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If enabled, returns the cache key ending with a locale."""
if settings.USE_I18N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Returns a cache key from the headers given in the header list."""
ctx = md5_constructor()
for header in headerlist:
value = request.META.get(header, None)
if value is not None:
ctx.update(value)
path = md5_constructor(iri_to_uri(request.get_full_path()))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, request.method, path.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Returns a cache key for the header cache."""
path = md5_constructor(iri_to_uri(request.get_full_path()))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, path.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Returns a cache key based on the request path and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global path registry and uses those to build a cache key
to check against.
If there is no headerlist stored, the page needs to be rebuilt, so this
function returns None.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
headerlist = cache.get(cache_key, None)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learns what headers to take into account for some request path from the
response object. It stores those headers in a global path registry so that
later access to that path will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
if response.has_header('Vary'):
headerlist = ['HTTP_'+header.upper().replace('-', '_')
for header in cc_delim_re.split(response['Vary'])]
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.get_full_path()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
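# Typical flow (sketch): caching middleware calls learn_cache_key() when it
# stores a response, then get_cache_key() on the next request for the same
# path to rebuild the identical key from the remembered Vary headers:
#   key = learn_cache_key(request, response, 300, key_prefix)
#   ...
#   key = get_cache_key(request, key_prefix)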
def _to_tuple(s):
t = s.split('=',1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
| bsd-2-clause |
tboyce021/home-assistant | homeassistant/components/timer/reproduce_state.py | 16 | 2247 | """Reproduce an Timer state."""
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import (
ATTR_DURATION,
DOMAIN,
SERVICE_CANCEL,
SERVICE_PAUSE,
SERVICE_START,
STATUS_ACTIVE,
STATUS_IDLE,
STATUS_PAUSED,
)
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {STATUS_IDLE, STATUS_ACTIVE, STATUS_PAUSED}
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if cur_state.state == state.state and cur_state.attributes.get(
ATTR_DURATION
) == state.attributes.get(ATTR_DURATION):
return
service_data = {ATTR_ENTITY_ID: state.entity_id}
if state.state == STATUS_ACTIVE:
service = SERVICE_START
if ATTR_DURATION in state.attributes:
service_data[ATTR_DURATION] = state.attributes[ATTR_DURATION]
elif state.state == STATUS_PAUSED:
service = SERVICE_PAUSE
elif state.state == STATUS_IDLE:
service = SERVICE_CANCEL
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Timer states."""
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
| apache-2.0 |
webbhorn/netgroups | tools/perf/util/setup.py | 242 | 1531 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, liblk],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
837468220/python-for-android | python3-alpha/python3-src/Lib/test/test_sys.py | 47 | 32005 | import unittest, test.support
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
try:
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit as exc:
self.assertEqual(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit(42)
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit((42,))
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit as exc:
self.assertEqual(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit as exc:
self.assertEqual(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def check_exit_message(code, expected, env=None):
process = subprocess.Popen([sys.executable, "-c", code],
stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 1)
self.assertTrue(stderr.startswith(expected),
"%s doesn't start with %s" % (ascii(stderr), ascii(expected)))
# test that stderr buffer if flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'latin-1'
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", env=env)
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'fatal error if run with a trace function')
def test_recursionlimit_recovery(self):
# NOTE: this test is slightly fragile in that it depends on the current
# recursion count when executing the test being low enough so as to
# trigger the recursion recovery detection in the _Py_MakeEndRecCheck
# macro (see ceval.h).
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for i in (50, 1000):
# Issue #5392: stack overflow after hitting recursion limit twice
sys.setrecursionlimit(i)
self.assertRaises(RuntimeError, f)
self.assertRaises(RuntimeError, f)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
if os.name == "nt":
raise unittest.SkipTest(
"under Windows, test would generate a spurious crash dialog")
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RuntimeError:
f()
sys.setrecursionlimit(%d)
f()""")
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertTrue(
b"Fatal Python error: Cannot recover from stack overflow" in err,
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
import threading, _thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(_thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = _thread.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 5)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
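# Aside: the loop above is a Fermat probable-prime check. For a prime p and
# any 1 <= x < p, pow(x, p - 1, p) == 1; most composites fail it, e.g.
# (illustrative) pow(2, 8, 9) == 4, so 9 would be rejected.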
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug", "division_warning",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, "\xa2\n".encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
def test_executable(self):
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a nonexistent program name and Python is unable to
# retrieve the real program name.
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
elif sys.platform == 'win32':
expected = 'mbcs'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
class SizeofTest(unittest.TestCase):
TPFLAGS_HAVE_GC = 1<<14
TPFLAGS_HEAPTYPE = 1<<9
def setUp(self):
self.c = len(struct.pack('c', b' '))
self.H = len(struct.pack('H', 0))
self.i = len(struct.pack('i', 0))
self.l = len(struct.pack('l', 0))
self.P = len(struct.pack('P', 0))
# due to missing size_t information from struct, it is assumed that
# sizeof(Py_ssize_t) = sizeof(void*)
self.header = 'PP'
self.vheader = self.header + 'P'
if hasattr(sys, "gettotalrefcount"):
self.header += '2P'
self.vheader += '2P'
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.support.unlink(test.support.TESTFN)
def check_sizeof(self, o, size):
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
size += self.gc_headsize
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
self.assertEqual(result, size, msg)
def calcsize(self, fmt):
"""Wrapper around struct.calcsize which enforces the alignment of the
end of a structure to the alignment requirement of pointer.
Note: This wrapper should only be used if a pointer member is included
and no member with a size larger than a pointer exists.
"""
return struct.calcsize(fmt + '0P')
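# Illustrative only (values assume a typical 64-bit build): struct.calcsize('Pi')
# is 12, but the wrapper computes struct.calcsize('Pi0P') == 16, padding the
# struct tail out to pointer alignment as the docstring describes.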
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
h = self.header
vh = self.vheader
size = self.calcsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size(vh) + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), size(vh + 'PP') + gc_header_size)
def test_default(self):
h = self.header
vh = self.vheader
size = self.calcsize
self.assertEqual(sys.getsizeof(True), size(vh) + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size(vh) + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# bool
check(True, size(vh) + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size(h + '3P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
# bytearray_iterator
check(iter(bytearray()), size(h + 'PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size(h + 'P'))
# code
check(get_cell().__code__, size(h + '5i8Pi3P'))
# complex
check(complex(0,1), size(h + '2d'))
# method_descriptor (descriptor object)
check(str.lower, size(h + '2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size(h + '2PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size(h + '2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size(h + '2P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size(h + '2P'))
# dict
check({}, size(h + '3P2P' + 8*'P2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
# dictionary-keyiterator
check({}.keys(), size(h + 'P'))
# dictionary-valueiterator
check({}.values(), size(h + 'P'))
# dictionary-itemiterator
check({}.items(), size(h + 'P'))
# dictproxy
class C(object): pass
check(C.__dict__, size(h + 'P'))
# BaseException
check(BaseException(), size(h + '5P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size(h + '5P 2P2PP'))
# UnicodeDecodeError
# XXX
# check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size(h + '5P 2P2PP'))
# ellipsis
check(Ellipsis, size(h + ''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size(h + '32B2iB'))
# enumerate
check(enumerate([]), size(h + 'l3P'))
# reverse
check(reversed(''), size(h + 'PP'))
# float
check(float(0), size(h + 'd'))
# sys.floatinfo
check(sys.float_info, size(vh) + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size(h + '11P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size(h + 'P'))
# classmethod
check(bar, size(h + 'P'))
# generator
def get_gen(): yield 1
check(get_gen(), size(h + 'Pi2P'))
# iterator
check(iter('abc'), size(h + 'lP'))
# callable-iterator
import re
check(re.finditer('',''), size(h + '2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, size(vh + 'PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size(h + 'lP'))
# listreverseiterator (list)
check(reversed([]), size(h + 'lP'))
# long
check(0, size(vh))
check(1, size(vh) + self.longdigit)
check(-1, size(vh) + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), size(vh) + 2*self.longdigit)
check(int(PyLong_BASE**2-1), size(vh) + 2*self.longdigit)
check(int(PyLong_BASE**2), size(vh) + 3*self.longdigit)
# memory
check(memoryview(b''), size(h + 'PP2P2i7P'))
# module
check(unittest, size(h + '3P'))
# None
check(None, size(h + ''))
# NotImplementedType
check(NotImplemented, size(h))
# object
check(object(), size(h + ''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size(h + '4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size(h + '4l'))
# reverse
check(reversed(''), size(h + 'PP'))
# range
check(range(1), size(h + '4P'))
check(range(66000), size(h + '4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
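# In other words (illustrative): a set grows by doubling newsize from
# PySet_MINSIZE (8) until it exceeds 2 * len(sample), so len(sample) == 10
# gives minused == 20 and a table of newsize == 32 slots.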
# setiterator
check(iter(set()), size(h + 'P3P'))
# slice
check(slice(0), size(h + '3P'))
# super
check(super(int), size(h + '3P'))
# tuple
check((), size(vh))
check((1,2,3), size(vh) + 3*self.P)
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = size(vh + 'P2P15Pl4PP9PP11PI') + size('16Pi17P 3P 10P 2P 2P')
check(int, s)
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# unicode
usize = len('\0'.encode('unicode-internal'))
samples = ['', '1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
basicsize = size(h + 'PPPiP') + usize * (len(s) + 1)
check(s, basicsize)
# weakref
import weakref
check(weakref.ref(int), size(h + '2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size(h + '2Pl2P'))
def test_pythontypes(self):
# check all types defined in Python/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(h + ''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(h + ''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size(h + '2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, size(vh) + self.P * len(sys.flags))
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
znick/anytask | anytask/users/models.py | 1 | 9320 | # -*- coding: utf-8 -*-
import logging
import os
from courses.models import Course
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from groups.models import Group
from mail.models import Message
from users.model_user_status import UserStatus
from years.common import get_current_year
from anytask.storage import OverwriteStorage
logger = logging.getLogger('django.request')
def get_upload_path(instance, filename):
return os.path.join('images', 'user_%d' % instance.user.id, filename)
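# Illustrative (hypothetical user id): for a profile whose user has id 7,
# get_upload_path(profile, 'avatar.png') returns 'images/user_7/avatar.png'.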
class UserProfile(models.Model):
user = models.OneToOneField(User, db_index=True, null=False, blank=False, unique=True, related_name='profile')
middle_name = models.CharField(max_length=128, db_index=True, null=True, blank=True)
user_status = models.ManyToManyField(UserStatus, db_index=True, blank=True, related_name='users_by_status')
avatar = models.ImageField('profile picture', upload_to=get_upload_path, blank=True, null=True,
storage=OverwriteStorage())
birth_date = models.DateField(blank=True, null=True)
info = models.TextField(default="", blank=True, null=True)
phone = models.CharField(max_length=128, null=True, blank=True)
city_of_residence = models.CharField(max_length=191, null=True, blank=True)
university = models.CharField(max_length=191, null=True, blank=True)
university_in_process = models.BooleanField(null=False, blank=False, default=False)
university_class = models.CharField(max_length=191, null=True, blank=True)
university_department = models.CharField(max_length=191, null=True, blank=True)
university_year_end = models.CharField(max_length=191, null=True, blank=True)
additional_info = models.TextField(null=True, blank=True)
unit = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
position = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
academic_degree = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
academic_title = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
show_email = models.BooleanField(db_index=False, null=False, blank=False, default=True)
send_my_own_events = models.BooleanField(db_index=False, null=False, blank=False, default=False)
unread_messages = models.ManyToManyField(Message, blank=True, related_name='unread_messages')
deleted_messages = models.ManyToManyField(Message, blank=True, related_name='deleted_messages')
send_notify_messages = models.ManyToManyField(Message, blank=True, related_name='send_notify_messages')
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)
login_via_yandex = models.BooleanField(db_index=False, null=False, blank=False, default=False)
ya_uid = models.IntegerField(null=True, blank=True)
ya_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_contest_uid = models.CharField(max_length=191, null=True, blank=True)
ya_contest_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_contest_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_uid = models.CharField(max_length=191, null=True, blank=True)
ya_passport_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_email = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
telegram_uid = models.IntegerField(default=None, null=True, blank=True)
notify_in_telegram = models.BooleanField(default=False, null=False, blank=False)
language = models.CharField(default="ru", max_length=128, unique=False, null=True, blank=True)
time_zone = models.TextField(null=False, blank=False, default='Europe/Moscow')
location = models.TextField(null=True, blank=True, default="")
def is_current_year_student(self):
return Group.objects.filter(year=get_current_year()).filter(students=self.user).count() > 0
def __unicode__(self):
return unicode(self.user)
def is_active(self):
for status in self.user_status.all():
if status.tag in ('not_active', 'academic'):
return False
return True
def set_status(self, new_status):
if not isinstance(new_status, UserStatus):
new_status = UserStatus.objects.get(id=new_status)
if new_status.type:
self.user_status.remove(*self.user_status.filter(type=new_status.type))
self.user_status.add(new_status)
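# Usage sketch (hypothetical status id): profile.set_status(3) looks up the
# UserStatus, drops any currently assigned status of the same type, then adds
# the new one, so a user keeps at most one status per type.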
def get_unread_count(self):
return self.unread_messages.exclude(id__in=self.deleted_messages.all()).count()
def can_sync_contest(self):
for course in Course.objects.filter(is_active=True):
if course.get_user_group(self.user) and course.send_to_contest_from_users:
return True
return False
class UserProfileLog(models.Model):
user = models.ForeignKey(User, db_index=True, null=False, blank=False, related_name='profiles_logs_by_user')
middle_name = models.CharField(max_length=128, db_index=True, null=True, blank=True)
user_status = models.ManyToManyField(UserStatus, db_index=True, blank=True)
avatar = models.ImageField('profile picture', upload_to=get_upload_path, blank=True, null=True,
storage=OverwriteStorage())
birth_date = models.DateField(blank=True, null=True)
info = models.TextField(default="", blank=True, null=True)
phone = models.CharField(max_length=128, null=True, blank=True)
city_of_residence = models.CharField(max_length=191, null=True, blank=True)
university = models.CharField(max_length=191, null=True, blank=True)
university_in_process = models.BooleanField(null=False, blank=False, default=False)
university_class = models.CharField(max_length=50, null=True, blank=True)
university_department = models.CharField(max_length=191, null=True, blank=True)
university_year_end = models.CharField(max_length=20, null=True, blank=True)
additional_info = models.TextField(null=True, blank=True)
unit = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
position = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
academic_degree = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
academic_title = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
show_email = models.BooleanField(db_index=False, null=False, blank=False, default=True)
send_my_own_events = models.BooleanField(db_index=False, null=False, blank=False, default=False)
unread_messages = models.ManyToManyField(Message, blank=True, related_name='log_unread_messages')
deleted_messages = models.ManyToManyField(Message, blank=True, related_name='log_deleted_messages')
send_notify_messages = models.ManyToManyField(Message, blank=True, related_name='log_send_notify_messages')
added_time = models.DateTimeField(auto_now_add=True) # remove default=timezone.now
update_time = models.DateTimeField(auto_now=True) # remove default=timezone.now
login_via_yandex = models.BooleanField(db_index=False, null=False, blank=False, default=True)
ya_uid = models.IntegerField(null=True, blank=True)
ya_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_contest_uid = models.IntegerField(null=True, blank=True)
ya_contest_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_contest_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_uid = models.IntegerField(null=True, blank=True)
ya_passport_oauth = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_login = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
ya_passport_email = models.CharField(default="", max_length=128, unique=False, null=True, blank=True)
telegram_uid = models.IntegerField(default=None, null=True, blank=True)
notify_in_telegram = models.BooleanField(default=False, null=False, blank=False)
language = models.CharField(default="ru", max_length=128, unique=False, null=True, blank=True)
updated_by = models.ForeignKey(User, db_index=False, null=True, blank=True)
def is_current_year_student(self):
return Group.objects.filter(year=get_current_year()).filter(students=self.user).count() > 0
def __unicode__(self):
return unicode(self.user)
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
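# Illustrative: User.objects.create(username='demo') now triggers the
# post_save signal wired above, so a matching UserProfile row is created
# automatically alongside every new User.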
| mit |
QQuick/Transcrypt | transcrypt/modules/org/transcrypt/autotester/__init__.py | 1 | 12645 | # First run a test from the command prompt, generating an HTML file.
# The output of the test is stored in a DIV.
# Also the script is automatically included in the HTML file.
# Loading the HTML file will run the script.
# This will compare the output of the script running in the browswer to the output in the DIV.
# If those two match, the test reports OK, else it reports failure.
from org.transcrypt.stubs.browser import __main__, __envir__, __pragma__
from org.transcrypt.autotester.html import HTMLGenerator, DataConverter, JSTesterUI, itemsAreEqual
# Don't import __envir__ from __base__ since it will overwrite __builtin__.__envir__ in the browser
# Import from stubs will be skipped in the browser
# ... The ice is a bit thin here
__pragma__ ('nokwargs')
import itertools
def getFileLocation(ancestor):
""" This function needs to crawl up the stack
and find out where the ancestor caller of
this function was in the source code of either the
Python or JavaScript, depending on the environment.
@param ancestor the ancestor of this function that
we want to capture file information about.
@return string indicating the file position and line number
"""
if __envir__.executor_name == __envir__.transpiler_name: # js
s = None
__pragma__('js', '{}',
'''
var e = new Error();
if ( ! e.stack ) {
console.log("MAJOR ISSUE: Browser Error lacks Stack");
} else {
s = e.stack;
}
''')
# Now we will process the stack to find the grandparent
# calling function
# @note - I'm explicitly not including a 're' module
# dependency here
frames = None
__pragma__('js', '{}',
'''
var linereg = new RegExp("\\n\\r|\\n", "g");
frames = s.toString().split(linereg);
''')
if ( frames is None or (len(frames) < 2)):
__pragma__('js', '{}', 'console.log("Failed to Split Stack");')
return("UNKNOWN:???")
# @note - if the call stack in transcrypts javascript
# translation changes then this index may need to change
# @todo - need more work here to determine this because
# this is fragile
gpFrame = frames[(ancestor*2 + 1)]
# This regex splits the string coming from the javascript
# stacktrace so that we can connect the file and line number
# runTests (http://localhost:8080/run/autotest.js:3159:8)
# func URL filename lineno:colno
# Group 1 = function
# Group 2 & 3 = protocol and hostname
# Group 4 = Path on this host (filename is at the end)
# Group 5 = lineno
# Group 6 = column number in file
frameReg = r"([^(]*)\(?([^:]*:)\/{2,3}([^:/]*:?)([^:]*):(\d+):(\d+)"
m = None
__pragma__('js', '{}',
'''
var r = new RegExp(frameReg);
m = r.exec(gpFrame);
''')
if m:
filepath = m[4]
# Split the filepath and take the last element
# to the get filename
pathParts = filepath.split("/")
filename = pathParts[len(pathParts)-1]
lineno = m[5]
return( "{}:{}".format(filename, lineno) )
else:
__pragma__('js', '{}', 'console.log("Failed to Match Frame", gpFrame);')
return("UNKNOWN:???")
#ELSE
# Needed because Transcrypt imports are compile time
__pragma__("skip")
from inspect import getframeinfo, stack
s = stack()
caller = getframeinfo(s[ancestor][0])
# Trim the file name path so that we don't get
# a lot of unnecessary content
filepath = caller.filename
# @todo - this is a hack - we should use os.path
pathParts = filepath.split('/')
filename = "/".join(pathParts[-2:])
return( "%s:%d" % (filename, caller.lineno))
__pragma__ ('noskip')
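# Illustrative only (hypothetical file and line): a call such as
# getFileLocation(2) from inside check() yields a string like
# "autotest/basics.py:42" under CPython and "basics.py:42" when running the
# Transcrypt-generated JavaScript, which is why positions line up between runs.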
class AutoTester:
""" Main testing class for comparing CPython to Transcrypt. This
class is primarily used by calling the "check" method to confirm that
the result is the same in both environments and "done" when all checks
for a particular module have been completed.
"""
def __init__ (self, symbols = []):
self.symbols = symbols
# refDict/testDict contains the test results
# of each testlet identified by name as the key
self._currTestlet = "UNKNOWN"
self.testDict = {}
self.refDict = {}
if __envir__.executor_name == __envir__.transpiler_name:
self.ui = JSTesterUI()
else:
self.ui = None
def sortedRepr (self, any):
# When using sets or dicts, use elements or keys
# of one type, in sort order
def tryGetNumKey (key):
if type (key) == str: # Try to interpret key as numerical, see comment with repr function in __builtins__
try:
return int (key)
except:
try:
return float (key)
except:
return key
else:
return key
if type (any) == dict:
return '{' + ', '.join ([
'{}: {}'.format (repr (key), repr (any [key]))
for index, key in enumerate (sorted ([tryGetNumKey (key) for key in any.keys ()], key = lambda aKey: str (aKey)))
]) + '}'
elif type (any) == set:
if len (any):
return '{' + ', '.join (sorted ([str (item) for item in list (any)])) + '}'
else:
return repr (any)
elif type (any) == range:
return repr (list (any))
else:
return repr (any)
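# Illustrative: sortedRepr({3, 1, 2}) yields '{1, 2, 3}' in both environments,
# giving unordered containers a stable textual form for comparison.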
__pragma__('kwargs')
def check (self, *args, ancestor = 2):
""" Given a set of values from either the python or transcrypt
environments, we log the position of the check call in the test
and representative values of the passed arguments for later
comparison.
"""
position=getFileLocation(ancestor)
# N.B. stubs.browser provides a special sorting repr
item = ' '.join ([self.sortedRepr (arg) for arg in args])
if __envir__.executor_name == __envir__.transpiler_name:
self.testDict[self._currTestlet].append((position,item))
else:
self.refDict[self._currTestlet].append((position,item))
__pragma__('nokwargs')
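# Illustrative (hypothetical position): a call like tester.check('sum', 1 + 1)
# records the tuple ("mytest.py:12", "'sum' 2") under the current testlet.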
def expectException(self, func):
""" This method attempts to call the passed method and
checks to see whether an exception was generated.
@return string indicating "no exception" or "exception"
"""
try:
func()
return("no exception")
except Exception as exc:
return("exception")
def throwToError(self, func):
""" This function invokes the passed function and then
converts an exception to an error response so that
the unit test can continue even in the case where an
exception may or may not occur.
"""
try:
return(func())
except Exception as exc:
return (None, "!!!{}".format(str(exc)))
def checkEval(self, func):
""" Check the result of the passed function which is
invoked without arguments. If this function throws an
exception, that exception is caught and converted to an error
with can be compared against the result. This allows the
user to control for exception that may or may not be generated
in the unit tests
"""
ret = self.throwToError(func)
self.check(ret, ancestor = 3)
def checkPad(self, val, count):
""" This method is to help manage flow control in unit tests and
keep all unit tests aligned
"""
for i in range(0, count):
self.check(val)
def _getTotalErrorCnt(self, testData, refData):
""" This method determines the total number of non-matching
values in the test and reference data for a particular module.
"""
errCount = 0
for i,(refPos, refItem) in enumerate(refData):
try:
testPos,testItem = testData[i]
if not itemsAreEqual (testItem, refItem):
errCount+=1
except:
errCount+=1
return(errCount)
def compare (self):
# Load the python reference data from the hidden HTML div
dc = DataConverter()
self.refDict = dc.getPythonResults()
totalErrors = 0
sKeys = sorted(self.refDict.keys())
for key in sKeys:
refData = self.refDict[key]
try:
testData = self.testDict[key]
if ( testData is None ):
raise KeyError("No Test Data Module: {}".format(key))
except KeyError:
# No Test Data found for this key - we will populate with
# errors for all ref data
self.ui.appendSeqRowName(key, len(refData))
for i,(refPos, refItem) in enumerate(refData):
self.ui.appendTableResult(key, None, None, refPos, refItem, False)
continue
# Now we have testData, so let's determine the total number of
# errors for this test module. This will allow us to both set
# the num of errors in the test module header row and set the
# rows to the appropriate initial collapsed/expanded state.
errCount= self._getTotalErrorCnt(testData, refData)
collapse = (errCount == 0)
self.ui.appendSeqRowName(key, errCount)
# Now we will populate the table with all the rows
# of data for the comparison
for i,(refPos, refItem) in enumerate(refData):
try:
# This will throw if testData's length is
# shorter than refData's
testPos,testItem = testData[i]
except:
testPos = None
testItem = None
self.ui.appendTableResult(
key, testPos, testItem, refPos, refItem, collapse
)
totalErrors += errCount
self.ui.setOutputStatus( totalErrors == 0 )
def _cleanName(self, name):
""" Clean the passed name of characters that won't be allowed
in CSS class or HTML id strings.
"""
# Convert testletName to replace any of the characters that
# are not acceptable in a CSS class or HTML id - this is to
# make our lives easier
# @note - I'm SPECIFICALLY not using a regex here because the
# regex engine module is still under dev and could possibly
# have issues
ret = name
invalidChars = [
'~', '!', '@', '$', '%',
'^', '&', '*', '(', ')',
'+', '=', ',', '.', '/',
"'", ';', ':', '"', '?',
'>', '<', '[', ']', '\\',
'{', '}', '|', '`', '#',
" ",
]
for ch in invalidChars:
ret = ret.replace(ch, "_")
return(ret)
def run (self, testlet, testletName):
testletName = self._cleanName(testletName)
self._currTestlet = testletName
if __envir__.executor_name == __envir__.transpiler_name:
self.testDict[self._currTestlet] = []
else:
self.refDict[self._currTestlet] = []
try:
testlet.run (self)
except Exception as exc:
if ( self.ui is not None ):
self.ui.setOutputStatus(False)
self.ui.showException(testletName, exc)
else:
# Error - No UI yet, reraise specific exception to enable finding out why
raise
def done (self):
if __envir__.executor_name == __envir__.transpiler_name:
self.compare ()
else:
fnameBase = __main__.__file__.replace ('\\', '/')
hg = HTMLGenerator(fnameBase)
hg.generate_html(self.refDict)
| apache-2.0 |
walterbender/Pippy | pippy_app.py | 2 | 59457 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007,2008,2009 Chris Ball, based on Collabora's
# "hellomesh" demo.
#
# Copyright (C) 2013,14 Walter Bender
# Copyright (C) 2013,14 Ignacio Rodriguez
# Copyright (C) 2013 Jorge Gomez
# Copyright (C) 2013,14 Sai Vineet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Pippy Activity: A simple Python programming activity ."""
import re
import os
import subprocess
from random import uniform
import locale
import json
import sys
from shutil import copy2
from signal import SIGTERM
from gettext import gettext as _
import uuid
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from gi import require_version
require_version('Gdk', '3.0')
require_version('Gtk', '3.0')
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import Pango
try:
require_version('Vte', '2.91')
except:
require_version('Vte', '2.90')
from gi.repository import Vte
from gi.repository import GObject
DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()
from sugar3.datastore import datastore
from sugar3.activity import activity as activity
from sugar3.activity.widgets import EditToolbar
from sugar3.activity.widgets import StopButton
from sugar3.activity.activity import get_bundle_path
from sugar3.graphics.alert import Alert
from sugar3.graphics.alert import ConfirmationAlert
from sugar3.graphics.alert import NotifyAlert
from sugar3.graphics.icon import Icon
from sugar3.graphics.objectchooser import ObjectChooser
from sugar3.graphics.toggletoolbutton import ToggleToolButton
from sugar3.graphics.toolbarbox import ToolbarButton
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.toolbarbox import ToolbarBox
from sugar3.activity.widgets import ActivityToolbarButton
from jarabe.view.customizebundle import generate_unique_id
from activity import ViewSourceActivity
from activity import TARGET_TYPE_TEXT
from collabwrapper import CollabWrapper
from filedialog import FileDialog
from icondialog import IconDialog
from notebook import SourceNotebook, tab_object
from toolbars import DevelopViewToolbar
import sound_check
import logging
text_buffer = None
# magic prefix to use utf-8 source encoding
PYTHON_PREFIX = '''#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
# Force category names into Pootle
DEFAULT_CATEGORIES = [_('graphics'), _('math'), _('python'), _('sound'),
_('string'), _('tutorials')]
_logger = logging.getLogger('pippy-activity')
DISTUTILS_SETUP_SCRIPT = """#!/usr/bin/python3
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(name='{modulename}',
version='1.0',
py_modules=[
{filenames}
],
)
""" # This is .format()'ed with the list of the file names.
DISTUTILS_SETUP_SCRIPT = """#!/usr/bin/python3
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(name='{modulename}',
version='1.0',
py_modules=[
{filenames}
],
)
""" # This is .format()'ed with the list of the file names.
def _has_new_vte_api():
try:
return (Vte.MAJOR_VERSION >= 0 and
Vte.MINOR_VERSION >= 38)
except:
# Really old versions of Vte don't have VERSION
return False
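# Illustrative: Vte >= 0.38 is the newer API generation (Vte.Terminal.spawn_sync,
# RGBA colours); older builds fall back to fork_command_full and Gdk.Color, as
# the call sites below select.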
def _find_object_id(activity_id, mimetype='text/x-python'):
''' Round-about way of accessing self._jobject.object_id '''
dsobjects, nobjects = datastore.find({'mime_type': [mimetype]})
for dsobject in dsobjects:
if 'activity_id' in dsobject.metadata and \
dsobject.metadata['activity_id'] == activity_id:
return dsobject.object_id
return None
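# Illustrative (hypothetical id): _find_object_id('a1b2-uuid') scans datastore
# entries of the given MIME type and returns the object_id whose metadata
# carries that activity_id, or None if nothing matches.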
class PippyActivity(ViewSourceActivity):
'''Pippy Activity as specified in activity.info'''
def __init__(self, handle):
self._pippy_instance = self
self.session_data = [] # Used to manage saving
self._loaded_session = [] # Used to manage tabs
self._py_file_loaded_from_journal = False
self._py_object_id = None
self._dialog = None
sys.path.append(os.path.join(self.get_activity_root(), 'Library'))
ViewSourceActivity.__init__(self, handle)
self._collab = CollabWrapper(self)
self._collab.message.connect(self.__message_cb)
self.set_canvas(self.initialize_display())
self.after_init()
self.connect("notify::active", self.__active_cb)
self._collab.setup()
def focus():
""" Enforce focus for the text view once. """
widget = self.get_toplevel().get_focus()
textview = self._source_tabs.get_text_view()
if widget is None and textview is not None:
textview.grab_focus()
return True
return False
GLib.timeout_add(100, focus)
def initialize_display(self):
'''Build activity toolbar with title input, share button and export
buttons
'''
toolbar_box = ToolbarBox()
activity_button = ActivityToolbarButton(self)
toolbar_box.toolbar.insert(activity_button, 0)
self.set_toolbar_box(toolbar_box)
activity_button.show()
toolbar_box.show()
activity_toolbar = activity_button.page
separator = Gtk.SeparatorToolItem()
activity_toolbar.insert(separator, -1)
separator.show()
button = ToolButton('pippy-import-doc')
button.set_tooltip(_('Import Python file to new tab'))
button.connect('clicked', self._import_py_cb)
activity_toolbar.insert(button, -1)
button.show()
button = ToolButton('pippy-export-doc')
button.set_tooltip(_('Export as Pippy document'))
button.connect('clicked', self._export_document_cb)
activity_toolbar.insert(button, -1)
button.show()
button = ToolButton('pippy-export-library')
button.set_tooltip(_('Save this file to the Pippy library'))
button.connect('clicked', self._save_as_library)
activity_toolbar.insert(button, -1)
if not self._library_writable():
button.set_sensitive(False)
button.show()
button = ToolButton('pippy-export-example')
button.set_tooltip(_('Export as new Pippy example'))
button.connect('clicked', self._export_example_cb)
activity_toolbar.insert(button, -1)
button.show()
button = ToolButton('pippy-create-bundle')
button.set_tooltip(_('Create a Sugar activity bundle'))
button.connect('clicked', self._create_bundle_cb)
activity_toolbar.insert(button, -1)
button.show()
button = ToolButton('pippy-create-distutils')
# TRANS: A distutils package is used to distribute Python modules
button.set_tooltip(_('Export as a distutils package'))
button.connect('clicked', self._export_distutils_cb)
activity_toolbar.insert(button, -1)
button.show()
self._edit_toolbar = EditToolbar()
button = ToolbarButton()
button.set_page(self._edit_toolbar)
button.props.icon_name = 'toolbar-edit'
button.props.label = _('Edit')
self.get_toolbar_box().toolbar.insert(button, -1)
button.show()
self._edit_toolbar.show()
self._edit_toolbar.undo.connect('clicked', self.__undobutton_cb)
self._edit_toolbar.redo.connect('clicked', self.__redobutton_cb)
self._edit_toolbar.copy.connect('clicked', self.__copybutton_cb)
self._edit_toolbar.paste.connect('clicked', self.__pastebutton_cb)
view_btn = ToolbarButton()
view_toolbar = DevelopViewToolbar(self)
view_btn.props.page = view_toolbar
view_btn.props.icon_name = 'toolbar-view'
view_btn.props.label = _('View')
view_toolbar.connect('font-size-changed',
self._font_size_changed_cb)
self.get_toolbar_box().toolbar.insert(view_btn, -1)
self.view_toolbar = view_toolbar
view_toolbar.show()
actions_toolbar = self.get_toolbar_box().toolbar
self._toggle_output = ToggleToolButton('tray-show')
self._toggle_output.set_tooltip(_('Show output panel'))
self._toggle_output.connect('toggled', self._toggle_output_cb)
actions_toolbar.insert(self._toggle_output, -1)
self._toggle_output.show()
self._inverted_colors = ToggleToolButton(icon_name='dark-theme')
self._inverted_colors.set_tooltip(_('Inverted Colors'))
self._inverted_colors.set_accelerator('<Ctrl><Shift>I')
self._inverted_colors.connect(
'toggled', self.__inverted_colors_toggled_cb)
actions_toolbar.insert(self._inverted_colors, -1)
self._inverted_colors.show()
icons_path = os.path.join(get_bundle_path(), 'icons')
icon_bw = Gtk.Image()
icon_bw.set_from_file(os.path.join(icons_path, 'run_bw.svg'))
icon_bw.show()
icon_color = Gtk.Image()
icon_color.set_from_file(os.path.join(icons_path, 'run_color.svg'))
icon_color.show()
button = ToolButton(label=_('Run!'))
button.props.accelerator = _('<alt>r')
button.set_icon_widget(icon_bw)
button.set_tooltip(_('Run!'))
button.connect('clicked', self._flash_cb,
dict({'bw': icon_bw, 'color': icon_color}))
button.connect('clicked', self._go_button_cb)
actions_toolbar.insert(button, -1)
button.show()
icon_bw = Gtk.Image()
icon_bw.set_from_file(os.path.join(icons_path, 'stopit_bw.svg'))
icon_bw.show()
icon_color = Gtk.Image()
icon_color.set_from_file(os.path.join(icons_path, 'stopit_color.svg'))
icon_color.show()
button = ToolButton(label=_('Stop'))
button.props.accelerator = _('<alt>s')
button.set_icon_widget(icon_bw)
button.connect('clicked', self._flash_cb,
dict({'bw': icon_bw, 'color': icon_color}))
button.connect('clicked', self._stop_button_cb)
button.set_tooltip(_('Stop'))
actions_toolbar.insert(button, -1)
button.show()
icon_bw = Gtk.Image()
icon_bw.set_from_file(os.path.join(icons_path, 'eraser_bw.svg'))
icon_bw.show()
icon_color = Gtk.Image()
icon_color.set_from_file(os.path.join(icons_path, 'eraser_color.svg'))
icon_color.show()
button = ToolButton(label=_('Clear output panel'))
button.props.accelerator = _('<alt>c')
button.set_icon_widget(icon_bw)
button.connect('clicked', self._clear_button_cb)
button.connect('clicked', self._flash_cb,
dict({'bw': icon_bw, 'color': icon_color}))
button.set_tooltip(_('Clear output panel'))
actions_toolbar.insert(button, -1)
button.show()
activity_toolbar.show()
separator = Gtk.SeparatorToolItem()
self.get_toolbar_box().toolbar.insert(separator, -1)
separator.show()
button = ToolButton('pippy-openoff')
button.set_tooltip(_('Open an example'))
button.connect('clicked', self._load_example_cb)
self.get_toolbar_box().toolbar.insert(button, -1)
button.show()
separator = Gtk.SeparatorToolItem()
separator.props.draw = False
separator.set_expand(True)
self.get_toolbar_box().toolbar.insert(separator, -1)
separator.show()
stop = StopButton(self)
self.get_toolbar_box().toolbar.insert(stop, -1)
stop.show()
vpane = Gtk.Paned.new(orientation=Gtk.Orientation.VERTICAL)
vpane.set_position(400) # setting initial position
self.paths = []
try:
if sound_check.finddir():
TAMTAM_AVAILABLE = True
else:
TAMTAM_AVAILABLE = False
except sound_check.SoundLibraryNotFoundError:
TAMTAM_AVAILABLE = False
data_path = os.path.join(get_bundle_path(), 'data')
# get default language from locale
locale_lang = locale.getdefaultlocale()[0]
if locale_lang is None:
lang = 'en'
else:
lang = locale_lang.split('_')[0]
_logger.debug(locale.getdefaultlocale())
_logger.debug(lang)
# construct the path for both
lang_path = os.path.join(data_path, lang)
en_lang_path = os.path.join(data_path, 'en')
# get all folders in lang examples
all_folders = []
if os.path.exists(lang_path):
for d in sorted(os.listdir(lang_path)):
all_folders.append(d)
# get all folders in English examples
for d in sorted(os.listdir(en_lang_path)):
# check if folder isn't already in list
if d not in all_folders:
all_folders.append(d)
for folder in all_folders:
# Skip sound folders if TAMTAM is not installed
if folder == 'sound' and not TAMTAM_AVAILABLE:
continue
direntry = {}
# check if dir exists in pref language, if exists, add it
if os.path.exists(os.path.join(lang_path, folder)):
direntry = {
'name': _(folder.capitalize()),
'path': os.path.join(lang_path, folder) + '/'}
# if not try to see if it's in default English path
elif os.path.exists(os.path.join(en_lang_path, folder)):
direntry = {
'name': _(folder.capitalize()),
'path': os.path.join(en_lang_path, folder) + '/'}
self.paths.append([direntry['name'], direntry['path']])
# Adding local examples
data_path = os.path.join(get_bundle_path(), 'data')
self.paths.append([_('My examples'), data_path])
self._source_tabs = SourceNotebook(self, self._collab)
self._source_tabs.connect('tab-added', self._add_source_cb)
self._source_tabs.connect('tab-renamed', self._rename_source_cb)
self._source_tabs.connect('tab-closed', self._close_source_cb)
if self._loaded_session:
for name, content, path in self._loaded_session:
self._source_tabs.add_tab(name, content, path)
else:
self.session_data.append(None)
self._source_tabs.add_tab() # New instance, ergo empty tab
vpane.add1(self._source_tabs)
self._source_tabs.show()
self._outbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
self._vte = Vte.Terminal()
self._vte.set_encoding('utf-8')
self._vte.set_size(30, 5)
self._vte.set_scrollback_lines(-1)
self._vte_set_colors('#000000', '#E7E7E7')
self._child_exited_handler = None
self._vte.connect('child_exited', self._child_exited_cb)
self._vte.connect('drag_data_received', self._vte_drop_cb)
self._outbox.pack_start(self._vte, True, True, 0)
outsb = Gtk.Scrollbar(orientation=Gtk.Orientation.VERTICAL)
outsb.set_adjustment(self._vte.get_vadjustment())
outsb.show()
self._outbox.pack_start(outsb, False, False, 0)
self._load_config()
vpane.add2(self._outbox)
self._outbox.show()
vpane.show()
return vpane
def _vte_set_colors(self, fg, bg):
# XXX support both Vte APIs; note the first argument is the foreground
# (text) colour and the second the background, matching the callers.
if _has_new_vte_api():
foreground = Gdk.RGBA()
foreground.parse(fg)
background = Gdk.RGBA()
background.parse(bg)
else:
foreground = Gdk.color_parse(fg)
background = Gdk.color_parse(bg)
self._vte.set_colors(foreground, background, [])
def after_init(self):
self._outbox.hide()
def _font_size_changed_cb(self, widget, size):
self._source_tabs.set_font_size(size)
self._vte.set_font(
Pango.FontDescription('Monospace {}'.format(size)))
def _store_config(self):
font_size = self._source_tabs.get_font_size()
_config_file_path = os.path.join(
activity.get_activity_root(), 'data',
'config.json')
with open(_config_file_path, "w") as f:
f.write(json.dumps(font_size))
def _load_config(self):
_config_file_path = os.path.join(
activity.get_activity_root(), 'data',
'config.json')
if not os.path.isfile(_config_file_path):
return
with open(_config_file_path, "r") as f:
font_size = json.loads(f.read())
self.view_toolbar.set_font_size(font_size)
self._vte.set_font(
Pango.FontDescription('Monospace {}'.format(font_size)))
def __active_cb(self, widget, event):
_logger.debug('__active_cb %r', self.props.active)
if self.props.active:
self.resume()
else:
self.pause()
def do_visibility_notify_event(self, event):
_logger.debug('do_visibility_notify_event %r', event.get_state())
if event.get_state() == Gdk.VisibilityState.FULLY_OBSCURED:
self.pause()
else:
self.resume()
def pause(self):
# FIXME: We had resume, but no pause?
pass
def resume(self):
if self._dialog is not None:
self._dialog.set_keep_above(True)
def _toggle_output_cb(self, button):
shown = button.get_active()
if shown:
self._outbox.show_all()
self._toggle_output.set_tooltip(_('Hide output panel'))
self._toggle_output.set_icon_name('tray-hide')
else:
self._outbox.hide()
self._toggle_output.set_tooltip(_('Show output panel'))
self._toggle_output.set_icon_name('tray-show')
def __inverted_colors_toggled_cb(self, button):
if button.props.active:
self._vte_set_colors('#E7E7E7', '#000000')
self._source_tabs.set_dark()
button.set_icon_name('light-theme')
button.set_tooltip(_('Normal Colors'))
else:
self._vte_set_colors('#000000', '#E7E7E7')
self._source_tabs.set_light()
button.set_icon_name('dark-theme')
button.set_tooltip(_('Inverted Colors'))
def _load_example_cb(self, widget):
widget.set_icon_name('pippy-openon')
self._dialog = FileDialog(self.paths, self, widget)
self._dialog.show()
self._dialog.run()
path = self._dialog.get_path()
if path:
self._select_func_cb(path)
def _add_source_cb(self, button, force=False, editor_id=None):
if self._collab._leader or force:
if editor_id is None:
editor_id = str(uuid.uuid1())
self._source_tabs.add_tab(editor_id=editor_id)
self.session_data.append(None)
self._source_tabs.get_nth_page(-1).show_all()
self._source_tabs.get_text_view().grab_focus()
if self._collab._leader:
self._collab.post(dict(
action='add-source',
editor_id=editor_id))
else:
# The leader must do it first so that they can set
# up the text buffer
self._collab.post(dict(action='add-source-request'))
# Check if dark mode enabled, apply it
if self._inverted_colors.props.active:
self._source_tabs.set_dark()
def _rename_source_cb(self, notebook, page, name):
_logger.debug('_rename_source_cb %r %r' % (page, name))
self._collab.post(dict(action='rename-source', page=page, name=name))
def _close_source_cb(self, notebook, page):
_logger.debug('_close_source_cb %r' % (page))
self._collab.post(dict(action='close-source', page=page))
def __message_cb(self, collab, buddy, msg):
action = msg.get('action')
if action == 'add-source-request' and self._collab._leader:
self._add_source_cb(None, force=True)
elif action == 'add-source':
self._add_source_cb(
None, force=True, editor_id=msg.get('editor_id'))
elif action == 'rename-source':
page = msg.get('page')
name = msg.get('name')
_logger.debug('__message_cb rename-source %r %r' % (page, name))
self._source_tabs.rename_tab(page, name)
elif action == 'close-source':
page = msg.get('page')
_logger.debug('__message_cb close-source %r' % (page))
self._source_tabs.close_tab(page)
def _vte_drop_cb(self, widget, context, x, y, selection, targetType, time):
if targetType == TARGET_TYPE_TEXT:
self._vte.feed_child(selection.data)
def get_data(self):
return self._source_tabs.get_all_data()
def set_data(self, data):
# Remove initial new/blank thing
self.session_data = []
self._loaded_session = []
try:
self._source_tabs.remove_page(0)
tab_object.pop(0)
self._source_tabs.last_tab = 0
except IndexError:
pass
list_ = list(zip(*data))
for name, code, path, modified, editor_id in list_:
self._source_tabs.add_tab(
label=name, editor_id=editor_id)
self.session_data.append(None) # maybe?
def _selection_cb(self, value):
self.save()
_logger.debug('clicked! %s' % value['path'])
_file = open(value['path'], 'r')
lines = _file.readlines()
self._add_source_cb(None)
text_buffer = self._source_tabs.get_text_buffer()
text_buffer.set_text(''.join(lines))
text_buffer.set_modified(False)
self._pippy_instance.metadata['title'] = value['name']
self._stop_button_cb(None)
self._reset_vte()
self._source_tabs.set_current_label(value['name'])
self._source_tabs.set_current_path(value['path'])
self._source_tabs.get_text_view().grab_focus()
def _select_func_cb(self, path):
values = {}
values['name'] = os.path.basename(path)
values['path'] = path
self._selection_cb(values)
def _timer_cb(self, button, icons):
button.set_icon_widget(icons['bw'])
button.show_all()
return False
def _flash_cb(self, button, icons):
button.set_icon_widget(icons['color'])
button.show_all()
GObject.timeout_add(400, self._timer_cb, button, icons)
def _clear_button_cb(self, button):
self.save()
self._stop_button_cb(None)
self._reset_vte()
self._source_tabs.get_text_view().grab_focus()
def _write_all_buffers(self, tmp_dir):
data = self._source_tabs.get_all_data()
zipdata = list(zip(data[0], data[1]))
for name, content in zipdata:
name = self._source_tabs.purify_name(name)
with open(os.path.join(tmp_dir, name), 'w') as f:
# Write utf-8 coding prefix if there's not one already
if re.search(r'coding[:=]\s*([-\w.]+)',
'\n'.join(content.splitlines()[:2])) is None:
f.write(PYTHON_PREFIX)
f.write(content)
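# Illustrative: a tab whose source begins with '# -*- coding: utf-8 -*-'
# now matches the search above, so PYTHON_PREFIX is only prepended to
# buffers that lack an explicit coding declaration.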
def _reset_vte(self):
self._vte.grab_focus()
self._vte.feed(b'\x1B[H\x1B[J\x1B[0;39m')
def __undobutton_cb(self, button):
text_buffer = self._source_tabs.get_text_buffer()
if text_buffer.can_undo():
text_buffer.undo()
def __redobutton_cb(self, button):
text_buffer = self._source_tabs.get_text_buffer()
if text_buffer.can_redo():
text_buffer.redo()
def __copybutton_cb(self, button):
text_buffer = self._source_tabs.get_text_buffer()
if self._vte.get_has_selection():
self._vte.copy_clipboard()
elif text_buffer.get_has_selection():
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
text_buffer.copy_clipboard(clipboard)
def __pastebutton_cb(self, button):
text_buffer = self._source_tabs.get_text_buffer()
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
text_buffer.paste_clipboard(clipboard, None, True)
def _go_button_cb(self, button):
self._stop_button_cb(button) # Try stopping old code first.
self._reset_vte()
# FIXME: We're losing an odd race here
# Gtk.main_iteration(block=False)
if self._toggle_output.get_active() is False:
self._outbox.show_all()
self._toggle_output.set_active(True)
pippy_tmp_dir = '%s/tmp/' % self.get_activity_root()
self._write_all_buffers(pippy_tmp_dir)
current_file = os.path.join(
pippy_tmp_dir,
self._source_tabs.get_current_file_name())
# Write activity.py here too, to support pippy-based activities.
copy2('%s/activity.py' % get_bundle_path(),
'%s/tmp/activity.py' % self.get_activity_root())
# XXX Support both Vte APIs
if _has_new_vte_api():
vte_run = self._vte.spawn_sync
else:
vte_run = self._vte.fork_command_full
self._pid = vte_run(
Vte.PtyFlags.DEFAULT,
get_bundle_path(),
['/bin/sh', '-c', 'python3 %s; sleep 1' % current_file,
'PYTHONPATH=%s/library:%s' % (get_bundle_path(),
os.getenv('PYTHONPATH', ''))],
['PYTHONPATH=%s/library:%s' % (get_bundle_path(),
os.getenv('PYTHONPATH', ''))],
GLib.SpawnFlags.DO_NOT_REAP_CHILD,
None,
None,)
def _stop_button_cb(self, button):
try:
if self._pid is not None:
os.kill(self._pid[1], SIGTERM)
except:
pass # Process must already be dead.
def _library_writable(self):
return os.access(os.path.join(get_bundle_path(), 'library'), os.W_OK)
def _save_as_library(self, button):
library_dir = os.path.join(get_bundle_path(), 'library')
file_name = self._source_tabs.get_current_file_name()
text_buffer = self._source_tabs.get_text_buffer()
content = text_buffer.get_text(
*text_buffer.get_bounds(),
include_hidden_chars=True)
if not os.path.isdir(library_dir):
os.mkdir(library_dir)
with open(os.path.join(library_dir, file_name), 'w') as f:
f.write(content)
success = True
if success:
alert = NotifyAlert(5)
alert.props.title = _('Python File added to Library')
IMPORT_MESSAGE = _('The file you selected has been added'
' to the library. Use "import {importname}"'
' to import it in your own code.')
alert.props.msg = IMPORT_MESSAGE.format(importname=file_name[:-3])
alert.connect('response', self._remove_alert_cb)
self.add_alert(alert)
def _export_document_cb(self, __):
self.copy()
alert = NotifyAlert()
alert.props.title = _('Saved')
alert.props.msg = _('The document has been saved to journal.')
alert.connect('response', lambda x, i: self.remove_alert(x))
self.add_alert(alert)
def _remove_alert_cb(self, alert, response_id):
self.remove_alert(alert)
def _import_py_cb(self, button):
chooser = ObjectChooser()
result = chooser.run()
if result is Gtk.ResponseType.ACCEPT:
dsitem = chooser.get_selected_object()
if dsitem.metadata['mime_type'] != 'text/x-python':
alert = NotifyAlert(5)
alert.props.title = _('Error importing Python file')
alert.props.msg = _('The file you selected is not a '
'Python file.')
alert.connect('response', self._remove_alert_cb)
self.add_alert(alert)
elif dsitem.object_id in self.session_data:
alert = NotifyAlert(5)
alert.props.title = _('Error importing Python file')
alert.props.msg = _('The file you selected is already '
'open')
alert.connect('response', self._remove_alert_cb)
self.add_alert(alert)
else:
name = dsitem.metadata['title']
file_path = dsitem.get_file_path()
content = open(file_path, 'r').read()
self._source_tabs.add_tab(name, content, None)
self._source_tabs.set_current_label(name)
self.session_data.append(dsitem.object_id)
_logger.debug('after import py: %r' % self.session_data)
chooser.destroy()
def _create_bundle_cb(self, button):
from shutil import rmtree
from tempfile import mkdtemp
# Get the name of this pippy program.
title = self._pippy_instance.metadata['title'].replace('.py', '')
title = title.replace('-', '')
if title == 'Pippy Activity':
alert = Alert()
alert.props.title = _('Save as Activity Error')
alert.props.msg = _('Please give your activity a meaningful name '
'before attempting to save it as an activity.')
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon)
alert.connect('response', self._dismiss_alert_cb)
self.add_alert(alert)
return
alert_icon = Alert()
ok_icon = Icon(icon_name='dialog-ok')
alert_icon.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon)
alert_icon.props.title = _('Activity icon')
alert_icon.props.msg = _('Please select an activity icon.')
self._stop_button_cb(None) # try stopping old code first.
self._reset_vte()
self._outbox.show_all()
self._vte.feed(_("Creating activity bundle...").encode())
self._vte.feed(b'\r\n')
TMPDIR = 'instance'
app_temp = mkdtemp('.activity', 'Pippy',
os.path.join(self.get_activity_root(), TMPDIR))
sourcefile = os.path.join(app_temp, 'xyzzy.py')
# invoke ourself to build the activity bundle.
_logger.debug('writing out source file: %s' % sourcefile)
def internal_callback(window=None, event=None):
icon = '%s/activity/activity-default.svg' % (get_bundle_path())
if window:
icon = window.get_icon()
self._stop_button_cb(None) # Try stopping old code first.
self._reset_vte()
self._vte.feed(_('Creating activity bundle...').encode())
self._vte.feed(b'\r\n')
TMPDIR = 'instance'
app_temp = mkdtemp('.activity', 'Pippy',
os.path.join(self.get_activity_root(), TMPDIR))
sourcefile = os.path.join(app_temp, 'xyzzy.py')
# Invoke ourself to build the activity bundle.
_logger.debug('writing out source file: %s' % sourcefile)
# Write out application code
self._write_text_buffer(sourcefile)
try:
                # FIXME: vte invocation was raising errors.
                # Switched to subprocess.
output = subprocess.check_output(
['/usr/bin/python3',
'%s/pippy_app.py' % get_bundle_path(),
'-p', '%s/library' % get_bundle_path(),
'-d', app_temp, title, sourcefile, icon])
self._vte.feed(output)
self._vte.feed(b'\r\n')
self._bundle_cb(title, app_temp)
except subprocess.CalledProcessError:
rmtree(app_temp, ignore_errors=True) # clean up!
self._vte.feed(_('Save as Activity Error').encode())
self._vte.feed(b'\r\n')
raise
def _alert_response(alert, response_id):
self.remove_alert(alert)
def _dialog():
dialog = IconDialog()
dialog.connect('destroy', internal_callback)
GObject.idle_add(_dialog)
alert_icon.connect('response', _alert_response)
self.add_alert(alert_icon)
def _write_text_buffer(self, filename):
text_buffer = self._source_tabs.get_text_buffer()
start, end = text_buffer.get_bounds()
text = text_buffer.get_text(start, end, True)
with open(filename, 'w') as f:
# Write utf-8 coding prefix if there's not one already
if re.match(r'coding[:=]\s*([-\w.]+)',
'\n'.join(text.splitlines()[:2])) is None:
f.write(PYTHON_PREFIX)
            f.write(text)
def _export_distutils_cb(self, button):
app_temp = os.path.join(self.get_activity_root(), 'instance')
data = self._source_tabs.get_all_data()
for filename, content in zip(data[0], data[1]):
fileobj = open(os.path.join(app_temp, filename), 'w')
fileobj.write(content)
fileobj.close()
filenames = ','.join([("'" + name[:-3] + "'") for name in data[0]])
title = self._pippy_instance.metadata['title']
        if title == _('Pippy Activity'):
alert = Alert()
alert.props.title = _('Save as distutils package error')
alert.props.msg = _('Please give your activity a meaningful '
'name before attempting to save it '
'as an distutils package.')
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon)
alert.connect('response', self._dismiss_alert_cb)
self.add_alert(alert)
return
found = next((
name for name in data[0]
if name != self._source_tabs.purify_name(name)),
None)
if found is not None:
example = self._source_tabs.purify_name(found)
alert = Alert()
alert.props.title = _('Save as distutils package error')
alert.props.msg = _('Please give your source files a proper '
'name, for example "%s", before attempting to '
'save it as an distutils package.') % example
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon)
alert.connect('response', self._dismiss_alert_cb)
self.add_alert(alert)
return
setup_script = DISTUTILS_SETUP_SCRIPT.format(modulename=title,
filenames=filenames)
setupfile = open(os.path.join(app_temp, 'setup.py'), 'w')
setupfile.write(setup_script)
setupfile.close()
os.chdir(app_temp)
subprocess.check_output(
['/usr/bin/python3', os.path.join(app_temp, 'setup.py'), 'sdist',
'-v'])
# Hand off to journal
os.chmod(app_temp, 0o777)
jobject = datastore.create()
metadata = {
'title': '%s distutils bundle' % title,
'title_set_by_user': '1',
'mime_type': 'application/x-gzip',
}
for k, v in list(metadata.items()):
# The dict.update method is missing =(
jobject.metadata[k] = v
tarname = 'dist/{modulename}-1.0.tar.gz'.format(modulename=title)
jobject.file_path = os.path.join(app_temp, tarname)
datastore.write(jobject)
def _export_example_cb(self, button):
# Get the name of this pippy program.
title = self._pippy_instance.metadata['title']
if title == _('Pippy Activity'):
alert = Alert()
alert.props.title = _('Save as Example Error')
alert.props.msg = \
_('Please give your activity a meaningful '
'name before attempting to save it as an example.')
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, _('Ok'), ok_icon)
alert.connect('response', self._dismiss_alert_cb)
self.add_alert(alert)
return
self._stop_button_cb(None) # Try stopping old code first.
self._reset_vte()
self._vte.feed(_('Creating example...').encode())
self._vte.feed(b'\r\n')
local_data = os.path.join(os.environ['SUGAR_ACTIVITY_ROOT'], 'data')
local_file = os.path.join(local_data, title)
if os.path.exists(local_file):
alert = ConfirmationAlert()
alert.props.title = _('Save as Example Warning')
alert.props.msg = _('This example already exists. '
'Do you want to overwrite it?')
alert.connect('response', self._confirmation_alert_cb, local_file)
self.add_alert(alert)
else:
self.write_file(local_file)
self._reset_vte()
self._vte.feed(_('Saved as example.').encode())
self._vte.feed(b'\r\n')
self._add_to_example_list(local_file)
def _child_exited_cb(self, *args):
'''Called whenever a child exits. If there's a handler, run it.'''
h, self._child_exited_handler = self._child_exited_handler, None
if h is not None:
h()
def _bundle_cb(self, title, app_temp):
'''Called when we're done building a bundle for a source file.'''
from sugar3 import profile
from shutil import rmtree
try:
# Find the .xo file: were we successful?
bundle_file = [f for f in os.listdir(app_temp)
if f.endswith('.xo')]
if len(bundle_file) != 1:
_logger.debug("Couldn't find bundle: %s" %
str(bundle_file))
self._vte.feed(b'\r\n')
self._vte.feed(_('Error saving activity to journal.').encode())
self._vte.feed(b'\r\n')
return # Something went wrong.
# Hand off to journal
os.chmod(app_temp, 0o755)
jobject = datastore.create()
metadata = {
'title': '%s Bundle' % title,
'title_set_by_user': '1',
'buddies': '',
'preview': '',
'icon-color': profile.get_color().to_string(),
'mime_type': 'application/vnd.olpc-sugar',
}
for k, v in list(metadata.items()):
# The dict.update method is missing =(
jobject.metadata[k] = v
jobject.file_path = os.path.join(app_temp, bundle_file[0])
datastore.write(jobject)
self._vte.feed(b'\r\n')
self._vte.feed(_('Activity saved to journal.').encode())
self._vte.feed(b'\r\n')
self.journal_show_object(jobject.object_id)
jobject.destroy()
finally:
rmtree(app_temp, ignore_errors=True) # clean up!
def _dismiss_alert_cb(self, alert, response_id):
self.remove_alert(alert)
def _confirmation_alert_cb(self, alert, response_id, local_file):
# Callback for conf alert
self.remove_alert(alert)
        if response_id == Gtk.ResponseType.OK:
self.write_file(local_file)
self._reset_vte()
self._vte.feed(_('Saved as example.').encode())
self._vte.feed(b'\r\n')
else:
self._reset_vte()
def _add_to_example_list(self, local_file):
entry = {'name': _(os.path.basename(local_file)),
'path': local_file}
_iter = self.model.insert_before(self.example_iter, None)
self.model.set_value(_iter, 0, entry)
self.model.set_value(_iter, 1, entry['name'])
def is_example(self, path):
if path is None:
return False
for name in self.paths:
if path.startswith(name[1]):
return True
return False
def _get_pippy_object_id(self):
''' We need the object_id of this pippy instance to save in the .py
file metadata'''
if self._pippy_instance == self:
return _find_object_id(self.metadata['activity_id'],
mimetype='application/json')
else:
return self._pippy_instance.get_object_id()
def write_file(self, file_path):
pippy_id = self._get_pippy_object_id()
data = self._source_tabs.get_all_data()
zipped_data = list(zip(*data))
session_list = []
app_temp = os.path.join(self.get_activity_root(), 'instance')
tmpfile = os.path.join(app_temp, 'pippy-tempfile-storing.py')
if not self.session_data:
self.session_data.append(None)
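        # Each tab is saved according to one of four cases below:
        # (1) the tab backing this very Python file, (2) a tab tied to an
        # existing datastore object, (3) modified code that needs a fresh
        # datastore object, or (4) an unmodified reference to a sample file.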
for zipdata, content in zip(zipped_data, self.session_data):
_logger.debug('Session data %r', content)
name, python_code, path, modified, editor_id = zipdata
if content is not None and content == self._py_object_id:
_logger.debug('saving to self')
self.metadata['title'] = name
self.metadata['mime_type'] = 'text/x-python'
if pippy_id is not None:
self.metadata['pippy_instance'] = pippy_id
__file = open(file_path, 'w')
__file.write(python_code)
__file.close()
session_list.append([name, content])
elif content is not None and content[0] != '/':
_logger.debug('Saving an existing dsobject')
dsobject = datastore.get(content)
dsobject.metadata['title'] = name
dsobject.metadata['mime_type'] = 'text/x-python'
if pippy_id is not None:
dsobject.metadata['pippy_instance'] = pippy_id
__file = open(tmpfile, 'w')
__file.write(python_code)
__file.close()
dsobject.set_file_path(tmpfile)
datastore.write(dsobject)
session_list.append([name, dsobject.object_id])
elif modified:
_logger.debug('Creating new dsobj for modified code')
if len(python_code) > 0:
dsobject = datastore.create()
dsobject.metadata['title'] = name
dsobject.metadata['mime_type'] = 'text/x-python'
if pippy_id is not None:
dsobject.metadata['pippy_instance'] = pippy_id
__file = open(tmpfile, 'w')
__file.write(python_code)
__file.close()
dsobject.set_file_path(tmpfile)
datastore.write(dsobject)
session_list.append([name, dsobject.object_id])
# If there are multiple Nones, we need to find
# the correct one.
if content is None and \
self.session_data.count(None) > 1:
i = zipped_data.index(zipdata)
else:
i = self.session_data.index(content)
self.session_data[i] = dsobject.object_id
elif content is not None or path is not None:
_logger.debug('Saving reference to sample file')
if path is None: # Should not happen, but just in case...
_logger.error('path is None.')
session_list.append([name, content])
else:
session_list.append([name, path])
else: # Should not happen, but just in case...
_logger.debug('Nothing to save in tab? %s %s %s %s' %
(str(name), str(python_code), str(path),
str(content)))
self._pippy_instance.metadata['mime_type'] = 'application/json'
pippy_data = json.dumps(session_list)
# Override file path if we created a new Pippy instance
if self._py_file_loaded_from_journal:
file_path = os.path.join(app_temp, 'pippy-temp-instance-data')
_file = open(file_path, 'w')
_file.write(pippy_data)
_file.close()
if self._py_file_loaded_from_journal:
_logger.debug('setting pippy instance file_path to %s' %
file_path)
self._pippy_instance.set_file_path(file_path)
datastore.write(self._pippy_instance)
self._store_config()
def read_file(self, file_path):
# Either we are opening Python code or a list of objects
# stored (json-encoded) in a Pippy instance, or a shared
# session.
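        # The json payload produced by write_file() is a list of
        # [name, content] pairs, where content is either a datastore
        # object id or an absolute path to a sample file.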
# Remove initial new/blank thing
self.session_data = []
self._loaded_session = []
try:
self._source_tabs.remove_page(0)
tab_object.pop(0)
self._source_tabs.last_tab = 0
except IndexError:
pass
if self.metadata['mime_type'] == 'text/x-python':
_logger.debug('Loading Python code')
# Opening some Python code directly
try:
text = open(file_path).read()
except:
alert = NotifyAlert(10)
alert.props.title = _('Error')
alert.props.msg = _('Error reading data.')
def _remove_alert(alert, response_id):
self.remove_alert(alert)
alert.connect("response", _remove_alert)
self.add_alert(alert)
return
self._py_file_loaded_from_journal = True
# Discard the '#!/usr/bin/python3' and 'coding: utf-8' lines,
# if present
python_code = re.sub(r'^' + re.escape(PYTHON_PREFIX), '', text)
name = self.metadata['title']
self._loaded_session.append([name, python_code, None])
# Since we loaded Python code, we need to create (or
# restore) a Pippy instance
if 'pippy_instance' in self.metadata:
_logger.debug('found a pippy instance: %s' %
self.metadata['pippy_instance'])
try:
self._pippy_instance = datastore.get(
self.metadata['pippy_instance'])
except:
                    _logger.debug('Cannot find old Pippy instance: %s' %
                                  self.metadata['pippy_instance'])
self._pippy_instance = None
if self._pippy_instance in [self, None]:
self._pippy_instance = datastore.create()
self._pippy_instance.metadata['title'] = self.metadata['title']
self._pippy_instance.metadata['mime_type'] = 'application/json'
self._pippy_instance.metadata['activity'] = 'org.laptop.Pippy'
datastore.write(self._pippy_instance)
self.metadata['pippy_instance'] = \
self._pippy_instance.get_object_id()
_logger.debug('get_object_id %s' %
self.metadata['pippy_instance'])
# We need the Pippy file path so we can read the session data
file_path = self._pippy_instance.get_file_path()
# Finally, add this Python object to the session data
self._py_object_id = _find_object_id(self.metadata['activity_id'])
self.session_data.append(self._py_object_id)
_logger.debug('session_data: %s' % self.session_data)
if self.metadata['mime_type'] == 'application/json' or \
self._pippy_instance != self:
# Reading file list from Pippy instance
_logger.debug('Loading Pippy instance')
if len(file_path) == 0:
return
data = json.loads(open(file_path).read())
for name, content in data:
# content is either a datastore id or the path to some
# sample code
if content is not None and content[0] == '/': # a path
try:
python_code = open(content).read()
except:
_logger.error('Could not open %s; skipping' % content)
path = content
elif content != self._py_object_id:
try:
dsobject = datastore.get(content)
if 'mime_type' not in dsobject.metadata:
_logger.error(
'Warning: %s missing mime_type' % content)
elif dsobject.metadata['mime_type'] != 'text/x-python':
_logger.error(
'Warning: %s has unexpected mime_type %s' %
(content, dsobject.metadata['mime_type']))
except:
# Could be that the item has subsequently been
# deleted from the datastore, so we skip it.
_logger.error('Could not open %s; skipping' % content)
continue
try:
python_code = open(dsobject.get_file_path()).read()
except:
# Malformed bundle?
_logger.error('Could not open %s; skipping' %
dsobject.get_file_path())
continue
path = None
# Queue up the creation of the tabs...
# And add this content to the session data
if content not in self.session_data:
self.session_data.append(content)
self._loaded_session.append([name, python_code, path])
# Create tabs from the datastore, else add a blank tab
if self._loaded_session:
for name, content, path in self._loaded_session:
self._source_tabs.add_tab(name, content, path)
else:
self._source_tabs.add_tab()
# TEMPLATES AND INLINE FILES
ACTIVITY_INFO_TEMPLATE = '''
[Activity]
name = %(title)s
bundle_id = %(bundle_id)s
exec = sugar-activity3 %(class)s
icon = activity-icon
activity_version = %(version)d
mime_types = %(mime_types)s
show_launcher = yes
%(extra_info)s
'''
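# Illustrative only: how main() below renders this template, with hypothetical
# values for each key it collects into bundle_info.
_EXAMPLE_ACTIVITY_INFO = ACTIVITY_INFO_TEMPLATE % {
    'title': 'My Activity',
    'bundle_id': 'org.sugarlabs.pippy.example1234',
    'class': 'pippy_app.PippyActivity',
    'version': 1,
    'mime_types': 'text/x-python',
    'extra_info': 'license = GPLv2+',
}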
PIPPY_ICON = """<?xml version="1.0" ?><!DOCTYPE svg PUBLIC '-//W3C//DTD SVG
1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd' [
<!ENTITY stroke_color "#010101">
<!ENTITY fill_color "#FFFFFF">
]>
<svg enable-background="new 0 0 55 55" height="55px" version="1.1"
viewBox="0 0 55 55" width="55px" x="0px" xml:space="preserve"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink" y="0px"><g display="block"
id="activity-pippy">
<path d="M28.497,48.507
c5.988,0,14.88-2.838,14.88-11.185
c0-9.285-7.743-10.143-10.954-11.083
c-3.549-0.799-5.913-1.914-6.055-3.455
c-0.243-2.642,1.158-3.671,3.946-3.671
c0,0,6.632,3.664,12.266,0.74
c1.588-0.823,4.432-4.668,4.432-7.32
c0-2.653-9.181-5.719-11.967-5.719
c-2.788,0-5.159,3.847-5.159,3.847
c-5.574,0-11.149,5.306-11.149,10.612
c0,5.305,5.333,9.455,11.707,10.612
c2.963,0.469,5.441,2.22,4.878,5.438
c-0.457,2.613-2.995,5.306-8.361,5.306
c-4.252,0-13.3-0.219-14.745-4.079
c-0.929-2.486,0.168-5.205,1.562-5.205l-0.027-0.16
c-1.42-0.158-5.548,0.16-5.548,5.465
C8.202,45.452,17.347,48.507,28.497,48.507z"
fill="&fill_color;" stroke="&stroke_color;"
stroke-linecap="round" stroke-linejoin="round" stroke-width="3.5"/>
<path d="M42.579,19.854c-2.623-0.287-6.611-2-7.467-5.022" fill="none"
stroke="&stroke_color;" stroke-linecap="round" stroke-width="3"/>
<circle cx="35.805" cy="10.96" fill="&stroke_color;" r="1.676"/>
</g></svg><!-- " -->
"""
# ACTIVITY META-INFORMATION
# this is used by Pippy to generate a bundle for itself.
def pippy_activity_version():
'''Returns the version number of the generated activity bundle.'''
return 39
def pippy_activity_extra_files():
'''Returns a map of 'extra' files which should be included in the
generated activity bundle.'''
# Cheat here and generate the map from the fs contents.
extra = {}
bp = get_bundle_path()
for d in ['po', 'data', 'post']: # everybody gets library
for root, dirs, files in os.walk(os.path.join(bp, d)):
for name in files:
fn = os.path.join(root, name).replace(bp + '/', '')
extra[fn] = open(os.path.join(root, name), 'r').read()
return extra
def pippy_activity_news():
'''Return the NEWS file for this activity.'''
# Cheat again.
return open(os.path.join(get_bundle_path(), 'NEWS')).read()
def pippy_activity_icon():
'''Return an SVG document specifying the icon for this activity.'''
return PIPPY_ICON
def pippy_activity_class():
'''Return the class which should be started to run this activity.'''
return 'pippy_app.PippyActivity'
def pippy_activity_bundle_id():
'''Return the bundle_id for the generated activity.'''
return 'org.laptop.Pippy'
def pippy_activity_mime_types():
'''Return the mime types handled by the generated activity, as a list.'''
return ['text/x-python']
def pippy_activity_extra_info():
return '''
license = GPLv2+
update_url = http://activities.sugarlabs.org '''
# ACTIVITY BUNDLER
def main():
'''Create a bundle from a pippy-style source file'''
from optparse import OptionParser
from pyclbr import readmodule_ex
from tempfile import mkdtemp
from shutil import copytree, copy2, rmtree
from sugar3.activity import bundlebuilder
parser = OptionParser(usage='%prog [options] [title] [sourcefile] [icon]')
parser.add_option('-d', '--dir', dest='dir', default='.', metavar='DIR',
help='Put generated bundle in the specified directory.')
parser.add_option('-p', '--pythonpath', dest='path', action='append',
default=[], metavar='DIR',
help='Append directory to python search path.')
(options, args) = parser.parse_args()
if len(args) < 3:
parser.error('The title, sourcefile and icon arguments are required.')
title = args[0]
sourcefile = args[1]
icon_path = args[2]
pytitle = re.sub(r'[^A-Za-z0-9_]', '', title)
if re.match(r'[0-9]', pytitle) is not None:
pytitle = '_' + pytitle # first character cannot be numeric
# First take a gander at the source file and see if it's got extra info
# for us.
sourcedir, basename = os.path.split(sourcefile)
if not sourcedir:
sourcedir = '.'
module, ext = os.path.splitext(basename)
f = open(icon_path, 'r')
icon = f.read()
f.close()
# Things we look for:
bundle_info = {
'version': 1,
'extra_files': {},
'news': 'No news.',
'icon': icon,
'class': 'activity.VteActivity',
'bundle_id': ('org.sugarlabs.pippy.%s%d' %
(generate_unique_id(),
int(round(uniform(1000, 9999), 0)))),
'mime_types': '',
'extra_info': '',
}
# Are any of these things in the module?
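    # pyclbr scans the module's source without importing it, so we only pay
    # for (and risk) a real import when a pippy_activity_* hook is present.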
try_import = False
info = readmodule_ex(module, [sourcedir] + options.path)
for func in list(bundle_info.keys()):
p_a_func = 'pippy_activity_%s' % func
if p_a_func in info:
try_import = True
if try_import:
# Yes, let's try to execute them to get better info about our bundle
oldpath = list(sys.path)
sys.path[0:0] = [sourcedir] + options.path
modobj = __import__(module)
for func in list(bundle_info.keys()):
p_a_func = 'pippy_activity_%s' % func
if p_a_func in modobj.__dict__:
bundle_info[func] = modobj.__dict__[p_a_func]()
sys.path = oldpath
# Okay! We've done the hard part. Now let's build a bundle.
# Create a new temp dir in which to create the bundle.
app_temp = mkdtemp('.activity', 'Pippy') # Hope TMPDIR is set correctly!
bundle = get_bundle_path()
try:
copytree('%s/library' % bundle, '%s/library' % app_temp)
copy2('%s/activity.py' % bundle, '%s/activity.py' % app_temp)
# create activity.info file.
bundle_info['title'] = title
bundle_info['pytitle'] = pytitle
# put 'extra' files in place.
extra_files = {
'activity/activity.info': ACTIVITY_INFO_TEMPLATE % bundle_info,
'activity/activity-icon.svg': bundle_info['icon'],
'NEWS': bundle_info['news'],
}
extra_files.update(bundle_info['extra_files'])
for path, contents in list(extra_files.items()):
# safety first!
assert '..' not in path
dirname, filename = os.path.split(path)
dirname = os.path.join(app_temp, dirname)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(os.path.join(dirname, filename), 'w') as f:
f.write(contents)
# Put script into $app_temp/pippy_app.py
copy2(sourcefile, '%s/pippy_app.py' % app_temp)
# Invoke bundle builder
olddir = os.getcwd()
oldargv = sys.argv
os.chdir(app_temp)
sys.argv = ['setup.py', 'dist_xo']
print('\r\nStarting bundlebuilder\r\n')
bundlebuilder.start()
sys.argv = oldargv
os.chdir(olddir)
# Move to destination directory.
src = '%s/dist/%s-%d.xo' % (app_temp, pytitle, bundle_info['version'])
dst = '%s/%s-%d.xo' % (options.dir, pytitle, bundle_info['version'])
if not os.path.exists(src):
print('Cannot find %s\r\n' % (src))
else:
copy2(src, dst)
finally:
rmtree(app_temp, ignore_errors=True)
print('Finally\r\n')
if __name__ == '__main__':
from gettext import gettext as _
if False: # Change this to True to test within Pippy
sys.argv = sys.argv + ['-d', '/tmp', 'Pippy',
'/home/olpc/pippy_app.py']
print(_('Working...'))
sys.stdout.flush()
main()
print(_('done!'))
sys.exit(0)
| gpl-3.0 |
Xeralux/tensorflow | tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column.py | 119 | 3797 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparse feature column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework.ops import internal_convert_to_tensor
from tensorflow.python.framework.ops import name_scope
class SparseFeatureColumn(object):
"""Represents a sparse feature column.
Contains three tensors representing a sparse feature column, they are
example indices (`int64`), feature indices (`int64`), and feature
values (`float`).
Feature weights are optional, and are treated as `1.0f` if missing.
For example, consider a batch of 4 examples, which contains the following
features in a particular `SparseFeatureColumn`:
* Example 0: feature 5, value 1
* Example 1: feature 6, value 1 and feature 10, value 0.5
* Example 2: no features
* Example 3: two copies of feature 2, value 1
This SparseFeatureColumn will be represented as follows:
```
<0, 5, 1>
<1, 6, 1>
<1, 10, 0.5>
<3, 2, 1>
<3, 2, 1>
```
For a batch of 2 examples below:
* Example 0: feature 5
* Example 1: feature 6
is represented by `SparseFeatureColumn` as:
```
<0, 5, 1>
<1, 6, 1>
```
@@__init__
@@example_indices
@@feature_indices
@@feature_values
"""
def __init__(self, example_indices, feature_indices, feature_values):
"""Creates a `SparseFeatureColumn` representation.
Args:
example_indices: A 1-D int64 tensor of shape `[N]`. Also, accepts
python lists, or numpy arrays.
feature_indices: A 1-D int64 tensor of shape `[N]`. Also, accepts
python lists, or numpy arrays.
      feature_values: An optional 1-D float tensor of shape `[N]`. Also,
accepts python lists, or numpy arrays.
Returns:
A `SparseFeatureColumn`
"""
with name_scope(None, 'SparseFeatureColumn',
[example_indices, feature_indices]):
self._example_indices = internal_convert_to_tensor(
example_indices, name='example_indices', dtype=dtypes.int64)
self._feature_indices = internal_convert_to_tensor(
feature_indices, name='feature_indices', dtype=dtypes.int64)
self._feature_values = None
if feature_values is not None:
with name_scope(None, 'SparseFeatureColumn', [feature_values]):
self._feature_values = internal_convert_to_tensor(
feature_values, name='feature_values', dtype=dtypes.float32)
@property
def example_indices(self):
"""The example indices represented as a dense tensor.
Returns:
A 1-D Tensor of int64 with shape `[N]`.
"""
return self._example_indices
@property
def feature_indices(self):
"""The feature indices represented as a dense tensor.
Returns:
A 1-D Tensor of int64 with shape `[N]`.
"""
return self._feature_indices
@property
def feature_values(self):
"""The feature values represented as a dense tensor.
Returns:
May return None, or a 1-D Tensor of float32 with shape `[N]`.
"""
return self._feature_values
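# A minimal usage sketch (illustrative, not part of the library): the batch
# from the class docstring above, passed as plain Python lists.
if __name__ == '__main__':
    sfc = SparseFeatureColumn(
        example_indices=[0, 1, 1, 3, 3],
        feature_indices=[5, 6, 10, 2, 2],
        feature_values=[1.0, 1.0, 0.5, 1.0, 1.0])
    print(sfc.example_indices)  # int64 Tensor of shape [5]
    print(sfc.feature_indices)  # int64 Tensor of shape [5]
    print(sfc.feature_values)   # float32 Tensor of shape [5]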
| apache-2.0 |
meteorcloudy/tensorflow | tensorflow/python/kernel_tests/io_ops_test.py | 23 | 4898 | # -*- coding: utf-8 -*-
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class IoOpsTest(test.TestCase):
def testReadFile(self):
cases = ['', 'Some contents', 'Неки садржаји на српском']
for contents in cases:
contents = compat.as_bytes(contents)
with tempfile.NamedTemporaryFile(
prefix='ReadFileTest', dir=self.get_temp_dir(), delete=False) as temp:
temp.write(contents)
with self.test_session():
read = io_ops.read_file(temp.name)
self.assertEqual([], read.get_shape())
self.assertEqual(read.eval(), contents)
os.remove(temp.name)
def testWriteFile(self):
cases = ['', 'Some contents']
for contents in cases:
contents = compat.as_bytes(contents)
with tempfile.NamedTemporaryFile(
prefix='WriteFileTest', dir=self.get_temp_dir(),
delete=False) as temp:
pass
with self.test_session() as sess:
w = io_ops.write_file(temp.name, contents)
sess.run(w)
with open(temp.name, 'rb') as f:
file_contents = f.read()
self.assertEqual(file_contents, contents)
os.remove(temp.name)
def testWriteFileCreateDir(self):
cases = ['', 'Some contents']
for contents in cases:
contents = compat.as_bytes(contents)
subdir = os.path.join(self.get_temp_dir(), 'subdir1')
filepath = os.path.join(subdir, 'subdir2', 'filename')
with self.test_session() as sess:
w = io_ops.write_file(filepath, contents)
sess.run(w)
with open(filepath, 'rb') as f:
file_contents = f.read()
self.assertEqual(file_contents, contents)
shutil.rmtree(subdir)
def _subset(self, files, indices):
return set(
compat.as_bytes(files[i].name) for i in range(len(files))
if i in indices)
def testMatchingFiles(self):
cases = [
'ABcDEF.GH', 'ABzDEF.GH', 'ABasdfjklDEF.GH', 'AB3DEF.GH', 'AB4DEF.GH',
'ABDEF.GH', 'XYZ'
]
files = [
tempfile.NamedTemporaryFile(
prefix=c, dir=self.get_temp_dir(), delete=True) for c in cases
]
with self.test_session():
# Test exact match without wildcards.
for f in files:
self.assertEqual(
io_ops.matching_files(f.name).eval(), compat.as_bytes(f.name))
# We will look for files matching "ABxDEF.GH*" where "x" is some wildcard.
directory_path = files[0].name[:files[0].name.find(cases[0])]
pattern = directory_path + 'AB%sDEF.GH*'
self.assertEqual(
set(io_ops.matching_files(pattern % 'z').eval()),
self._subset(files, [1]))
self.assertEqual(
set(io_ops.matching_files(pattern % '?').eval()),
self._subset(files, [0, 1, 3, 4]))
self.assertEqual(
set(io_ops.matching_files(pattern % '*').eval()),
self._subset(files, [0, 1, 2, 3, 4, 5]))
# NOTE(mrry): Windows uses PathMatchSpec to match file patterns, which
# does not support the following expressions.
if os.name != 'nt':
self.assertEqual(
set(io_ops.matching_files(pattern % '[cxz]').eval()),
self._subset(files, [0, 1]))
self.assertEqual(
set(io_ops.matching_files(pattern % '[0-9]').eval()),
self._subset(files, [3, 4]))
# Test an empty list input.
self.assertItemsEqual(io_ops.matching_files([]).eval(), [])
# Test multiple exact filenames.
self.assertItemsEqual(
io_ops.matching_files([
files[0].name, files[1].name, files[2].name]).eval(),
self._subset(files, [0, 1, 2]))
# Test multiple globs.
self.assertItemsEqual(
io_ops.matching_files([
pattern % '?', directory_path + 'X?Z*']).eval(),
self._subset(files, [0, 1, 3, 4, 6]))
for f in files:
f.close()
if __name__ == '__main__':
test.main()
| apache-2.0 |
yd0str/infernal-twin | build/pip/build/lib.linux-i686-2.7/pip/_vendor/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
def __init__(self, model, reversed=False, nameProber=None):
CharSetProber.__init__(self)
self._mModel = model
# TRUE if we need to reverse every pair in the model lookup
self._mReversed = reversed
# Optional auxiliary prober for name decision
self._mNameProber = nameProber
self.reset()
def reset(self):
CharSetProber.reset(self)
# char order of last character
self._mLastOrder = 255
self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
self._mTotalSeqs = 0
self._mTotalChar = 0
# characters that fall in our sampling range
self._mFreqChar = 0
def get_charset_name(self):
if self._mNameProber:
return self._mNameProber.get_charset_name()
else:
return self._mModel['charsetName']
def feed(self, aBuf):
if not self._mModel['keepEnglishLetter']:
aBuf = self.filter_without_english_letters(aBuf)
aLen = len(aBuf)
if not aLen:
return self.get_state()
for c in aBuf:
order = self._mModel['charToOrderMap'][wrap_ord(c)]
if order < SYMBOL_CAT_ORDER:
self._mTotalChar += 1
if order < SAMPLE_SIZE:
self._mFreqChar += 1
if self._mLastOrder < SAMPLE_SIZE:
self._mTotalSeqs += 1
if not self._mReversed:
i = (self._mLastOrder * SAMPLE_SIZE) + order
model = self._mModel['precedenceMatrix'][i]
else: # reverse the order of the letters in the lookup
i = (order * SAMPLE_SIZE) + self._mLastOrder
model = self._mModel['precedenceMatrix'][i]
self._mSeqCounters[model] += 1
self._mLastOrder = order
if self.get_state() == constants.eDetecting:
if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
cf = self.get_confidence()
if cf > POSITIVE_SHORTCUT_THRESHOLD:
if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a '
                                         'winner\n' %
(self._mModel['charsetName'], cf))
self._mState = constants.eFoundIt
elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative '
                                         'shortcut threshold %s\n' %
(self._mModel['charsetName'], cf,
NEGATIVE_SHORTCUT_THRESHOLD))
self._mState = constants.eNotMe
return self.get_state()
def get_confidence(self):
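        # Confidence = (share of "positive" byte pairs among all sampled
        # pairs, normalised by the model's typical positive ratio), scaled
        # by the fraction of frequent characters seen, clamped to 0.99.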
r = 0.01
if self._mTotalSeqs > 0:
r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
/ self._mModel['mTypicalPositiveRatio'])
r = r * self._mFreqChar / self._mTotalChar
if r >= 1.0:
r = 0.99
return r
| gpl-3.0 |
richardcs/ansible | lib/ansible/modules/network/f5/bigip_software_update.py | 11 | 9756 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_software_update
short_description: Manage the software update settings of a BIG-IP
description:
- Manage the software update settings of a BIG-IP.
version_added: 2.5
options:
auto_check:
description:
- Specifies whether to automatically check for updates on the F5
Networks downloads server.
type: bool
auto_phone_home:
description:
- Specifies whether to automatically send phone home data to the
F5 Networks PhoneHome server.
type: bool
frequency:
description:
- Specifies the schedule for the automatic update check.
choices:
- daily
- monthly
- weekly
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Enable automatic update checking
bigip_software_update:
auto_check: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Disable automatic update checking and phoning home
bigip_software_update:
auto_check: no
auto_phone_home: no
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
auto_check:
description: Whether the system checks for updates automatically.
returned: changed
type: bool
sample: True
auto_phone_home:
description: Whether the system automatically sends phone home data.
returned: changed
type: bool
sample: True
frequency:
  description: Frequency of auto update checks.
returned: changed
type: string
sample: weekly
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
class Parameters(AnsibleF5Parameters):
api_map = {
'autoCheck': 'auto_check',
'autoPhonehome': 'auto_phone_home'
}
api_attributes = [
'autoCheck', 'autoPhonehome', 'frequency',
]
updatables = [
'auto_check', 'auto_phone_home', 'frequency',
]
returnables = [
'auto_check', 'auto_phone_home', 'frequency',
]
class ApiParameters(Parameters):
@property
def auto_check(self):
if self._values['auto_check'] is None:
return None
return self._values['auto_check']
class ModuleParameters(Parameters):
@property
def auto_check(self):
if self._values['auto_check'] is None:
return None
elif self._values['auto_check'] is True:
return 'enabled'
else:
return 'disabled'
@property
def auto_phone_home(self):
if self._values['auto_phone_home'] is None:
return None
elif self._values['auto_phone_home'] is True:
return 'enabled'
else:
return 'disabled'
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def auto_check(self):
if self._values['auto_check'] == 'enabled':
return True
elif self._values['auto_check'] == 'disabled':
return False
@property
def auto_phone_home(self):
if self._values['auto_phone_home'] == 'enabled':
return True
elif self._values['auto_phone_home'] == 'disabled':
return False
class Difference(object):
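    # Compares want (desired module params) against have (device state),
    # one attribute at a time; by default an attribute is reported only
    # when the wanted value differs from, or is absent on, the device.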
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def exec_module(self): # lgtm [py/similar-function]
result = dict()
changed = self.update()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/software/update/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/software/update/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
auto_check=dict(
type='bool'
),
auto_phone_home=dict(
type='bool'
),
frequency=dict(
choices=['daily', 'monthly', 'weekly']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
joshuahoman/vivisect | vstruct/qt/__init__.py | 6 | 5522 | '''
Some utilities for Qt code which uses vstruct...
'''
import vqt.tree as vq_tree
from PyQt4 import QtCore, QtGui
from vqt.main import idlethread, idlethreadsync
class VQStructNamespacesView(vq_tree.VQTreeView):
def __init__(self, parent=None):
vq_tree.VQTreeView.__init__(self, parent=parent)
#model = vq_tree.VTreeView(parent=self, columns=('Namespace', 'Structure'))
class VQStructSelectView(vq_tree.VQTreeView):
def __init__(self, vsbuilder, parent=None):
vq_tree.VQTreeView.__init__(self, parent=parent)
self.vsbuilder = vsbuilder
model = vq_tree.VQTreeModel(parent=self, columns=('Namespace', 'Structure'))
for nsname in vsbuilder.getVStructNamespaceNames():
pnode = model.append((nsname, ''))
pnode.structname = None
for sname in vsbuilder.getVStructNames(namespace=nsname):
spnode = model.append(('', sname), parent=pnode)
spnode.structname = '%s.%s' % (nsname, sname)
for sname in vsbuilder.getVStructNames():
node = model.append( ('', sname ) )
node.structname = sname
self.setModel(model)
class VQStructSelectDialog(QtGui.QDialog):
def __init__(self, vsbuilder, parent=None):
QtGui.QDialog.__init__(self, parent=parent)
self.structname = None
self.setWindowTitle('Select a structure...')
vlyt = QtGui.QVBoxLayout()
hlyt = QtGui.QHBoxLayout()
self.structtree = VQStructSelectView(vsbuilder, parent=self)
hbox = QtGui.QWidget(parent=self)
ok = QtGui.QPushButton("Ok", parent=hbox)
cancel = QtGui.QPushButton("Cancel", parent=hbox)
self.structtree.doubleClicked.connect( self.dialog_activated )
ok.clicked.connect(self.dialog_ok)
cancel.clicked.connect(self.dialog_cancel)
hlyt.addStretch(1)
hlyt.addWidget(cancel)
hlyt.addWidget(ok)
hbox.setLayout(hlyt)
vlyt.addWidget(self.structtree)
vlyt.addWidget(hbox)
self.setLayout(vlyt)
self.resize(500, 500)
def dialog_activated(self, idx):
if idx.isValid():
pnode = idx.internalPointer()
self.structname = pnode.structname
self.accept()
def dialog_ok(self):
for idx in self.structtree.selectedIndexes():
pnode = idx.internalPointer()
self.structname = pnode.structname
self.accept()
def dialog_cancel(self):
self.reject()
@idlethreadsync
def selectStructure(vsbuilder, parent=None):
d = VQStructSelectDialog(vsbuilder, parent=parent)
r = d.exec_()
return d.structname
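# Usage sketch (illustrative): given a vstruct.builder.VStructBuilder,
# prompt the user for a structure and get back its dotted name, or None
# if the dialog is cancelled:
#
#     sname = selectStructure(vsbuilder, parent=mainwin)
#     if sname is not None:
#         print('user picked', sname)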
class VQStructNamespacesView(vq_tree.VQTreeView):
def __init__(self, parent=None):
vq_tree.VQTreeView.__init__(self, parent=parent)
model = vq_tree.VQTreeModel(parent=self, columns=('Subsystem', 'Module Name'))
win = model.append(('windows', ''))
xp_i386_user = model.append(('Windows XP i386 Userland', ''), parent=win)
xp_i386_ntdll = model.append(('','ntdll'), parent=xp_i386_user)
xp_i386_ntdll.modinfo = ('ntdll','vstruct.defs.windows.win_5_1_i386.ntdll')
xp_i386_kern = model.append(('Windows XP i386 Kernel', ''), parent=win)
xp_i386_nt = model.append(('','nt'), parent=xp_i386_kern)
xp_i386_nt.modinfo = ('nt','vstruct.defs.windows.win_5_1_i386.ntoskrnl')
xp_i386_win32k = model.append(('','win32k'), parent=xp_i386_kern)
xp_i386_win32k.modinfo = ('win32k','vstruct.defs.windows.win_5_1_i386.win32k')
win7_amd64_user = model.append(('Windows 7 amd64 Userland', ''), parent=win)
win7_amd64_ntdll = model.append(('','ntdll'), parent=win7_amd64_user)
win7_amd64_ntdll.modinfo = ('ntdll','vstruct.defs.windows.win_6_1_amd64.ntdll')
pos = model.append(('posix',''))
pos_elf = model.append(('', 'Elf'), parent=pos)
pos_elf.modinfo = ('elf', 'vstruct.defs.elf')
self.setModel(model)
class VQStructNamespaceDialog(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QDialog.__init__(self, parent=parent)
self.modinfo = None
self.setWindowTitle('Select a module...')
vlyt = QtGui.QVBoxLayout()
hlyt = QtGui.QHBoxLayout()
self.structtree = VQStructNamespacesView(parent=self)
hbox = QtGui.QWidget(parent=self)
ok = QtGui.QPushButton("Ok", parent=hbox)
cancel = QtGui.QPushButton("Cancel", parent=hbox)
self.structtree.doubleClicked.connect( self.dialog_activated )
ok.clicked.connect(self.dialog_ok)
cancel.clicked.connect(self.dialog_cancel)
hlyt.addStretch(1)
hlyt.addWidget(cancel)
hlyt.addWidget(ok)
hbox.setLayout(hlyt)
vlyt.addWidget(self.structtree)
vlyt.addWidget(hbox)
self.setLayout(vlyt)
self.resize(500, 500)
def dialog_activated(self, idx):
if idx.isValid():
pnode = idx.internalPointer()
self.modinfo = getattr(pnode, 'modinfo', None)
self.accept()
def dialog_ok(self):
for idx in self.structtree.selectedIndexes():
pnode = idx.internalPointer()
self.modinfo = getattr(pnode, 'modinfo', None)
self.accept()
def dialog_cancel(self):
self.reject()
@idlethreadsync
def selectStructNamespace(parent=None):
d = VQStructNamespaceDialog(parent=parent)
r = d.exec_()
return d.modinfo
| apache-2.0 |
infrascloudy/flask-base | app/models/user.py | 1 | 6385 | from flask import current_app
from flask_login import AnonymousUserMixin, UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import BadSignature, SignatureExpired
from werkzeug.security import check_password_hash, generate_password_hash
from app import db, login_manager
class Permission:
GENERAL = 0x01
ADMINISTER = 0xff
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
index = db.Column(db.String(64))
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.GENERAL, 'main', True),
'Administrator': (
Permission.ADMINISTER,
'admin',
False # grants all permissions
)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.index = roles[r][1]
role.default = roles[r][2]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role \'%s\'>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
confirmed = db.Column(db.Boolean, default=False)
first_name = db.Column(db.String(64), index=True)
last_name = db.Column(db.String(64), index=True)
email = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(128))
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['ADMIN_EMAIL']:
self.role = Role.query.filter_by(
permissions=Permission.ADMINISTER).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_admin(self):
return self.can(Permission.ADMINISTER)
@property
def password(self):
raise AttributeError('`password` is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=604800):
"""Generate a confirmation token to email a new user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def generate_email_change_token(self, new_email, expiration=3600):
"""Generate an email change token to email an existing user."""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def generate_password_reset_token(self, expiration=3600):
"""
        Generate a password reset token to email to an existing user.
"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def confirm_account(self, token):
"""Verify that the provided token is for this user's id."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
db.session.commit()
return True
def change_email(self, token):
"""Verify the new email for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
db.session.add(self)
db.session.commit()
return True
def reset_password(self, token, new_password):
"""Verify the new password for this user."""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except (BadSignature, SignatureExpired):
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
db.session.commit()
return True
@staticmethod
def generate_fake(count=100, **kwargs):
"""Generate a number of fake users for testing."""
from sqlalchemy.exc import IntegrityError
from random import seed, choice
from faker import Faker
fake = Faker()
roles = Role.query.all()
seed()
for i in range(count):
u = User(
first_name=fake.first_name(),
last_name=fake.last_name(),
email=fake.email(),
password=fake.password(),
confirmed=True,
role=choice(roles),
**kwargs)
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __repr__(self):
return '<User \'%s\'>' % self.full_name()
class AnonymousUser(AnonymousUserMixin):
def can(self, _):
return False
def is_admin(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
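# Usage sketch (illustrative; assumes an app context and a committed user):
#
#     user = User(email='a@example.com', password='cat')
#     db.session.add(user)
#     db.session.commit()
#     token = user.generate_confirmation_token()
#     user.confirm_account(token)  # True; marks the account confirmed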
| mit |
joernhees/git-hg-remote-bug_gae-init | main/lib/werkzeug/contrib/atom.py | 7 | 15329 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
from werkzeug._compat import implements_to_string, string_types
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
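# For example (values illustrative):
#   _make_text_block('title', 'Tom & Jerry')
#       == u'<title>Tom &amp; Jerry</title>\n'
#   _make_text_block('content', '<p>hi</p>', 'html')
#       == u'<content type="html">&lt;p&gt;hi&lt;/p&gt;</content>\n'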
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
@implements_to_string
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be an URI. If
not present the `feed_url` is used, but one of both is
required.
:param updated: the time the feed was modified the last time. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
    :param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'`` or ``'xhtml'``.
                          Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
self.author = ({'name': 'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url, True)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url, True)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url, True))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version, True))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __str__(self):
return self.to_string()
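# A minimal usage sketch of the feed class above (shown under werkzeug's
# usual name ``AtomFeed``); the title, URLs and timestamp are placeholders
# invented for illustration:
#
#     from datetime import datetime
#     feed = AtomFeed('My Blog', feed_url='http://example.com/feed.atom',
#                     url='http://example.com/')
#     feed.add('First post', 'Hello <b>world</b>', content_type='html',
#              url='http://example.com/posts/1', updated=datetime.utcnow())
#     xml = feed.to_string()   # the complete Atom document as a unicode string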
@implements_to_string
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
    :param id: a globally unique id for the entry. Must be a URI. If
               not present the URL is used, but one of the two is required.
:param updated: the time the entry was modified the last time. Must
be a :class:`datetime.datetime` object. Required.
    :param author: the author of the entry. Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional). Can also be a list of such
                   strings and dicts, mixed freely, if there are
                   multiple authors. Required if the feed does not have an
                   author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param categories: categories for the entry. Must be a list of dictionaries
with term (required), scheme and label (all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
    Wherever a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author', ())
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.categories = kwargs.get('categories', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base, True)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
for category in self.categories:
yield u' <category %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(category[k], True)) for k in category)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __str__(self):
return self.to_string()
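# FeedEntry can also be built standalone and appended via the feed's add();
# a sketch with placeholder values:
#
#     from datetime import datetime
#     entry = FeedEntry('A title', 'Some <em>content</em>',
#                       url='http://example.com/posts/2',
#                       updated=datetime.utcnow())
#     xml_fragment = entry.to_string()   # just the <entry>...</entry> element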
| mit |
75651/kbengine_cloud | kbe/src/lib/python/Lib/hmac.py | 142 | 5063 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from _operator import _compare_digest as compare_digest
import hashlib as _hashlib
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))
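# The two tables above implement the RFC 2104 inner/outer key padding: every
# possible byte value XORed with 0x36 ("ipad") or 0x5C ("opad"), so the
# zero-padded key can be transformed in one pass with bytes.translate()
# instead of an explicit loop.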
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
"""RFC 2104 HMAC class. Also complies with RFC 4231.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object. *OR*
A hash name suitable for hashlib.new().
Defaults to hashlib.md5.
Implicit default to hashlib.md5 is deprecated and will be
removed in Python 3.6.
        Note: key and msg must be bytes or bytearray objects.
"""
if not isinstance(key, (bytes, bytearray)):
raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)
if digestmod is None:
_warnings.warn("HMAC() without an explicit digestmod argument "
"is deprecated.", PendingDeprecationWarning, 2)
digestmod = _hashlib.md5
if callable(digestmod):
self.digest_cons = digestmod
elif isinstance(digestmod, str):
self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
else:
self.digest_cons = lambda d=b'': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
if hasattr(self.inner, 'block_size'):
blocksize = self.inner.block_size
if blocksize < 16:
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
else:
_warnings.warn('No block_size attribute on given digest object; '
'Assuming %d.' % (self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
# self.blocksize is the default blocksize. self.block_size is
# effective block size as well as the public API attribute.
self.block_size = blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key + bytes(blocksize - len(key))
self.outer.update(key.translate(trans_5C))
self.inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
@property
def name(self):
return "hmac-" + self.inner.name
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
# Call __new__ directly to avoid the expensive __init__.
other = self.__class__.__new__(self.__class__)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def _current(self):
"""Return a hash object for the current state.
To be used only internally with digest() and hexdigest().
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
h = self._current()
return h.hexdigest()
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
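# A short usage sketch; the key, message and received_tag below are
# placeholders for illustration:
#
#     import hashlib, hmac
#     tag = hmac.new(b'secret-key', b'payload', hashlib.sha256).hexdigest()
#     # verify a tag supplied by the peer in constant time:
#     ok = hmac.compare_digest(tag, received_tag)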
| lgpl-3.0 |
edwardzhou1980/bite-project | deps/mrtaskman/server/util/model_to_dict.py | 16 | 1536 | """Converts AppEngine db.Model's to JSON."""
from google.appengine.ext import db
from google.appengine.ext.blobstore import blobstore
import datetime
import json
import logging
import time
from util import db_properties
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
def ModelToDict(model):
"""Returns dictionary from given db.Model."""
if not isinstance(model, db.Model):
logging.error('%s is not an instance of db.Model. It is %s',
model, model.__class__)
assert isinstance(model, db.Model)
output = {}
output['id'] = model.key().id_or_name()
for key, prop in model.properties().iteritems():
value = getattr(model, key)
if value is None:
output[key] = value
elif isinstance(prop, db_properties.JsonProperty):
output[key] = json.loads(value)
elif isinstance(value, SIMPLE_TYPES):
output[key] = value
elif isinstance(value, datetime.date):
# Convert date/datetime to ms-since-epoch ("new Date()").
ms = time.mktime(value.utctimetuple()) * 1000
ms += getattr(value, 'microseconds', 0) / 1000
output[key] = int(ms)
elif isinstance(value, db.GeoPt):
output[key] = {'lat': value.lat, 'lon': value.lon}
elif isinstance(prop, blobstore.BlobReferenceProperty):
# TODO: Implement this if it's needed.
output[key] = 'UnimplementedBlobRef'
elif isinstance(value, db.Model):
output[key] = ModelToDict(value)
else:
raise ValueError('cannot encode ' + repr(prop))
return output
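# Illustrative only -- a hypothetical db.Model and the dict this helper would
# produce for it (the Task kind and its fields are invented):
#
#     class Task(db.Model):
#         name = db.StringProperty()
#         done = db.BooleanProperty()
#
#     ModelToDict(Task(name='write docs', done=False))
#     # -> {'id': ..., 'name': 'write docs', 'done': False}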
| apache-2.0 |
georgewhewell/CouchPotatoServer | libs/xmpp/auth.py | 196 | 15633 | ## auth.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: auth.py,v 1.41 2008/09/13 21:45:21 normanr Exp $
"""
Provides library with all Non-SASL and SASL authentication mechanisms.
Can be used both for client and transport authentication.
"""
from protocol import *
from client import PlugIn
import sha,base64,random,dispatcher,re
import md5
def HH(some): return md5.new(some).hexdigest()
def H(some): return md5.new(some).digest()
def C(some): return ':'.join(some)
class NonSASL(PlugIn):
""" Implements old Non-SASL (JEP-0078) authentication used in jabberd1.4 and transport authentication."""
def __init__(self,user,password,resource):
""" Caches username, password and resource for auth. """
PlugIn.__init__(self)
self.DBG_LINE='gen_auth'
self.user=user
self.password=password
self.resource=resource
def plugin(self,owner):
""" Determine the best auth method (digest/0k/plain) and use it for auth.
Returns used method name on success. Used internally. """
if not self.resource: return self.authComponent(owner)
self.DEBUG('Querying server about possible auth methods','start')
resp=owner.Dispatcher.SendAndWaitForResponse(Iq('get',NS_AUTH,payload=[Node('username',payload=[self.user])]))
if not isResultNode(resp):
self.DEBUG('No result node arrived! Aborting...','error')
return
iq=Iq(typ='set',node=resp)
query=iq.getTag('query')
query.setTagData('username',self.user)
query.setTagData('resource',self.resource)
if query.getTag('digest'):
self.DEBUG("Performing digest authentication",'ok')
query.setTagData('digest',sha.new(owner.Dispatcher.Stream._document_attrs['id']+self.password).hexdigest())
if query.getTag('password'): query.delChild('password')
method='digest'
elif query.getTag('token'):
token=query.getTagData('token')
seq=query.getTagData('sequence')
self.DEBUG("Performing zero-k authentication",'ok')
hash = sha.new(sha.new(self.password).hexdigest()+token).hexdigest()
for foo in xrange(int(seq)): hash = sha.new(hash).hexdigest()
query.setTagData('hash',hash)
method='0k'
else:
self.DEBUG("Sequre methods unsupported, performing plain text authentication",'warn')
query.setTagData('password',self.password)
method='plain'
resp=owner.Dispatcher.SendAndWaitForResponse(iq)
if isResultNode(resp):
            self.DEBUG('Successfully authenticated with remote host.','ok')
owner.User=self.user
owner.Resource=self.resource
owner._registered_name=owner.User+'@'+owner.Server+'/'+owner.Resource
return method
self.DEBUG('Authentication failed!','error')
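    # For clarity: the digest branch above proves knowledge of the password by
    # sending sha1(stream_id + password), while the zero-k branch hashes
    # sha1(sha1(password).hexdigest() + token) and re-hashes the result
    # `sequence` more times -- so neither branch transmits the plain password;
    # only the plain-text fallback does.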
def authComponent(self,owner):
""" Authenticate component. Send handshake stanza and wait for result. Returns "ok" on success. """
self.handshake=0
owner.send(Node(NS_COMPONENT_ACCEPT+' handshake',payload=[sha.new(owner.Dispatcher.Stream._document_attrs['id']+self.password).hexdigest()]))
owner.RegisterHandler('handshake',self.handshakeHandler,xmlns=NS_COMPONENT_ACCEPT)
while not self.handshake:
self.DEBUG("waiting on handshake",'notify')
owner.Process(1)
owner._registered_name=self.user
if self.handshake+1: return 'ok'
def handshakeHandler(self,disp,stanza):
""" Handler for registering in dispatcher for accepting transport authentication. """
if stanza.getName()=='handshake': self.handshake=1
else: self.handshake=-1
class SASL(PlugIn):
""" Implements SASL authentication. """
def __init__(self,username,password):
PlugIn.__init__(self)
self.username=username
self.password=password
def plugin(self,owner):
if not self._owner.Dispatcher.Stream._document_attrs.has_key('version'): self.startsasl='not-supported'
elif self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else: self.startsasl=None
def auth(self):
""" Start authentication. Result can be obtained via "SASL.startsasl" attribute and will be
either "success" or "failure". Note that successfull auth will take at least
two Dispatcher.Process() calls. """
if self.startsasl: pass
elif self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else: self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def plugout(self):
""" Remove SASL handlers from owner's dispatcher. Used internally. """
if self._owner.__dict__.has_key('features'): self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
if self._owner.__dict__.has_key('challenge'): self._owner.UnregisterHandler('challenge',self.SASLHandler,xmlns=NS_SASL)
if self._owner.__dict__.has_key('failure'): self._owner.UnregisterHandler('failure',self.SASLHandler,xmlns=NS_SASL)
if self._owner.__dict__.has_key('success'): self._owner.UnregisterHandler('success',self.SASLHandler,xmlns=NS_SASL)
def FeaturesHandler(self,conn,feats):
""" Used to determine if server supports SASL auth. Used internally. """
if not feats.getTag('mechanisms',namespace=NS_SASL):
self.startsasl='not-supported'
self.DEBUG('SASL not supported by server','error')
return
mecs=[]
for mec in feats.getTag('mechanisms',namespace=NS_SASL).getTags('mechanism'):
mecs.append(mec.getData())
self._owner.RegisterHandler('challenge',self.SASLHandler,xmlns=NS_SASL)
self._owner.RegisterHandler('failure',self.SASLHandler,xmlns=NS_SASL)
self._owner.RegisterHandler('success',self.SASLHandler,xmlns=NS_SASL)
if "ANONYMOUS" in mecs and self.username == None:
node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'ANONYMOUS'})
elif "DIGEST-MD5" in mecs:
node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'DIGEST-MD5'})
elif "PLAIN" in mecs:
sasl_data='%s\x00%s\x00%s'%(self.username+'@'+self._owner.Server,self.username,self.password)
node=Node('auth',attrs={'xmlns':NS_SASL,'mechanism':'PLAIN'},payload=[base64.encodestring(sasl_data).replace('\r','').replace('\n','')])
else:
self.startsasl='failure'
            self.DEBUG('I can only use DIGEST-MD5 and PLAIN mechanisms.','error')
return
self.startsasl='in-process'
self._owner.send(node.__str__())
raise NodeProcessed
def SASLHandler(self,conn,challenge):
""" Perform next SASL auth step. Used internally. """
        if challenge.getNamespace()!=NS_SASL: return
if challenge.getName()=='failure':
self.startsasl='failure'
try: reason=challenge.getChildren()[0]
except: reason=challenge
            self.DEBUG('Failed SASL authentication: %s'%reason,'error')
raise NodeProcessed
elif challenge.getName()=='success':
self.startsasl='success'
self.DEBUG('Successfully authenticated with remote server.','ok')
handlers=self._owner.Dispatcher.dumpHandlers()
self._owner.Dispatcher.PlugOut()
dispatcher.Dispatcher().PlugIn(self._owner)
self._owner.Dispatcher.restoreHandlers(handlers)
self._owner.User=self.username
raise NodeProcessed
        ########################################
incoming_data=challenge.getData()
chal={}
data=base64.decodestring(incoming_data)
self.DEBUG('Got challenge:'+data,'ok')
for pair in re.findall('(\w+\s*=\s*(?:(?:"[^"]+")|(?:[^,]+)))',data):
key,value=[x.strip() for x in pair.split('=', 1)]
if value[:1]=='"' and value[-1:]=='"': value=value[1:-1]
chal[key]=value
if chal.has_key('qop') and 'auth' in [x.strip() for x in chal['qop'].split(',')]:
resp={}
resp['username']=self.username
resp['realm']=self._owner.Server
resp['nonce']=chal['nonce']
cnonce=''
for i in range(7):
cnonce+=hex(int(random.random()*65536*4096))[2:]
resp['cnonce']=cnonce
resp['nc']=('00000001')
resp['qop']='auth'
resp['digest-uri']='xmpp/'+self._owner.Server
A1=C([H(C([resp['username'],resp['realm'],self.password])),resp['nonce'],resp['cnonce']])
A2=C(['AUTHENTICATE',resp['digest-uri']])
response= HH(C([HH(A1),resp['nonce'],resp['nc'],resp['cnonce'],resp['qop'],HH(A2)]))
resp['response']=response
resp['charset']='utf-8'
sasl_data=''
for key in ['charset','username','realm','nonce','nc','cnonce','digest-uri','response','qop']:
if key in ['nc','qop','response','charset']: sasl_data+="%s=%s,"%(key,resp[key])
else: sasl_data+='%s="%s",'%(key,resp[key])
            ########################################
node=Node('response',attrs={'xmlns':NS_SASL},payload=[base64.encodestring(sasl_data[:-1]).replace('\r','').replace('\n','')])
self._owner.send(node.__str__())
elif chal.has_key('rspauth'): self._owner.send(Node('response',attrs={'xmlns':NS_SASL}).__str__())
else:
self.startsasl='failure'
            self.DEBUG('Failed SASL authentication: unknown challenge','error')
raise NodeProcessed
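# For reference, the DIGEST-MD5 response computed above reduces to nested MD5
# digests over colon-joined fields (H, HH and C are the helpers defined at
# the top of this module); schematically:
#
#     A1 = C([H(C([username, realm, password])), nonce, cnonce])
#     A2 = C(['AUTHENTICATE', digest_uri])
#     response = HH(C([HH(A1), nonce, nc, cnonce, 'auth', HH(A2)]))
#
# where nc is the hex request counter ('00000001' for the first request).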
class Bind(PlugIn):
""" Bind some JID to the current connection to allow router know of our location."""
def __init__(self):
PlugIn.__init__(self)
self.DBG_LINE='bind'
self.bound=None
def plugin(self,owner):
""" Start resource binding, if allowed at this time. Used internally. """
if self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else: self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def plugout(self):
""" Remove Bind handler from owner's dispatcher. Used internally. """
self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def FeaturesHandler(self,conn,feats):
""" Determine if server supports resource binding and set some internal attributes accordingly. """
if not feats.getTag('bind',namespace=NS_BIND):
self.bound='failure'
            self.DEBUG('Server did not request binding.','error')
return
if feats.getTag('session',namespace=NS_SESSION): self.session=1
else: self.session=-1
self.bound=[]
def Bind(self,resource=None):
""" Perform binding. Use provided resource name or random (if not provided). """
while self.bound is None and self._owner.Process(1): pass
if resource: resource=[Node('resource',payload=[resource])]
else: resource=[]
resp=self._owner.SendAndWaitForResponse(Protocol('iq',typ='set',payload=[Node('bind',attrs={'xmlns':NS_BIND},payload=resource)]))
if isResultNode(resp):
self.bound.append(resp.getTag('bind').getTagData('jid'))
self.DEBUG('Successfully bound %s.'%self.bound[-1],'ok')
jid=JID(resp.getTag('bind').getTagData('jid'))
self._owner.User=jid.getNode()
self._owner.Resource=jid.getResource()
resp=self._owner.SendAndWaitForResponse(Protocol('iq',typ='set',payload=[Node('session',attrs={'xmlns':NS_SESSION})]))
if isResultNode(resp):
self.DEBUG('Successfully opened session.','ok')
self.session=1
return 'ok'
else:
self.DEBUG('Session open failed.','error')
self.session=0
elif resp: self.DEBUG('Binding failed: %s.'%resp.getTag('error'),'error')
else:
self.DEBUG('Binding failed: timeout expired.','error')
return ''
class ComponentBind(PlugIn):
""" ComponentBind some JID to the current connection to allow router know of our location."""
def __init__(self, sasl):
PlugIn.__init__(self)
self.DBG_LINE='bind'
self.bound=None
self.needsUnregister=None
self.sasl = sasl
def plugin(self,owner):
""" Start resource binding, if allowed at this time. Used internally. """
if not self.sasl:
self.bound=[]
return
if self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else:
self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
self.needsUnregister=1
def plugout(self):
""" Remove ComponentBind handler from owner's dispatcher. Used internally. """
if self.needsUnregister:
self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
def FeaturesHandler(self,conn,feats):
""" Determine if server supports resource binding and set some internal attributes accordingly. """
if not feats.getTag('bind',namespace=NS_BIND):
self.bound='failure'
            self.DEBUG('Server did not request binding.','error')
return
if feats.getTag('session',namespace=NS_SESSION): self.session=1
else: self.session=-1
self.bound=[]
def Bind(self,domain=None):
""" Perform binding. Use provided domain name (if not provided). """
while self.bound is None and self._owner.Process(1): pass
if self.sasl:
xmlns = NS_COMPONENT_1
else:
xmlns = None
self.bindresponse = None
ttl = dispatcher.DefaultTimeout
self._owner.RegisterHandler('bind',self.BindHandler,xmlns=xmlns)
self._owner.send(Protocol('bind',attrs={'name':domain},xmlns=NS_COMPONENT_1))
while self.bindresponse is None and self._owner.Process(1) and ttl > 0: ttl-=1
self._owner.UnregisterHandler('bind',self.BindHandler,xmlns=xmlns)
resp=self.bindresponse
if resp and resp.getAttr('error'):
self.DEBUG('Binding failed: %s.'%resp.getAttr('error'),'error')
elif resp:
self.DEBUG('Successfully bound.','ok')
return 'ok'
else:
self.DEBUG('Binding failed: timeout expired.','error')
return ''
def BindHandler(self,conn,bind):
self.bindresponse = bind
pass
| gpl-3.0 |
zakuro9715/lettuce | tests/integration/lib/Django-1.2.5/django/core/mail/backends/console.py | 308 | 1295 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stdout)
self._lock = threading.RLock()
super(EmailBackend, self).__init__(*args, **kwargs)
def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
self._lock.acquire()
try:
# The try-except is nested to allow for
# Python 2.4 support (Refs #12147)
try:
stream_created = self.open()
for message in email_messages:
self.stream.write('%s\n' % message.message().as_string())
self.stream.write('-'*79)
self.stream.write('\n')
self.stream.flush() # flush after each message
if stream_created:
self.close()
except:
if not self.fail_silently:
raise
finally:
self._lock.release()
return len(email_messages)
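# Typical configuration sketch: point Django at this backend in a project's
# settings module, after which send_mail() and friends print each message to
# stdout instead of delivering it:
#
#     EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'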
| gpl-3.0 |
cnbeining/you-get | src/you_get/extractors/huaban.py | 7 | 2281 | #!/usr/bin/env python
import json
import os
import re
import math
import traceback
import urllib.parse as urlparse
from ..common import *
__all__ = ['huaban_download']
site_info = '花瓣 (Huaban)'
LIMIT = 100
class Board:
def __init__(self, title, pins):
self.title = title
self.pins = pins
self.pin_count = len(pins)
class Pin:
host = 'http://img.hb.aicdn.com/'
def __init__(self, pin_json):
img_file = pin_json['file']
self.id = str(pin_json['pin_id'])
self.url = urlparse.urljoin(self.host, img_file['key'])
self.ext = img_file['type'].split('/')[-1]
def construct_url(url, **params):
param_str = urlparse.urlencode(params)
return url + '?' + param_str
def extract_json_data(url, **params):
url = construct_url(url, **params)
html = get_content(url, headers=fake_headers)
json_string = match1(html, r'app.page\["board"\] = (.*?});')
json_data = json.loads(json_string)
return json_data
def extract_board_data(url):
json_data = extract_json_data(url, limit=LIMIT)
pin_list = json_data['pins']
title = json_data['title']
pin_count = json_data['pin_count']
pin_count -= len(pin_list)
while pin_count > 0:
json_data = extract_json_data(url, max=pin_list[-1]['pin_id'],
limit=LIMIT)
pins = json_data['pins']
pin_list += pins
pin_count -= len(pins)
return Board(title, list(map(Pin, pin_list)))
def huaban_download_board(url, output_dir, **kwargs):
kwargs['merge'] = False
board = extract_board_data(url)
output_dir = os.path.join(output_dir, board.title)
print_info(site_info, board.title, 'jpg', float('Inf'))
for pin in board.pins:
download_urls([pin.url], pin.id, pin.ext, float('Inf'),
output_dir=output_dir, faker=True, **kwargs)
def huaban_download(url, output_dir='.', **kwargs):
if re.match(r'http://huaban\.com/boards/\d+/', url):
huaban_download_board(url, output_dir, **kwargs)
else:
print('Only board (画板) pages are supported currently')
print('ex: http://huaban.com/boards/12345678/')
download = huaban_download
download_playlist = playlist_not_supported("huaban")
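# Usage sketch (the board id below is a placeholder):
#
#     huaban_download('http://huaban.com/boards/12345678/', output_dir='.')
#
# which saves every pin of the board as <pin_id>.<ext> under "./<board title>/".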
| mit |
ME-ICA/me-ica | meica.libs/mdp/graph/graph.py | 1 | 13012 | # inspired by some code by Nathan Denny (1999)
# see http://www.ece.arizona.edu/~denny/python_nest/graph_lib_1.0.1.html
try:
# use reduce against BDFL's will even on python > 2.6
from functools import reduce
except ImportError:
pass
class GraphException(Exception):
"""Base class for exception in the graph package."""
pass
class GraphTopologicalException(GraphException):
"""Exception thrown during a topological sort if the graph is cyclical."""
pass
def is_sequence(x):
return isinstance(x, (list, tuple))
def recursive_map(func, seq):
"""Apply a function recursively on a sequence and all subsequences."""
def _func(x):
if is_sequence(x):
return recursive_map(func, x)
else:
return func(x)
return map(_func, seq)
def recursive_reduce(func, seq, *argv):
"""Apply reduce(func, seq) recursively to a sequence and all its
subsequences."""
def _func(x, y):
if is_sequence(y):
return func(x, recursive_reduce(func, y))
else:
return func(x, y)
return reduce(_func, seq, *argv)
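# Small sketches of the two helpers above (Python 2 semantics, where map()
# returns a list):
#
#     recursive_map(str, (1, 2, (3, 4)))                       # ['1', '2', ['3', '4']]
#     recursive_reduce(lambda x, y: x + y, (1, 2, (3, 4)), 0)  # 10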
class GraphNode(object):
"""Represent a graph node and all information attached to it."""
def __init__(self, data=None):
self.data = data
# edges in
self.ein = []
# edges out
self.eout = []
def add_edge_in(self, edge):
self.ein.append(edge)
def add_edge_out(self, edge):
self.eout.append(edge)
def remove_edge_in(self, edge):
self.ein.remove(edge)
def remove_edge_out(self, edge):
self.eout.remove(edge)
def get_edges_in(self, from_ = None):
"""Return a copy of the list of the entering edges. If from_
is specified, return only the nodes coming from that node."""
inedges = self.ein[:]
if from_:
inedges = [edge for edge in inedges if edge.head == from_]
return inedges
def get_edges_out(self, to_ = None):
"""Return a copy of the list of the outgoing edges. If to_
is specified, return only the nodes going to that node."""
outedges = self.eout[:]
if to_:
outedges = [edge for edge in outedges if edge.tail == to_]
return outedges
def get_edges(self, neighbor = None):
"""Return a copy of all edges. If neighbor is specified, return
only the edges connected to that node."""
return ( self.get_edges_in(from_=neighbor) +
self.get_edges_out(to_=neighbor) )
def in_degree(self):
"""Return the number of entering edges."""
return len(self.ein)
def out_degree(self):
"""Return the number of outgoing edges."""
return len(self.eout)
def degree(self):
"""Return the number of edges."""
return self.in_degree()+self.out_degree()
def in_neighbors(self):
"""Return the neighbors down in-edges (i.e. the parents nodes)."""
return map(lambda x: x.get_head(), self.ein)
def out_neighbors(self):
"""Return the neighbors down in-edges (i.e. the parents nodes)."""
return map(lambda x: x.get_tail(), self.eout)
def neighbors(self):
return self.in_neighbors() + self.out_neighbors()
class GraphEdge(object):
"""Represent a graph edge and all information attached to it."""
def __init__(self, head, tail, data=None):
# head node
self.head = head
# neighbors out
self.tail = tail
# arbitrary data slot
self.data = data
def get_ends(self):
"""Return the tuple (head_id, tail_id)."""
return (self.head, self.tail)
def get_tail(self):
return self.tail
def get_head(self):
return self.head
class Graph(object):
"""Represent a directed graph."""
def __init__(self):
# list of nodes
self.nodes = []
# list of edges
self.edges = []
# node functions
def add_node(self, data=None):
node = GraphNode(data=data)
self.nodes.append(node)
return node
def remove_node(self, node):
# the node is not in this graph
if node not in self.nodes:
errstr = 'This node is not part of the graph (%s)' % node
raise GraphException(errstr)
# remove all edges containing this node
for edge in node.get_edges():
self.remove_edge(edge)
# remove the node
self.nodes.remove(node)
# edge functions
def add_edge(self, head, tail, data=None):
"""Add an edge going from head to tail.
head : head node
tail : tail node
"""
# create edge
edge = GraphEdge(head, tail, data=data)
# add edge to head and tail node
head.add_edge_out(edge)
tail.add_edge_in(edge)
# add to the edges dictionary
self.edges.append(edge)
return edge
def remove_edge(self, edge):
head, tail = edge.get_ends()
# remove from head
head.remove_edge_out(edge)
# remove from tail
tail.remove_edge_in(edge)
# remove the edge
self.edges.remove(edge)
### populate functions
def add_nodes(self, data):
"""Add many nodes at once.
data -- number of nodes to add or sequence of data values, one for
each new node"""
if not is_sequence(data):
data = [None]*data
return map(self.add_node, data)
def add_tree(self, tree):
"""Add a tree to the graph.
        The tree is specified with a nested list of tuples, in a LISP-like
notation. The values specified in the list become the values of
the single nodes.
Return an equivalent nested list with the nodes instead of the values.
Example:
>>> a=b=c=d=e=None
        >>> g.add_tree( (a, b, (c, d, e)) )
corresponds to this tree structure, with all node values set to None:
                a
               / \
              b   c
                 / \
                d   e
"""
def _add_edge(root, son):
self.add_edge(root, son)
return root
nodes = recursive_map(self.add_node, tree)
recursive_reduce(_add_edge, nodes)
return nodes
def add_full_connectivity(self, from_nodes, to_nodes):
"""Add full connectivity from a group of nodes to another one.
Return a list of lists of edges, one for each node in 'from_nodes'.
Example: create a two-layer graph with full connectivity.
>>> g = Graph()
>>> layer1 = g.add_nodes(10)
>>> layer2 = g.add_nodes(5)
>>> g.add_full_connectivity(layer1, layer2)
"""
edges = []
for from_ in from_nodes:
edges.append(map(lambda x: self.add_edge(from_, x), to_nodes))
return edges
###### graph algorithms
def topological_sort(self):
"""Perform a topological sort of the nodes. If the graph has a cycle,
throw a GraphTopologicalException with the list of successfully
ordered nodes."""
# topologically sorted list of the nodes (result)
topological_list = []
# queue (fifo list) of the nodes with in_degree 0
topological_queue = []
# {node: in_degree} for the remaining nodes (those with in_degree>0)
remaining_indegree = {}
# init queues and lists
for node in self.nodes:
indegree = node.in_degree()
if indegree == 0:
topological_queue.append(node)
else:
remaining_indegree[node] = indegree
# remove nodes with in_degree 0 and decrease the in_degree of their sons
while len(topological_queue):
# remove the first node with degree 0
node = topological_queue.pop(0)
topological_list.append(node)
# decrease the in_degree of the sons
for son in node.out_neighbors():
remaining_indegree[son] -= 1
if remaining_indegree[son] == 0:
topological_queue.append(son)
# if not all nodes were covered, the graph must have a cycle
        # raise a GraphTopologicalException
if len(topological_list)!=len(self.nodes):
raise GraphTopologicalException(topological_list)
return topological_list
### Depth-First sort
def _dfs(self, neighbors_fct, root, visit_fct=None):
# core depth-first sort function
# changing the neighbors function to return the sons of a node,
# its parents, or both one gets normal dfs, reverse dfs, or
# dfs on the equivalent undirected graph, respectively
# result list containing the nodes in Depth-First order
dfs_list = []
# keep track of all already visited nodes
visited_nodes = { root: None }
# stack (lifo) list
dfs_stack = []
dfs_stack.append(root)
while len(dfs_stack):
# consider the next node on the stack
node = dfs_stack.pop()
dfs_list.append(node)
# visit the node
if visit_fct != None:
visit_fct(node)
# add all sons to the stack (if not already visited)
for son in neighbors_fct(node):
if son not in visited_nodes:
visited_nodes[son] = None
dfs_stack.append(son)
return dfs_list
def dfs(self, root, visit_fct=None):
"""Return a list of nodes in some Depth First order starting from
a root node. If defined, visit_fct is applied on each visited node.
The returned list does not have to contain all nodes in the
graph, but only the ones reachable from the root.
"""
neighbors_fct = lambda node: node.out_neighbors()
return self._dfs(neighbors_fct, root, visit_fct=visit_fct)
def undirected_dfs(self, root, visit_fct=None):
"""Perform Depth First sort.
This function is identical to dfs, but the sort is performed on
the equivalent undirected version of the graph."""
neighbors_fct = lambda node: node.neighbors()
return self._dfs(neighbors_fct, root, visit_fct=visit_fct)
### Connected components
def connected_components(self):
"""Return a list of lists containing the nodes of all connected
components of the graph."""
visited = {}
def visit_fct(node, visited=visited):
visited[node] = None
components = []
nodes = self.nodes
for node in nodes:
if node in visited:
continue
components.append(self.undirected_dfs(node, visit_fct))
return components
def is_weakly_connected(self):
"""Return True if the graph is weakly connected."""
return len(self.undirected_dfs(self.nodes[0]))==len(self.nodes)
### Breadth-First Sort
# BFS and DFS could be generalized to one function. I leave them
# distinct for clarity.
def _bfs(self, neighbors_fct, root, visit_fct=None):
# core breadth-first sort function
# changing the neighbors function to return the sons of a node,
# its parents, or both one gets normal bfs, reverse bfs, or
# bfs on the equivalent undirected graph, respectively
# result list containing the nodes in Breadth-First order
bfs_list = []
# keep track of all already visited nodes
visited_nodes = { root: None }
# queue (fifo) list
bfs_queue = []
bfs_queue.append(root)
while len(bfs_queue):
# consider the next node in the queue
node = bfs_queue.pop(0)
bfs_list.append(node)
# visit the node
if visit_fct != None:
visit_fct(node)
# add all sons to the queue (if not already visited)
for son in neighbors_fct(node):
if son not in visited_nodes:
visited_nodes[son] = None
bfs_queue.append(son)
return bfs_list
def bfs(self, root, visit_fct=None):
"""Return a list of nodes in some Breadth First order starting from
a root node. If defined, visit_fct is applied on each visited node.
Note the returned list does not have to contain all nodes in the
graph, but only the ones reachable from the root."""
neighbors_fct = lambda node: node.out_neighbors()
return self._bfs(neighbors_fct, root, visit_fct=visit_fct)
def undirected_bfs(self, root, visit_fct=None):
"""Perform Breadth First sort.
This function is identical to bfs, but the sort is performed on
the equivalent undirected version of the graph."""
neighbors_fct = lambda node: node.neighbors()
return self._bfs(neighbors_fct, root, visit_fct=visit_fct)
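# A usage sketch: build a small DAG and sort it topologically (the node data
# values 'a', 'b', 'c' are arbitrary placeholders):
#
#     g = Graph()
#     a, b, c = g.add_nodes(['a', 'b', 'c'])
#     g.add_edge(a, b)
#     g.add_edge(a, c)
#     g.add_edge(b, c)
#     [n.data for n in g.topological_sort()]   # ['a', 'b', 'c']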
| lgpl-2.1 |
joequery/django | tests/utils_tests/test_html.py | 160 | 10711 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from datetime import datetime
from django.test import SimpleTestCase, ignore_warnings
from django.utils import html, safestring, six
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
class TestUtilsHtml(SimpleTestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
('&', '&'),
('<', '<'),
('>', '>'),
('"', '"'),
("'", '''),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
self.check_output(f, '<&', '<&')
def test_format_html(self):
self.assertEqual(
html.format_html("{} {} {third} {fourth}",
"< Dangerous >",
html.mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=html.mark_safe("<i>safe again</i>")
),
"< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
)
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
('<p>See: 'é is an apostrophe followed by e acute</p>',
'See: 'é is an apostrophe followed by e acute'),
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('hi, <f x', 'hi, <f x'),
('234<235, right?', '234<235, right?'),
('a4<a5 right?', 'a4<a5 right?'),
('b7>b2!', 'b7>b2!'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
('a<p a >b</p>c', 'abc'),
('d<a:b c:d>e</p>f', 'def'),
('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
# caused infinite loop on Pythons not patched with
# http://bugs.python.org/issue20288
('&gotcha&#;<>', '&gotcha&#;<>'),
)
for value, output in items:
self.check_output(f, value, output)
# Some convoluted syntax for which parsing may differ between python versions
output = html.strip_tags('<sc<!-- -->ript>test<<!-- -->/script>')
self.assertNotIn('<script>', output)
self.assertIn('test', output)
output = html.strip_tags('<script>alert()</script>&h')
self.assertNotIn('<script>', output)
self.assertIn('alert()', output)
# Test with more lengthy content (also catching performance regressions)
for filename in ('strip_tags1.html', 'strip_tags2.txt'):
path = os.path.join(os.path.dirname(upath(__file__)), 'files', filename)
with open(path, 'r') as fp:
content = force_text(fp.read())
start = datetime.now()
stripped = html.strip_tags(content)
elapsed = datetime.now() - start
self.assertEqual(elapsed.seconds, 0)
self.assertIn("Please try again.", stripped)
self.assertNotIn('<', stripped)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
@ignore_warnings(category=RemovedInDjango110Warning)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_escapejs(self):
f = html.escapejs
items = (
('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(r'\ : backslashes, too', '\\u005C : backslashes, too'),
('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
@ignore_warnings(category=RemovedInDjango110Warning)
def test_remove_tags(self):
f = html.remove_tags
items = (
("<b><i>Yes</i></b>", "b i", "Yes"),
("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"),
)
for value, tags, output in items:
self.assertEqual(f(value, tags), output)
def test_smart_urlquote(self):
quote = html.smart_urlquote
# Ensure that IDNs are properly quoted
self.assertEqual(quote('http://öäü.com/'), 'http://xn--4ca9at.com/')
self.assertEqual(quote('http://öäü.com/öäü/'), 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/')
# Ensure that everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered safe as per RFC
self.assertEqual(quote('http://example.com/path/öäü/'), 'http://example.com/path/%C3%B6%C3%A4%C3%BC/')
self.assertEqual(quote('http://example.com/%C3%B6/ä/'), 'http://example.com/%C3%B6/%C3%A4/')
self.assertEqual(quote('http://example.com/?x=1&y=2+3&z='), 'http://example.com/?x=1&y=2+3&z=')
self.assertEqual(quote('http://example.com/?x=<>"\''), 'http://example.com/?x=%3C%3E%22%27')
self.assertEqual(quote('http://example.com/?q=http://example.com/?x=1%26q=django'),
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
self.assertEqual(quote('http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
def test_conditional_escape(self):
s = '<h1>interop</h1>'
self.assertEqual(html.conditional_escape(s),
'<h1>interop</h1>')
self.assertEqual(html.conditional_escape(safestring.mark_safe(s)), s)
def test_html_safe(self):
@html.html_safe
class HtmlClass(object):
if six.PY2:
def __unicode__(self):
return "<h1>I'm a html class!</h1>"
else:
def __str__(self):
return "<h1>I'm a html class!</h1>"
html_obj = HtmlClass()
self.assertTrue(hasattr(HtmlClass, '__html__'))
self.assertTrue(hasattr(html_obj, '__html__'))
self.assertEqual(force_text(html_obj), html_obj.__html__())
def test_html_safe_subclass(self):
if six.PY2:
class BaseClass(object):
def __html__(self):
# defines __html__ on its own
return 'some html content'
def __unicode__(self):
return 'some non html content'
@html.html_safe
class Subclass(BaseClass):
def __unicode__(self):
# overrides __unicode__ and is marked as html_safe
return 'some html safe content'
else:
class BaseClass(object):
def __html__(self):
# defines __html__ on its own
return 'some html content'
def __str__(self):
return 'some non html content'
@html.html_safe
class Subclass(BaseClass):
def __str__(self):
# overrides __str__ and is marked as html_safe
return 'some html safe content'
subclass_obj = Subclass()
self.assertEqual(force_text(subclass_obj), subclass_obj.__html__())
def test_html_safe_defines_html_error(self):
msg = "can't apply @html_safe to HtmlClass because it defines __html__()."
with self.assertRaisesMessage(ValueError, msg):
@html.html_safe
class HtmlClass(object):
def __html__(self):
return "<h1>I'm a html class!</h1>"
def test_html_safe_doesnt_define_str(self):
method_name = '__unicode__()' if six.PY2 else '__str__()'
msg = "can't apply @html_safe to HtmlClass because it doesn't define %s." % method_name
with self.assertRaisesMessage(ValueError, msg):
@html.html_safe
class HtmlClass(object):
pass
| bsd-3-clause |
jjhuff/fcc-comments | lib/nltk/sem/evaluate.py | 5 | 22893 | # Natural Language Toolkit: Models for first-order languages with lambda
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Ewan Klein <[email protected]>,
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
#TODO:
#- fix tracing
#- fix iterator-based approach to existentials
"""
This module provides data structures for representing first-order
models.
"""
from __future__ import print_function
from pprint import pformat
import inspect
import textwrap
from nltk.decorators import decorator
from nltk.sem.logic import (AbstractVariableExpression, AllExpression,
AndExpression, ApplicationExpression, EqualityExpression,
ExistsExpression, IffExpression, ImpExpression,
IndividualVariableExpression, LambdaExpression,
LogicParser, NegatedExpression, OrExpression,
Variable, is_indvar)
class Error(Exception): pass
class Undefined(Error): pass
def trace(f, *args, **kw):
argspec = inspect.getargspec(f)
d = dict(zip(argspec[0], args))
if d.pop('trace', None):
print()
for item in d.items():
print("%s => %s" % item)
return f(*args, **kw)
def is_rel(s):
"""
Check whether a set represents a relation (of any arity).
:param s: a set containing tuples of str elements
:type s: set
:rtype: bool
"""
# we have the empty relation, i.e. set()
if len(s) == 0:
return True
# all the elements are tuples of the same length
elif s == set([elem for elem in s if isinstance(elem, tuple)]) and\
len(max(s))==len(min(s)):
return True
else:
raise ValueError("Set %r contains sequences of different lengths" % s)
def set2rel(s):
"""
Convert a set containing individuals (strings or numbers) into a set of
unary tuples. Any tuples of strings already in the set are passed through
unchanged.
For example:
- set(['a', 'b']) => set([('a',), ('b',)])
- set([3, 27]) => set([('3',), ('27',)])
:type s: set
:rtype: set of tuple of str
"""
new = set()
for elem in s:
if isinstance(elem, str):
new.add((elem,))
elif isinstance(elem, int):
            new.add((str(elem),))   # a 1-tuple, as in the str case above
else:
new.add(elem)
return new
def arity(rel):
"""
Check the arity of a relation.
:type rel: set of tuples
:rtype: int of tuple of str
"""
if len(rel) == 0:
return 0
return len(list(rel)[0])
class Valuation(dict):
"""
A dictionary which represents a model-theoretic Valuation of non-logical constants.
Keys are strings representing the constants to be interpreted, and values correspond
to individuals (represented as strings) and n-ary relations (represented as sets of tuples
of strings).
An instance of ``Valuation`` will raise a KeyError exception (i.e.,
just behave like a standard dictionary) if indexed with an expression that
is not in its list of symbols.
"""
def __init__(self, iter):
"""
:param iter: a list of (symbol, value) pairs.
"""
dict.__init__(self)
for (sym, val) in iter:
if isinstance(val, str) or isinstance(val, bool):
self[sym] = val
elif isinstance(val, set):
self[sym] = set2rel(val)
else:
msg = textwrap.fill("Error in initializing Valuation. "
"Unrecognized value for symbol '%s':\n%s" % (sym, val), width=66)
raise ValueError(msg)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)
else:
raise Undefined("Unknown expression: '%s'" % key)
def __str__(self):
return pformat(self)
@property
def domain(self):
"""Set-theoretic domain of the value-space of a Valuation."""
dom = []
for val in self.values():
if isinstance(val, str):
dom.append(val)
elif not isinstance(val, bool):
dom.extend([elem for tuple in val for elem in tuple if elem is not None])
return set(dom)
@property
def symbols(self):
"""The non-logical constants which the Valuation recognizes."""
return sorted(self.keys())
class Assignment(dict):
"""
A dictionary which represents an assignment of values to variables.
    An assignment can only assign values from its domain.
If an unknown expression *a* is passed to a model *M*\ 's
interpretation function *i*, *i* will first check whether *M*\ 's
valuation assigns an interpretation to *a* as a constant, and if
this fails, *i* will delegate the interpretation of *a* to
    *g*. *g* only assigns values to individual variables (i.e.,
    members of the class ``IndividualVariableExpression`` in the ``logic``
    module). If a variable is not assigned a value by *g*, it will raise
an ``Undefined`` exception.
A variable *Assignment* is a mapping from individual variables to
entities in the domain. Individual variables are usually indicated
with the letters ``'x'``, ``'y'``, ``'w'`` and ``'z'``, optionally
followed by an integer (e.g., ``'x0'``, ``'y332'``). Assignments are
created using the ``Assignment`` constructor, which also takes the
domain as a parameter.
>>> from nltk.sem.evaluate import Assignment
>>> dom = set(['u1', 'u2', 'u3', 'u4'])
>>> g3 = Assignment(dom, [('x', 'u1'), ('y', 'u2')])
>>> g3
{'y': 'u2', 'x': 'u1'}
There is also a ``print`` format for assignments which uses a notation
closer to that in logic textbooks:
>>> print g3
g[u2/y][u1/x]
It is also possible to update an assignment using the ``add`` method:
>>> dom = set(['u1', 'u2', 'u3', 'u4'])
>>> g4 = Assignment(dom)
>>> g4.add('x', 'u1')
{'x': 'u1'}
With no arguments, ``purge()`` is equivalent to ``clear()`` on a dictionary:
>>> g4.purge()
>>> g4
{}
:param domain: the domain of discourse
:type domain: set
:param assign: a list of (varname, value) associations
:type assign: list
"""
def __init__(self, domain, assign=None):
dict.__init__(self)
self.domain = domain
if assign:
for (var, val) in assign:
assert val in self.domain,\
"'%s' is not in the domain: %s" % (val, self.domain)
assert is_indvar(var),\
"Wrong format for an Individual Variable: '%s'" % var
self[var] = val
self._addvariant()
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)
else:
raise Undefined("Not recognized as a variable: '%s'" % key)
def copy(self):
new = Assignment(self.domain)
new.update(self)
return new
def purge(self, var=None):
"""
Remove one or all keys (i.e. logic variables) from an
assignment, and update ``self.variant``.
:param var: a Variable acting as a key for the assignment.
"""
if var:
val = self[var]
del self[var]
else:
self.clear()
self._addvariant()
return None
def __str__(self):
"""
        Pretty printing for assignments. {'x': 'u'} appears as 'g[u/x]'
"""
gstring = "g"
for (val, var) in self.variant:
gstring += "[%s/%s]" % (val, var)
return gstring
def _addvariant(self):
"""
Create a more pretty-printable version of the assignment.
"""
list = []
for item in self.items():
pair = (item[1], item[0])
list.append(pair)
self.variant = list
return None
def add(self, var, val):
"""
Add a new variable-value pair to the assignment, and update
``self.variant``.
"""
assert val in self.domain,\
"%s is not in the domain %s" % (val, self.domain)
assert is_indvar(var),\
"Wrong format for an Individual Variable: '%s'" % var
self[var] = val
self._addvariant()
return self
class Model(object):
"""
A first order model is a domain *D* of discourse and a valuation *V*.
A domain *D* is a set, and a valuation *V* is a map that associates
expressions with values in the model.
The domain of *V* should be a subset of *D*.
Construct a new ``Model``.
:type domain: set
:param domain: A set of entities representing the domain of discourse of the model.
:type valuation: Valuation
:param valuation: the valuation of the model.
:param prop: If this is set, then we are building a propositional\
model and don't require the domain of *V* to be subset of *D*.
"""
def __init__(self, domain, valuation):
assert isinstance(domain, set)
self.domain = domain
self.valuation = valuation
if not domain.issuperset(valuation.domain):
raise Error("The valuation domain, %s, must be a subset of the model's domain, %s"\
% (valuation.domain, domain))
def __repr__(self):
return "(%r, %r)" % (self.domain, self.valuation)
def __str__(self):
return "Domain = %s,\nValuation = \n%s" % (self.domain, self.valuation)
def evaluate(self, expr, g, trace=None):
"""
Call the ``LogicParser`` to parse input expressions, and
provide a handler for ``satisfy``
that blocks further propagation of the ``Undefined`` error.
:param expr: An ``Expression`` of ``logic``.
:type g: Assignment
:param g: an assignment to individual variables.
:rtype: bool or 'Undefined'
"""
try:
lp = LogicParser()
parsed = lp.parse(expr)
value = self.satisfy(parsed, g, trace=trace)
if trace:
print()
print("'%s' evaluates to %s under M, %s" % (expr, value, g))
return value
except Undefined:
if trace:
print()
print("'%s' is undefined under M, %s" % (expr, g))
return 'Undefined'
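    # A worked sketch of evaluate() against a tiny model; every symbol below
    # is invented for the example:
    #
    #     dom = set(['b1', 'g1'])
    #     val = Valuation([('adam', 'b1'), ('boy', set(['b1']))])
    #     m = Model(dom, val)
    #     g = Assignment(dom)
    #     m.evaluate('boy(adam)', g)          # True
    #     m.evaluate('exists x. boy(x)', g)   # True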
def satisfy(self, parsed, g, trace=None):
"""
Recursive interpretation function for a formula of first-order logic.
Raises an ``Undefined`` error when ``parsed`` is an atomic string
but is not a symbol or an individual variable.
:return: Returns a truth value or ``Undefined`` if ``parsed`` is\
complex, and calls the interpretation function ``i`` if ``parsed``\
is atomic.
:param parsed: An expression of ``logic``.
:type g: Assignment
:param g: an assignment to individual variables.
"""
if isinstance(parsed, ApplicationExpression):
function, arguments = parsed.uncurry()
if isinstance(function, AbstractVariableExpression):
#It's a predicate expression ("P(x,y)"), so used uncurried arguments
funval = self.satisfy(function, g)
argvals = tuple([self.satisfy(arg, g) for arg in arguments])
return argvals in funval
else:
#It must be a lambda expression, so use curried form
funval = self.satisfy(parsed.function, g)
argval = self.satisfy(parsed.argument, g)
return funval[argval]
elif isinstance(parsed, NegatedExpression):
return not self.satisfy(parsed.term, g)
elif isinstance(parsed, AndExpression):
return self.satisfy(parsed.first, g) and \
self.satisfy(parsed.second, g)
elif isinstance(parsed, OrExpression):
return self.satisfy(parsed.first, g) or \
self.satisfy(parsed.second, g)
elif isinstance(parsed, ImpExpression):
return (not self.satisfy(parsed.first, g)) or \
self.satisfy(parsed.second, g)
elif isinstance(parsed, IffExpression):
return self.satisfy(parsed.first, g) == \
self.satisfy(parsed.second, g)
elif isinstance(parsed, EqualityExpression):
return self.satisfy(parsed.first, g) == \
self.satisfy(parsed.second, g)
elif isinstance(parsed, AllExpression):
new_g = g.copy()
for u in self.domain:
new_g.add(parsed.variable.name, u)
if not self.satisfy(parsed.term, new_g):
return False
return True
elif isinstance(parsed, ExistsExpression):
new_g = g.copy()
for u in self.domain:
new_g.add(parsed.variable.name, u)
if self.satisfy(parsed.term, new_g):
return True
return False
elif isinstance(parsed, LambdaExpression):
cf = {}
var = parsed.variable.name
for u in self.domain:
val = self.satisfy(parsed.term, g.add(var, u))
# NB the dict would be a lot smaller if we do this:
# if val: cf[u] = val
                # But then we would need to deal with cases where f(a) should
                # yield a function rather than just False.
cf[u] = val
return cf
else:
return self.i(parsed, g, trace)
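    # Note on the LambdaExpression branch above (illustration only): a
    # lambda denotes an explicit table over the domain -- a dict, not a
    # Python function.  With domain {'b1', 'b2', 'g1'} and boy interpreted
    # as {('b1',), ('b2',)}, satisfying '\x. boy(x)' yields roughly
    # {'b1': True, 'b2': True, 'g1': False}, so function application in the
    # curried branch is plain dict lookup, ``funval[argval]``.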
#@decorator(trace_eval)
def i(self, parsed, g, trace=False):
"""
An interpretation function.
Assuming that ``parsed`` is atomic:
- if ``parsed`` is a non-logical constant, calls the valuation *V*
- else if ``parsed`` is an individual variable, calls assignment *g*
- else returns ``Undefined``.
:param parsed: an ``Expression`` of ``logic``.
:type g: Assignment
:param g: an assignment to individual variables.
:return: a semantic value
"""
        # If parsed is a propositional letter 'p', 'q', etc., it could be in
        # valuation.symbols and also be an IndividualVariableExpression.  We
        # want to catch the first case, so the ordering of clauses here matters:
if parsed.variable.name in self.valuation.symbols:
return self.valuation[parsed.variable.name]
elif isinstance(parsed, IndividualVariableExpression):
return g[parsed.variable.name]
else:
raise Undefined("Can't find a value for %s" % parsed)
def satisfiers(self, parsed, varex, g, trace=None, nesting=0):
"""
Generate the entities from the model's domain that satisfy an open formula.
:param parsed: an open formula
:type parsed: Expression
:param varex: the relevant free individual variable in ``parsed``.
:type varex: VariableExpression or str
:param g: a variable assignment
:type g: Assignment
:return: a set of the entities that satisfy ``parsed``.
"""
spacer = ' '
indent = spacer + (spacer * nesting)
candidates = []
if isinstance(varex, str):
var = Variable(varex)
else:
var = varex
if var in parsed.free():
if trace:
print()
print((spacer * nesting) + "Open formula is '%s' with assignment %s" % (parsed, g))
for u in self.domain:
new_g = g.copy()
new_g.add(var.name, u)
if trace > 1:
lowtrace = trace-1
else:
lowtrace = 0
value = self.satisfy(parsed, new_g, lowtrace)
if trace:
print(indent + "(trying assignment %s)" % new_g)
# parsed == False under g[u/var]?
if value == False:
if trace:
print(indent + "value of '%s' under %s is False" % (parsed, new_g))
# so g[u/var] is a satisfying assignment
else:
candidates.append(u)
if trace:
print(indent + "value of '%s' under %s is %s" % (parsed, new_g, value))
            result = set(candidates)
# var isn't free in parsed
else:
raise Undefined("%s is not free in %s" % (var.name, parsed))
return result
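    # A small sketch (names from the ``satdemo`` below):
    #
    #     >>> folmodel(quiet=True)
    #     >>> g2.purge()
    #     >>> m2.satisfiers(LogicParser().parse('boy(x)'), 'x', g2)
    #     set(['b1', 'b2'])
    #
    # Passing a formula in which the variable is not free raises
    # ``Undefined``, per the else branch above.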
#//////////////////////////////////////////////////////////////////////
# Demos
#//////////////////////////////////////////////////////////////////////
# width of the '*' banner lines
mult = 30
# Demo 1: Propositional Logic
#################
def propdemo(trace=None):
"""Example of a propositional model."""
global val1, dom1, m1, g1
val1 = Valuation([('P', True), ('Q', True), ('R', False)])
dom1 = set([])
m1 = Model(dom1, val1)
g1 = Assignment(dom1)
print()
print('*' * mult)
print("Propositional Formulas Demo")
print('*' * mult)
print('(Propositional constants treated as nullary predicates)')
print()
print("Model m1:\n", m1)
print('*' * mult)
sentences = [
'(P & Q)',
'(P & R)',
'- P',
'- R',
'- - P',
'- (P & R)',
'(P | R)',
'(R | P)',
'(R | R)',
'(- P | R)',
'(P | - P)',
'(P -> Q)',
'(P -> R)',
'(R -> P)',
'(P <-> P)',
'(R <-> R)',
'(P <-> R)',
]
for sent in sentences:
if trace:
print()
m1.evaluate(sent, g1, trace)
else:
print("The value of '%s' is: %s" % (sent, m1.evaluate(sent, g1)))
# Demo 2: FOL Model
#############
def folmodel(quiet=False, trace=None):
"""Example of a first-order model."""
global val2, v2, dom2, m2, g2
v2 = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),\
('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), ('dog', set(['d1'])),
('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
val2 = Valuation(v2)
dom2 = val2.domain
m2 = Model(dom2, val2)
g2 = Assignment(dom2, [('x', 'b1'), ('y', 'g2')])
if not quiet:
print()
print('*' * mult)
print("Models Demo")
print("*" * mult)
print("Model m2:\n", "-" * 14,"\n", m2)
print("Variable assignment = ", g2)
exprs = ['adam', 'boy', 'love', 'walks', 'x', 'y', 'z']
lp = LogicParser()
parsed_exprs = [lp.parse(e) for e in exprs]
print()
for parsed in parsed_exprs:
try:
print("The interpretation of '%s' in m2 is %s" % (parsed, m2.i(parsed, g2)))
except Undefined:
print("The interpretation of '%s' in m2 is Undefined" % parsed)
    applications = [('boy', ('adam',)), ('walks', ('adam',)), ('love', ('adam', 'y')), ('love', ('y', 'adam'))]
for (fun, args) in applications:
try:
funval = m2.i(lp.parse(fun), g2)
argsval = tuple(m2.i(lp.parse(arg), g2) for arg in args)
print("%s(%s) evaluates to %s" % (fun, args, argsval in funval))
except Undefined:
print("%s(%s) evaluates to Undefined" % (fun, args))
# Demo 3: FOL
#########
def foldemo(trace=None):
"""
Interpretation of closed expressions in a first-order model.
"""
folmodel(quiet=True)
print()
print('*' * mult)
print("FOL Formulas Demo")
print('*' * mult)
formulas = [
'love (adam, betty)',
'(adam = mia)',
'\\x. (boy(x) | girl(x))',
'\\x. boy(x)(adam)',
'\\x y. love(x, y)',
'\\x y. love(x, y)(adam)(betty)',
'\\x y. love(x, y)(adam, betty)',
'\\x y. (boy(x) & love(x, y))',
'\\x. exists y. (boy(x) & love(x, y))',
'exists z1. boy(z1)',
'exists x. (boy(x) & -(x = adam))',
'exists x. (boy(x) & all y. love(y, x))',
'all x. (boy(x) | girl(x))',
        'all x. (girl(x) -> exists y. boy(y) & love(x, y))', #Every girl loves a boy.
        'exists x. (boy(x) & all y. (girl(y) -> love(y, x)))', #There is a boy that every girl loves.
        'exists x. (boy(x) & all y. (girl(y) -> love(x, y)))', #A boy loves every girl.
'all x. (dog(x) -> - girl(x))',
'exists x. exists y. (love(x, y) & love(x, y))'
]
for fmla in formulas:
g2.purge()
if trace:
m2.evaluate(fmla, g2, trace)
else:
print("The value of '%s' is: %s" % (fmla, m2.evaluate(fmla, g2)))
# Demo 3: Satisfaction
#############
def satdemo(trace=None):
"""Satisfiers of an open formula in a first order model."""
print()
print('*' * mult)
print("Satisfiers Demo")
print('*' * mult)
folmodel(quiet=True)
formulas = [
'boy(x)',
'(x = x)',
'(boy(x) | girl(x))',
'(boy(x) & girl(x))',
'love(adam, x)',
'love(x, adam)',
'-(x = adam)',
'exists z22. love(x, z22)',
'exists y. love(y, x)',
'all y. (girl(y) -> love(x, y))',
'all y. (girl(y) -> love(y, x))',
'all y. (girl(y) -> (boy(x) & love(y, x)))',
'(boy(x) & all y. (girl(y) -> love(x, y)))',
'(boy(x) & all y. (girl(y) -> love(y, x)))',
'(boy(x) & exists y. (girl(y) & love(y, x)))',
'(girl(x) -> dog(x))',
'all y. (dog(y) -> (x = y))',
'exists y. love(y, x)',
'exists y. (love(adam, y) & love(y, x))'
]
if trace:
print(m2)
lp = LogicParser()
for fmla in formulas:
print(fmla)
lp.parse(fmla)
parsed = [lp.parse(fmla) for fmla in formulas]
for p in parsed:
g2.purge()
print("The satisfiers of '%s' are: %s" % (p, m2.satisfiers(p, 'x', g2, trace)))
def demo(num=0, trace=None):
"""
    Run some demos.
- num = 1: propositional logic demo
- num = 2: first order model demo (only if trace is set)
- num = 3: first order sentences demo
- num = 4: satisfaction of open formulas demo
- any other value: run all the demos
:param trace: trace = 1, or trace = 2 for more verbose tracing
"""
demos = {
1: propdemo,
2: folmodel,
3: foldemo,
4: satdemo}
try:
demos[num](trace=trace)
except KeyError:
for num in demos:
demos[num](trace=trace)
if __name__ == "__main__":
demo(2, trace=0)
| apache-2.0 |
caffeinehit/yell | yell/backends/celery.py | 1 | 2316 | from __future__ import absolute_import
from celery.task import Task
from yell import Notification, notify, registry
class CeleryNotificationTask(Task):
""" Dispatch and run the notification. """
def run(self, name=None, backend=None, *args, **kwargs):
"""
The Celery task.
        Delivers the notification via all backends returned by ``backend``.
"""
assert name is not None, "No 'name' specified to notify"
assert backend is not None, "No 'backend' specified to notify with"
backends = backend().get_backends(*args, **kwargs)
notify(name, backends=backends, *args, **kwargs)
class CeleryNotification(Notification):
"""
Delivers notifications through Celery.
:example:
::
from yell import notify, Notification
class EmailNotification(Notification):
name = 'async'
def notify(self, *args, **kwargs):
# Deliver email
class DBNotification(Notification):
name = 'async'
def notify(self, *args, **kwargs):
# Save to database
class AsyncNotification(CeleryNotification):
name = 'async'
notify('async', backends = [AsyncNotification],
text = "This notification is routed through Celery before being sent and saved")
    In the above example, calling :attr:`yell.notify` will invoke ``EmailNotification`` and
    ``DBNotification`` once the task has been delivered through Celery.
"""
name = None
"""
The name of this notification. Override in subclasses.
"""
def get_backends(self, *args, **kwargs):
"""
Return all backends the task should use to deliver notifications.
        By default all backends with the same :attr:`name` are used, except
        for subclasses of :class:`CeleryNotification`.
"""
return filter(lambda cls: not issubclass(cls, self.__class__), registry.notifications[self.name])
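    # Sketch of the filtering above, using the hypothetical subclasses from
    # the class docstring: for ``AsyncNotification``,
    # ``registry.notifications['async']`` would hold roughly
    # [EmailNotification, DBNotification, AsyncNotification], and
    # ``get_backends`` drops the last entry, so the Celery task delivers via
    # the first two without recursing into the Celery backend itself.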
def notify(self, *args, **kwargs):
"""
Dispatches the notification to Celery
"""
return CeleryNotificationTask.delay(name=self.name, backend=self.__class__, *args, **kwargs)
| mit |
TimYi/django | tests/one_to_one/models.py | 203 | 3343 | """
One-to-one relationships
To define a one-to-one relationship, use ``OneToOneField()``.
In this example, a ``Place`` optionally can be a ``Restaurant``.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return "%s the place" % self.name
@python_2_unicode_compatible
class Restaurant(models.Model):
place = models.OneToOneField(Place, models.CASCADE, primary_key=True)
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return "%s the restaurant" % self.place.name
@python_2_unicode_compatible
class Bar(models.Model):
place = models.OneToOneField(Place, models.CASCADE)
serves_cocktails = models.BooleanField(default=True)
def __str__(self):
return "%s the bar" % self.place.name
class UndergroundBar(models.Model):
place = models.OneToOneField(Place, models.SET_NULL, null=True)
serves_cocktails = models.BooleanField(default=True)
@python_2_unicode_compatible
class Waiter(models.Model):
restaurant = models.ForeignKey(Restaurant, models.CASCADE)
name = models.CharField(max_length=50)
def __str__(self):
return "%s the waiter at %s" % (self.name, self.restaurant)
@python_2_unicode_compatible
class Favorites(models.Model):
name = models.CharField(max_length=50)
restaurants = models.ManyToManyField(Restaurant)
def __str__(self):
return "Favorites for %s" % self.name
class ManualPrimaryKey(models.Model):
primary_key = models.CharField(max_length=10, primary_key=True)
name = models.CharField(max_length=50)
class RelatedModel(models.Model):
link = models.OneToOneField(ManualPrimaryKey, models.CASCADE)
name = models.CharField(max_length=50)
@python_2_unicode_compatible
class MultiModel(models.Model):
link1 = models.OneToOneField(Place, models.CASCADE)
link2 = models.OneToOneField(ManualPrimaryKey, models.CASCADE)
name = models.CharField(max_length=50)
def __str__(self):
return "Multimodel %s" % self.name
class Target(models.Model):
name = models.CharField(max_length=50)
class Pointer(models.Model):
other = models.OneToOneField(Target, models.CASCADE, primary_key=True)
class Pointer2(models.Model):
other = models.OneToOneField(Target, models.CASCADE, related_name='second_pointer')
class HiddenPointer(models.Model):
target = models.OneToOneField(Target, models.CASCADE, related_name='hidden+')
# Test related objects visibility.
class SchoolManager(models.Manager):
def get_queryset(self):
return super(SchoolManager, self).get_queryset().filter(is_public=True)
class School(models.Model):
is_public = models.BooleanField(default=False)
objects = SchoolManager()
class DirectorManager(models.Manager):
def get_queryset(self):
return super(DirectorManager, self).get_queryset().filter(is_temp=False)
class Director(models.Model):
is_temp = models.BooleanField(default=False)
school = models.OneToOneField(School, models.CASCADE)
objects = DirectorManager()
| bsd-3-clause |
CiNC0/Cartier | cartier-python-resign-linux/tests/test_versioning.py | 1 | 1194 | #!/usr/bin/env python
import os.path
import importlib
import unittest
tests_dir = os.path.abspath(os.path.dirname(__file__))
package_name = tests_dir.split(os.path.sep)[-2].replace('-', '_')
package = importlib.import_module(package_name)
class VersioningTestCase(unittest.TestCase):
def assert_proper_attribute(self, attribute):
try:
assert getattr(package, attribute), (
"{} improperly set".format(attribute))
except AttributeError:
assert False, "missing {}".format(attribute)
def test_version_attribute(self):
self.assert_proper_attribute("__version__")
# test major, minor, and patch are numbers
version_split = package.__version__.split(".")[:3]
assert version_split, "__version__ is not set"
for n in version_split:
try:
int(n)
except ValueError:
assert False, "'{}' is not an integer".format(n)
def test_commit_attribute(self):
self.assert_proper_attribute("__commit__")
def test_build_attribute(self):
self.assert_proper_attribute("__build__")
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ortylp/scipy | scipy/special/tests/test_basic.py | 4 | 122266 | # this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from __future__ import division, print_function, absolute_import
import itertools
import warnings
import numpy as np
from numpy import array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp, \
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_
from numpy.testing import assert_equal, assert_almost_equal, \
assert_array_equal, assert_array_almost_equal, assert_approx_equal, \
assert_, rand, dec, TestCase, run_module_suite, assert_allclose, \
assert_raises, assert_array_almost_equal_nulp
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk
from scipy.special._testutils import assert_tol_equal, with_special_errors, \
assert_func_equal
class TestCephes(TestCase):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
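    # The pairing idiom above, spelled out (illustration only):
    # np.broadcast_arrays(n[:, None], k[None, :]) expands both inputs to a
    # common (len(n), len(k)) grid, and .reshape(2, -1).T flattens that to
    # an (len(n) * len(k), 2) array of all (n, k) pairs, e.g.
    # n=[1, 2], k=[0, 1] -> [[1, 0], [1, 1], [2, 0], [2, 1]].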
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
rtol=1e-13, atol=0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
assert_allclose(cephes.betaln(-100.3, 1e-200), cephes.gammaln(1e-200))
assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
rtol=1e-14, atol=0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
8.4231316935498957e-21, rtol=3e-12, atol=0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)
assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,
1.33520017e-08, 2.74909967e-08],
rtol=1e-6, atol=0)
assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_diric(self):
# Test behavior near multiples of 2pi. Regression test for issue
# described in gh-4001.
n_odd = [1, 5, 25]
x = np.array(2*np.pi + 5e-5).astype(np.float32)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
x = np.array(2*np.pi + 1e-15).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
if hasattr(np, 'float128'):
# No float128 available in 32-bit numpy
x = np.array(2*np.pi + 1e-12).astype(np.float128)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
n_even = [2, 4, 24]
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
# Test at some values not near a multiple of pi
x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
octave_result = [0.872677996249965, 0.539344662916632,
0.127322003750035, -0.206011329583298]
assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
def test_diric_broadcasting(self):
x = np.arange(5)
n = np.array([1, 3, 7])
assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
def test_fdtr(self):
assert_equal(cephes.fdtr(1,1,0),0.0)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1,1,0),1.0)
def test_fdtri(self):
# cephes.fdtri(1,1,0.5) #BUG: gives NaN, should be 1
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes.gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
assert_equal(cephes.log1p(0),0.0)
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
# Values from ACM TOMS 804 (derivate by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def test_ncfdtridfd(self):
cephes.ncfdtridfd(1,0.5,0,1)
def __check_ncfdtridfn(self):
cephes.ncfdtridfn(1,0.5,0,1)
def __check_ncfdtrinc(self):
cephes.ncfdtrinc(1,0.5,0,1)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
val = cephes.pdtr(0, 1)
assert_almost_equal(val, np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtr([0, 1, 2], 0.0)
assert_array_equal(val, [1, 1, 1])
def test_pdtrc(self):
val = cephes.pdtrc(0, 1)
assert_almost_equal(val, 1 - np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtrc([0, 1, 2], 0.0)
assert_array_equal(val, [0, 0, 0])
def test_pdtri(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
cephes.pdtri(0.5,0.5)
def test_pdtrik(self):
k = cephes.pdtrik(0.5, 1)
assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
# Edge case: m = 0 or very small.
k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
assert_array_equal(k, np.zeros((3, 3)))
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_zeta(self):
cephes.zeta(2,2)
def test_zetac(self):
assert_equal(cephes.zetac(0),-1.5)
def test_wofz(self):
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(TestCase):
def test_airy(self):
# This tests the airy function to ensure 8 place accuracy in computation
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),10)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
def test_ai_zeros_big(self):
z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
ai_z, aip_z, _, _ = special.airy(z)
ai_zp, aip_zp, _, _ = special.airy(zp)
ai_envelope = 1/abs(z)**(1./4)
aip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
assert_allclose(aip_zx, aip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.1
assert_allclose(z[:6],
[-2.3381074105, -4.0879494441, -5.5205598281,
-6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
assert_allclose(zp[:6],
[-1.0187929716, -3.2481975822, -4.8200992112,
-6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
def test_bi_zeros_big(self):
z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
_, _, bi_z, bip_z = special.airy(z)
_, _, bi_zp, bip_zp = special.airy(zp)
bi_envelope = 1/abs(z)**(1./4)
bip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
assert_allclose(bip_zx, bip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.2
assert_allclose(z[:6],
[-1.1737132227, -3.2710933028, -4.8307378417,
-6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
assert_allclose(zp[:6],
[-2.2944396826, -4.0731550891, -5.5123957297,
-6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
class TestAssocLaguerre(TestCase):
def test_assoc_laguerre(self):
a1 = special.genlaguerre(11,1)
a2 = special.assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = special.assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly(TestCase):
def test_besselpoly(self):
pass
class TestKelvin(TestCase):
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
# Abramowitz & Stegun, Table 9.12
bi = special.bei_zeros(5)
assert_array_almost_equal(bi,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),8)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli(TestCase):
def test_bernoulli(self):
brn = special.bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta(TestCase):
def test_beta(self):
bet = special.beta(2,4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = special.betaln(2,4)
bet = log(abs(special.beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = special.betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = special.betaincinv(2,4,.5)
comp = special.betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestCombinatorics(TestCase):
def test_comb(self):
assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
assert_almost_equal(special.comb(10, 3), 120.)
assert_equal(special.comb(10, 3, exact=True), 120)
assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
def test_comb_with_np_int64(self):
n = 70
k = 30
np_n = np.int64(n)
np_k = np.int64(k)
assert_equal(special.comb(np_n, np_k, exact=True),
special.comb(n, k, exact=True))
def test_comb_zeros(self):
assert_equal(special.comb(2, 3, exact=True), 0)
assert_equal(special.comb(-1, 3, exact=True), 0)
assert_equal(special.comb(2, -1, exact=True), 0)
assert_equal(special.comb(2, -1, exact=False), 0)
assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 120.])
def test_perm(self):
assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
assert_almost_equal(special.perm(10, 3), 720.)
assert_equal(special.perm(10, 3, exact=True), 720)
def test_perm_zeros(self):
assert_equal(special.perm(2, 3, exact=True), 0)
assert_equal(special.perm(-1, 3, exact=True), 0)
assert_equal(special.perm(2, -1, exact=True), 0)
assert_equal(special.perm(2, -1, exact=False), 0)
assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 720.])
class TestTrigonometric(TestCase):
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.cotdg(45), 1.0, 14)
assert_almost_equal(special.cotdg(-45), -1.0, 14)
assert_almost_equal(special.cotdg(90), 0.0, 14)
assert_almost_equal(special.cotdg(-90), 0.0, 14)
assert_almost_equal(special.cotdg(135), -1.0, 14)
assert_almost_equal(special.cotdg(-135), 1.0, 14)
assert_almost_equal(special.cotdg(225), 1.0, 14)
assert_almost_equal(special.cotdg(-225), -1.0, 14)
assert_almost_equal(special.cotdg(270), 0.0, 14)
assert_almost_equal(special.cotdg(-270), 0.0, 14)
assert_almost_equal(special.cotdg(315), -1.0, 14)
assert_almost_equal(special.cotdg(-315), 1.0, 14)
assert_almost_equal(special.cotdg(765), 1.0, 14)
def test_sinc(self):
# the sinc implementation and more extensive sinc tests are in numpy
assert_array_equal(special.sinc([0]), 1)
assert_equal(special.sinc(0.0), 1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg(TestCase):
def test_tandg(self):
tn = special.tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = special.tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = special.tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.tandg(0), 0.0, 14)
assert_almost_equal(special.tandg(45), 1.0, 14)
assert_almost_equal(special.tandg(-45), -1.0, 14)
assert_almost_equal(special.tandg(135), -1.0, 14)
assert_almost_equal(special.tandg(-135), 1.0, 14)
assert_almost_equal(special.tandg(180), 0.0, 14)
assert_almost_equal(special.tandg(-180), 0.0, 14)
assert_almost_equal(special.tandg(225), 1.0, 14)
assert_almost_equal(special.tandg(-225), -1.0, 14)
assert_almost_equal(special.tandg(315), -1.0, 14)
assert_almost_equal(special.tandg(-315), 1.0, 14)
class TestEllip(TestCase):
def test_ellipj_nan(self):
"""Regression test for #912."""
special.ellipj(0.5, np.nan)
def test_ellipj(self):
el = special.ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = special.ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
assert_equal(special.ellipkm1(0.0), np.inf)
assert_equal(special.ellipkm1(1.0), pi/2)
assert_equal(special.ellipkm1(np.inf), 0.0)
assert_equal(special.ellipkm1(np.nan), np.nan)
assert_equal(special.ellipkm1(-1), np.nan)
assert_allclose(special.ellipk(-10), 0.7908718902387385)
def test_ellipkinc(self):
elkinc = special.ellipkinc(pi/2,.2)
elk = special.ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = special.ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipkinc(pi/2, 2), np.nan)
assert_equal(special.ellipkinc(0, 0.5), 0.0)
assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
def test_ellipkinc_2(self):
# Regression test for gh-3550
# ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipkinc(phi, mvals)
assert_array_almost_equal_nulp(f, 1.0259330100195334 * np.ones_like(f), 1)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipkinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 5.1296650500976675 * np.ones_like(f1), 2)
def test_ellipkinc_singular(self):
# ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
xlog = np.logspace(-300, -17, 25)
xlin = np.linspace(1e-17, 0.1, 25)
xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
def test_ellipe(self):
ele = special.ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
assert_equal(special.ellipe(0.0), pi/2)
assert_equal(special.ellipe(1.0), 1.0)
assert_equal(special.ellipe(-np.inf), np.inf)
assert_equal(special.ellipe(np.nan), np.nan)
assert_equal(special.ellipe(2), np.nan)
assert_allclose(special.ellipe(-10), 3.6391380384177689)
def test_ellipeinc(self):
eleinc = special.ellipeinc(pi/2,.2)
ele = special.ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = special.ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipeinc(pi/2, 2), np.nan)
assert_equal(special.ellipeinc(0, 0.5), 0.0)
assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)
def test_ellipeinc_2(self):
# Regression test for gh-3550
# ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipeinc(phi, mvals)
assert_array_almost_equal_nulp(f, 0.84442884574781019 * np.ones_like(f), 2)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipeinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 3.3471442287390509 * np.ones_like(f1), 4)
class TestErf(TestCase):
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def _check_variant_func(self, func, other_func, rtol, atol=0):
np.random.seed(1234)
n = 10000
x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
z = x + 1j*y
old_errors = np.seterr(all='ignore')
try:
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
finally:
np.seterr(**old_errors)
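    # The consistency checks below push the same random real/complex grid
    # through a direct implementation (``func``) and a mathematically
    # equivalent formula (``other_func``), e.g. erfc(z) == 1 - erf(z);
    # finite results must agree to the stated rtol on both code paths.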
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
def test_erfcinv(self):
i = special.erfcinv(1)
        # Use assert_array_equal instead of assert_equal, so the comparison
        # of -0.0 and 0.0 doesn't fail.
assert_array_equal(i, 0)
def test_erfinv(self):
i = special.erfinv(0)
assert_equal(i,0)
def test_errprint(self):
a = special.errprint()
        b = 1 - a  # a is the current state; 1 - a inverts it
        c = special.errprint(b)  # returns the previous state, 'a'
        assert_equal(a, c)
        d = special.errprint(a)  # restore the original state
        assert_equal(d, b)  # confirms the toggled state was in effect
# assert_equal(d,1-a)
class TestEuler(TestCase):
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
eu2 = special.euler(2) # just checking segfaults
assert_almost_equal(eu0[0],1,8)
assert_almost_equal(eu2[2],-1,8)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
olderr = np.seterr(all='ignore')
try:
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
finally:
np.seterr(**olderr)
assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (special.expm1(2),special.expm1(3),special.expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFactorialFunctions(TestCase):
def test_factorial(self):
assert_array_almost_equal([6., 24., 120.],
special.factorial([3, 4, 5], exact=False))
assert_equal(special.factorial(5, exact=True), 120)
def test_factorial2(self):
assert_array_almost_equal([105., 384., 945.],
special.factorial2([7, 8, 9], exact=False))
assert_equal(special.factorial2(7, exact=True), 105)
def test_factorialk(self):
assert_equal(special.factorialk(5, 1, exact=True), 120)
assert_equal(special.factorialk(5, 3, exact=True), 10)
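# Illustrative sketch (added commentary, not part of the original suite):
# factorialk(n, k) is the multifactorial n * (n-k) * (n-2k) * ... over the
# positive terms, so factorialk(5, 3) = 5 * 2 = 10 as asserted above.
def _demo_multifactorial(n, k):
    result = 1
    while n > 0:
        result *= n
        n -= k
    return result  # _demo_multifactorial(5, 3) == 10, (7, 2) == 105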
class TestFresnel(TestCase):
def test_fresnel(self):
frs = array(special.fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = special.fresnel_zeros(5)
assert_array_almost_equal(szo,
array([2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = special.fresnel(szo)[0]
vals2 = special.fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = special.fresnel_zeros(6)
frc = special.fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = special.fresnel_zeros(5)
frs = special.fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainc(self):
gama = special.gammainc(.5,.5)
assert_almost_equal(gama,.7,1)
def test_gammaincnan(self):
gama = special.gammainc(-1,1)
assert_(isnan(gama))
def test_gammainczero(self):
# bad arg but zero integration limit
gama = special.gammainc(-1,0)
assert_equal(gama,0.0)
def test_gammaincc(self):
gicc = special.gammaincc(.5,.5)
greal = 1 - special.gammainc(.5,.5)
assert_almost_equal(gicc,greal,8)
def test_gammainccnan(self):
gama = special.gammaincc(-1,1)
assert_(isnan(gama))
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
@with_special_errors
def test_975(self):
# Regression test for ticket #975 -- switch point in algorithm
# check that things work OK at the point, immediately next floats
# around it, and a bit further away
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for xp in pts:
y = special.gammaincinv(.4, xp)
x = special.gammainc(0.4, y)
assert_tol_equal(x, xp, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_almost_equal(rgam,rlgam,8)
def test_infinity(self):
assert_(np.isinf(special.gamma(-1)))
assert_equal(special.rgamma(-1), 0)
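# Illustrative sketch (added commentary, not part of the original suite):
# gammaincinv inverts the regularized lower incomplete gamma function in its
# second argument, so the round trip should be the identity on (0, 1), which
# is the property test_gammaincinv spot-checks above.
def _demo_gammainc_roundtrip(a=0.4):
    p = np.linspace(0.05, 0.95, 10)
    x = special.gammaincinv(a, p)
    return np.allclose(special.gammainc(a, x), p)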
class TestHankel(TestCase):
def test_negv1(self):
assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
def test_hankel1(self):
hank1 = special.hankel1(1,.1)
hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
def test_negv1e(self):
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = special.hankel1e(1,.1)
hankrle = special.hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
def test_negv2(self):
assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
def test_hankel2(self):
hank2 = special.hankel2(1,.1)
hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
def test_neg2e(self):
assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
    def test_hankel2e(self):
        hank2e = special.hankel2e(1,.1)
        # hankel2e is hankel2 scaled by exp(1j*z); the original comparison
        # here checked hankel2e against itself, which was vacuous
        hankrl2e = special.hankel2(1,.1)*exp(.1j)
        assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper(TestCase):
def test_h1vp(self):
h1 = special.h1vp(1,.1)
h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = special.h2vp(1,.1)
h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
# scalar input
assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
# float input, expected values match mpmath
x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
1.37789689539747, 1.60373685288480])
assert_allclose(x, expected, rtol=1e-12)
# complex input
x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
assert_allclose(x, expected.astype(complex), rtol=1e-12)
# test broadcasting
x1 = [0.5, 1.5, 2.5]
x2 = [0, 1, 0.5]
x = special.hyp0f1(x1, x2)
expected = [1.0, 1.8134302039235093, 1.21482702689997]
assert_allclose(x, expected, rtol=1e-12)
x = special.hyp0f1(np.row_stack([x1] * 2), x2)
assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
assert_raises(ValueError, special.hyp0f1,
np.row_stack([x1] * 3), [0, 1])
def test_hyp1f1(self):
hyp1 = special.hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# http://projects.scipy.org/scipy/scipy/ticket/659
# reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
# produced with test_hyp1f1.nb
ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = special.hyp1f1(a,b,c)
assert_(abs(expected - result)/expected < 1e-4)
def test_hyp1f1_gh2957(self):
hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
assert_almost_equal(hyp1, hyp2, 12)
def test_hyp1f2(self):
pass
def test_hyp2f0(self):
pass
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
# negative integer a or b, with c-a-b integer and x > 0.9
[-2,3,1,0.95,0.715],
[2,-3,1,0.95,-0.007],
[-6,3,1,0.95,0.0000810625],
[2,-5,1,0.95,-0.000029375],
# huge negative integers
(10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
(10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
]
for i, (a, b, c, x, v) in enumerate(values):
cv = special.hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_hyp3f0(self):
pass
def test_hyperu(self):
val1 = special.hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = special.hyperu(a,b,z)
hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
(special.gamma(1+a-b)*special.gamma(b)) -
z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
/ (special.gamma(a)*special.gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
def test_hyperu_gh2287(self):
assert_almost_equal(special.hyperu(1, 1.5, 20.2),
0.048360918656699191, 12)
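# Illustrative sketch (added commentary, not part of the original suite): for
# large z, hyperu(a, b, z) ~ z**(-a) to leading order, which is roughly what
# test_hyperu's value 0.0098153 ~ 100**(-1) reflects; the agreement tightens
# as z grows.
def _demo_hyperu_asymptotic(a=1.0, b=0.1, z=1e4):
    return special.hyperu(a, b, z), z ** (-a)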
class TestBessel(TestCase):
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
def test_negv_jv(self):
assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
def test_negv_jve(self):
assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_array_almost_equal(jn0,array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = special.jn_zeros(102,5)
assert_tol_equal(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_tol_equal(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_tol_equal(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
return (jn(n-1,x) - jn(n+1,x))/2
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError("Invalid t return for nt=%d" % nt)
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = special.jnp_zeros(443,5)
assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = special.jvp(2,2)
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*rand() - 1
b = 5*rand() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) # this function (kvp) is broken
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
        all_zeros = r_[yo,zo]
        allval = r_[ypo,zpo]
        assert_array_almost_equal(abs(special.yv(0.0,all_zeros)),0.0,11)
        assert_array_almost_equal(abs(special.yv(1,all_zeros)-allval),0.0,11)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
an = special.yn_zeros(443,5)
assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
ao = special.ynp_zeros(43,5)
assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
def test_negv_yv(self):
assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
def test_negv_yve(self):
assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
for v in [-120, -100.3, -20., -10., -1., -.5,
0., 1., 12.49, 120., 301]:
for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
700.6, 1300, 10003]:
yield v, z
# check half-integers; these are problematic points at least
# for cephes/iv
for v in 0.5 + arange(-60, 60):
yield v, 3.5
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_tol_equal(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
olderr = np.seterr(all='ignore')
try:
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
finally:
np.seterr(**olderr)
@dec.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(int)
old_err = np.seterr(all='ignore')
try:
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
finally:
np.seterr(**old_err)
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)
assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
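# Illustrative sketch (added commentary, not part of the original suite): the
# ivp/kvp tests above check derivatives against recurrences; a central
# difference is an independent cross-check of the same quantity.
def _demo_ivp_central_difference(v=1.0, z=2.0, h=1e-6):
    numeric = (special.iv(v, z + h) - special.iv(v, z - h)) / (2 * h)
    return np.allclose(special.ivp(v, z), numeric, rtol=1e-6)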
class TestLaguerre(TestCase):
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*rand()-0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
    # Base polynomials come from Abramowitz and Stegun
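# Illustrative sketch (added commentary, not part of the original suite): the
# generalized Laguerre polynomials checked above satisfy the three-term
# recurrence (n+1) L_{n+1}^k = (2n+1+k-x) L_n^k - (n+k) L_{n-1}^k.
def _demo_genlaguerre_recurrence(n=2, k=0.7, x=0.3):
    lhs = (n + 1) * special.genlaguerre(n + 1, k)(x)
    rhs = ((2 * n + 1 + k - x) * special.genlaguerre(n, k)(x)
           - (n + k) * special.genlaguerre(n - 1, k)(x))
    return np.allclose(lhs, rhs)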
class TestLegendre(TestCase):
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c, [1])
assert_equal(leg1.c, [1,0])
assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)
assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0)
class TestLambda(TestCase):
def test_lmbda(self):
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(TestCase):
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
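# Illustrative sketch (added commentary, not part of the original suite): the
# point of log1p is accuracy for tiny arguments, where log(1 + x) loses
# digits to the rounding of 1 + x; log1p(1e-12) is exact to machine
# precision while log(1 + 1e-12) is visibly off.
def _demo_log1p_precision(x=1e-12):
    return special.log1p(x), log(1 + x)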
class TestLegendreFunctions(TestCase):
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z, 3)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]])),
7)
def test_clpmn_close_to_real_2(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x),
special.lpmv(m, n, x)]),
7)
def test_clpmn_close_to_real_3(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
for type in [2, 3]:
assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
lp = special.lpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
def test_deriv_clpmn(self):
# data inside and outside of the unit circle
zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
1+1j, -1+1j, -1-1j, 1-1j]
m = 2
n = 3
for type in [2, 3]:
for z in zvals:
for h in [1e-3, 1e-3j]:
approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
- special.clpmn(m, n, z-0.5*h, type)[0])/h
assert_allclose(special.clpmn(m, n, z, type)[1],
approx_derivative,
rtol=1e-4)
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
olderr = np.seterr(all='ignore')
try:
lp = special.lpmv(-1,-1,.001)
finally:
np.seterr(**olderr)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_gt1(self):
"""algorithm for real arguments changes at 1.0001
test against analytical result for m=2, n=1
"""
x0 = 1.0001
delta = 0.00002
for x in (x0-delta, x0+delta):
lq = special.lqmn(2, 1, x)[0][-1, -1]
expected = 2/(x*x-1)
assert_almost_equal(lq, expected)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(TestCase):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = special.mathieu_even_coef(2,5)
        # Q is not defined; this is broken, and the proper reporting order
        # cannot be figured out.
def test_mathieu_odd_coef(self):
# same problem as above
pass
class TestFresnelIntegral(TestCase):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(TestCase):
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(TestCase):
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
    def test_pbdv(self):
        pbv = special.pbdv(1,.2)
        # 0.5 instead of 1/2 to avoid Python 2 integer division
        derrl = 0.5*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
        # the standard recurrence D_v'(z) = v*D_{v-1}(z) - (z/2)*D_v(z)
        # implies the derivative returned by pbdv equals -derrl; the
        # original test computed derrl but never asserted anything
        assert_almost_equal(pbv[1], -derrl, 10)
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
# Test polygamma(0, x) == psi(x)
x = [2, 3, 1.1e14]
assert_almost_equal(special.polygamma(0, x), special.psi(x))
# Test broadcasting
n = [0, 1, 2]
x = [0.5, 1.5, 2.5]
expected = [-1.9635100260214238, 0.93480220054467933,
-0.23620405164172739]
assert_almost_equal(special.polygamma(n, x), expected)
expected = np.row_stack([expected]*2)
assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
expected)
assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
expected)
class TestProCvSeq(TestCase):
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(TestCase):
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(TestCase):
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(TestCase):
def test_riccati_jn(self):
jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
ricjn = special.riccati_jn(1,.2)
assert_array_almost_equal(ricjn,jnrl,8)
def test_riccati_yn(self):
ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
ricyn = special.riccati_yn(1,.2)
assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
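# Illustrative sketch (added commentary, not part of the original suite):
# round-half-to-even sends exact .5 fractions to the nearest even integer,
# so on platforms where special.round follows its documentation the pattern
# below should hold.
def _demo_round_half_even():
    return [special.round(x) for x in (9.5, 10.5, 11.5)]  # expect [10, 10, 12]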
def test_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
yield (assert_array_almost_equal, sh(0,0,0,0),
0.5/sqrt(pi))
yield (assert_array_almost_equal, sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
yield (assert_array_almost_equal, sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
yield (assert_array_almost_equal, sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
def test_sph_harm_ufunc_loop_selection():
# see https://github.com/scipy/scipy/issues/4895
dt = np.dtype(np.complex128)
assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt)
assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt)
class TestSpherical(TestCase):
def test_sph_harm(self):
# see test_sph_harm function
pass
def test_sph_in(self):
i1n = special.sph_in(1,.2)
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_inkn(self):
spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
inkn = r_[special.sph_inkn(1,.2)]
assert_array_almost_equal(inkn,spikn,10)
def test_sph_in_kn_order0(self):
x = 1.
sph_i0 = special.sph_in(0, x)
sph_i0_expected = np.array([np.sinh(x)/x,
np.cosh(x)/x-np.sinh(x)/x**2])
assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
sph_k0 = special.sph_kn(0, x)
sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
-0.5*pi*exp(-x)*(1/x+1/x**2)])
assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
sph_i0k0 = special.sph_inkn(0, x)
assert_array_almost_equal(r_[sph_i0+sph_k0],
r_[sph_i0k0],
10)
def test_sph_jn(self):
s1 = special.sph_jn(2,.2)
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_jnyn(self):
jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition
jnyn1 = r_[special.sph_jnyn(1,.2)]
assert_array_almost_equal(jnyn1,jnyn,9)
def test_sph_kn(self):
kn = special.sph_kn(2,.2)
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
    def test_sph_yn(self):
        sy1 = special.sph_yn(2,.2)[0][2]
        sy2 = special.sph_yn(0,.2)[0][0]
        # analytical derivative via the recurrence y1' = (y0 - 2*y2)/3
        sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3
        assert_almost_equal(sy1,-377.52483,5)  # reference values
        assert_almost_equal(sy2,-4.9003329,5)
        sy3 = special.sph_yn(1,.2)[1][1]
        # compare the returned derivative against the recurrence value
        assert_almost_equal(sy3,sphpy,4)
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
                assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err,
                                 err_msg=(v, z))
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
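# Illustrative sketch (added commentary, not part of the original suite):
# every term of the power series in _series carries z**(2k+v+1), so an
# integer-order Struve function has parity (-1)**(v+1); that is exactly the
# sign pattern test_some_values asserts at z = -41.
def _demo_struve_parity(v=12, z=41.0):
    return special.struve(v, -z), (-1) ** (v + 1) * special.struve(v, z)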
def test_chi2_smalldf():
assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_chi2c_smalldf():
assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
assert_allclose(special.agm(24, 6), 13.4581714817)
assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28)
def test_legacy():
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
# Legacy behavior: truncating arguments to integers
assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))
assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))
assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))
assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionWarning, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
def test_entr():
def xfunc(x):
if x < 0:
return -np.inf
else:
return -special.xlogy(x, x)
values = (0, 0.5, 1.0, np.inf)
signs = [-1, 1]
arr = []
for sgn, v in itertools.product(signs, values):
arr.append(sgn * v)
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z)
assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13)
def test_kl_div():
def xfunc(x, y):
if x < 0 or y < 0 or (y == 0 and x != 0):
# extension of natural domain to preserve convexity
return np.inf
elif np.isposinf(x) or np.isposinf(y):
# limits within the natural domain
return np.inf
elif x == 0:
return y
else:
return special.xlogy(x, x/y) - x + y
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)
def test_rel_entr():
def xfunc(x, y):
if x > 0 and y > 0:
return special.xlogy(x, x/y)
elif x == 0 and y >= 0:
return 0
else:
return np.inf
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13)
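# Illustrative sketch (added commentary, not part of the original suite): on
# the common domain x > 0, y > 0 the two reference definitions above differ
# only by a linear term, kl_div(x, y) == rel_entr(x, y) - x + y.
def _demo_kl_div_vs_rel_entr():
    x = np.array([0.5, 1.0, 2.0])
    y = np.array([1.5, 0.5, 2.0])
    return np.allclose(special.kl_div(x, y), special.rel_entr(x, y) - x + y)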
def test_huber():
assert_equal(special.huber(-1, 1.5), np.inf)
assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))
def xfunc(delta, r):
if delta < 0:
return np.inf
elif np.abs(r) < delta:
return 0.5 * np.square(r)
else:
return delta * (np.abs(r) - 0.5 * delta)
z = np.random.randn(10, 2)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
def xfunc(delta, r):
if delta < 0:
return np.inf
elif (not delta) or (not r):
return 0
else:
return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)
z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)
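# Illustrative sketch (added commentary, not part of the original suite):
# pseudo_huber is a smooth approximation of huber; both behave like
# r**2 / 2 for |r| << delta and like delta * |r| for |r| >> delta, as the
# two xfunc definitions above imply.
def _demo_huber_vs_pseudo_huber(delta=1.0):
    r = np.array([0.01, 100.0])
    return special.huber(delta, r), special.pseudo_huber(delta, r)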
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
acogdev/ansible | test/integration/setup_gce.py | 163 | 1391 | '''
Create GCE resources for use in integration tests.
Takes a prefix as a command-line argument and creates two persistent disks named
${prefix}-base and ${prefix}-extra and a snapshot of the base disk named
${prefix}-snapshot. prefix will be forced to lowercase, to ensure the names are
legal GCE resource names.
'''
import sys
import optparse
import gce_credentials
def parse_args():
parser = optparse.OptionParser(
usage="%s [options] <prefix>" % (sys.argv[0],), description=__doc__)
gce_credentials.add_credentials_options(parser)
parser.add_option("--prefix",
action="store", dest="prefix",
help="String used to prefix GCE resource names (default: %default)")
(opts, args) = parser.parse_args()
gce_credentials.check_required(opts, parser)
if not args:
parser.error("Missing required argument: name prefix")
return (opts, args)
if __name__ == '__main__':
(opts, args) = parse_args()
gce = gce_credentials.get_gce_driver(opts)
prefix = args[0].lower()
try:
base_volume = gce.create_volume(
size=10, name=prefix+'-base', location='us-central1-a')
gce.create_volume_snapshot(base_volume, name=prefix+'-snapshot')
gce.create_volume(
size=10, name=prefix+'-extra', location='us-central1-a')
    except KeyboardInterrupt:
print("\nExiting on user command.")
| gpl-3.0 |
joequery/django | tests/admin_widgets/models.py | 227 | 4760 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class MyFileField(models.FileField):
pass
@python_2_unicode_compatible
class Member(models.Model):
name = models.CharField(max_length=100)
birthdate = models.DateTimeField(blank=True, null=True)
gender = models.CharField(max_length=1, blank=True, choices=[('M', 'Male'), ('F', 'Female')])
email = models.EmailField(blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Band(models.Model):
name = models.CharField(max_length=100)
style = models.CharField(max_length=20)
members = models.ManyToManyField(Member)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Album(models.Model):
band = models.ForeignKey(Band, models.CASCADE)
name = models.CharField(max_length=100)
cover_art = models.FileField(upload_to='albums')
backside_art = MyFileField(upload_to='albums_back', null=True)
def __str__(self):
return self.name
class HiddenInventoryManager(models.Manager):
def get_queryset(self):
return super(HiddenInventoryManager, self).get_queryset().filter(hidden=False)
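# Note (added commentary, not in the original model): with the two managers
# declared on Inventory below, Inventory.objects transparently hides rows
# with hidden=True, while Inventory.default_manager still sees everything.
# The first manager declared on a model is the one Django uses internally
# for related-object lookups, which is why the plain Manager comes first;
# see the ticket #9258 reference below.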
@python_2_unicode_compatible
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', models.SET_NULL, to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
hidden = models.BooleanField(default=False)
# see #9258
default_manager = models.Manager()
objects = HiddenInventoryManager()
def __str__(self):
return self.name
class Event(models.Model):
main_band = models.ForeignKey(
Band,
models.CASCADE,
limit_choices_to=models.Q(pk__gt=0),
related_name='events_main_band_at',
)
supporting_bands = models.ManyToManyField(Band, blank=True, related_name='events_supporting_band_at')
start_date = models.DateField(blank=True, null=True)
start_time = models.TimeField(blank=True, null=True)
description = models.TextField(blank=True)
link = models.URLField(blank=True)
min_age = models.IntegerField(blank=True, null=True)
@python_2_unicode_compatible
class Car(models.Model):
owner = models.ForeignKey(User, models.CASCADE)
make = models.CharField(max_length=30)
model = models.CharField(max_length=30)
def __str__(self):
return "%s %s" % (self.make, self.model)
class CarTire(models.Model):
"""
A single car tire. This to test that a user can only select their own cars.
"""
car = models.ForeignKey(Car, models.CASCADE)
class Honeycomb(models.Model):
location = models.CharField(max_length=20)
class Bee(models.Model):
"""
A model with a FK to a model that won't be registered with the admin
(Honeycomb) so the corresponding raw ID widget won't have a magnifying
glass link to select related honeycomb instances.
"""
honeycomb = models.ForeignKey(Honeycomb, models.CASCADE)
class Individual(models.Model):
"""
A model with a FK to itself. It won't be registered with the admin, so the
corresponding raw ID widget won't have a magnifying glass link to select
related instances (rendering will be called programmatically in this case).
"""
name = models.CharField(max_length=20)
parent = models.ForeignKey('self', models.SET_NULL, null=True)
soulmate = models.ForeignKey('self', models.CASCADE, null=True, related_name='soulmates')
class Company(models.Model):
name = models.CharField(max_length=20)
class Advisor(models.Model):
"""
A model with a m2m to a model that won't be registered with the admin
(Company) so the corresponding raw ID widget won't have a magnifying
glass link to select related company instances.
"""
name = models.CharField(max_length=20)
companies = models.ManyToManyField(Company)
@python_2_unicode_compatible
class Student(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
@python_2_unicode_compatible
class School(models.Model):
name = models.CharField(max_length=255)
students = models.ManyToManyField(Student, related_name='current_schools')
alumni = models.ManyToManyField(Student, related_name='previous_schools')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Profile(models.Model):
user = models.ForeignKey('auth.User', models.CASCADE, to_field='username')
def __str__(self):
return self.user.username
| bsd-3-clause |
andykimpe/chromium-test-npapi | tools/telemetry/telemetry/core/backends/webdriver/webdriver_tab_list_backend.py | 13 | 1386 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.webdriver import webdriver_tab_backend
class WebDriverTabListBackend(object):
def __init__(self, browser_backend):
self._browser_backend = browser_backend
# Stores the window handles.
self._tab_list = []
self._UpdateTabList()
def New(self, timeout=None):
    # The WebDriver API does not support tab control.
raise NotImplementedError()
def __iter__(self):
self._UpdateTabList()
return self._tab_list.__iter__()
def __len__(self):
self._UpdateTabList()
return len(self._tab_list)
def __getitem__(self, index):
self._UpdateTabList()
if len(self._tab_list) <= index:
raise IndexError('list index out of range')
return self._tab_list[index]
def _UpdateTabList(self):
window_handles = self._browser_backend.driver.window_handles
old_tab_list = self._tab_list
self._tab_list = []
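    # Reuse the existing tab wrapper when its window handle is still
    # present, so tab object identity survives across updates; only new
    # handles get wrapped in a fresh backend object.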
for window_handle in window_handles:
tab = None
for old_tab in old_tab_list:
if old_tab.window_handle == window_handle:
tab = old_tab
break
else:
tab = webdriver_tab_backend.WebDriverTabBackend(
self._browser_backend, window_handle)
self._tab_list.append(tab)
| bsd-3-clause |
zdary/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/interactive.py | 102 | 85840 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Acknowledgements:
# Nicolas Economou, for his command line debugger on which this is inspired.
# http://tinyurl.com/nicolaseconomou
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Interactive debugging console.
@group Debugging:
ConsoleDebugger
@group Exceptions:
CmdError
"""
from __future__ import with_statement
__revision__ = "$Id$"
__all__ = [ 'ConsoleDebugger', 'CmdError' ]
# TODO document this module with docstrings.
# TODO command to set a last error breakpoint.
# TODO command to show available plugins.
from winappdbg import win32
from winappdbg import compat
from winappdbg.system import System
from winappdbg.util import PathOperations
from winappdbg.event import EventHandler, NoEvent
from winappdbg.textio import HexInput, HexOutput, HexDump, CrashDump, DebugLog
import os
import sys
import code
import time
import warnings
import traceback
# too many variables named "cmd" to have a module by the same name :P
from cmd import Cmd
# lazy imports
readline = None
#==============================================================================
class DummyEvent (NoEvent):
"Dummy event object used internally by L{ConsoleDebugger}."
def get_pid(self):
return self._pid
def get_tid(self):
return self._tid
def get_process(self):
return self._process
def get_thread(self):
return self._thread
#==============================================================================
class CmdError (Exception):
"""
Exception raised when a command parsing error occurs.
Used internally by L{ConsoleDebugger}.
"""
#==============================================================================
class ConsoleDebugger (Cmd, EventHandler):
"""
Interactive console debugger.
@see: L{Debug.interactive}
"""
#------------------------------------------------------------------------------
# Class variables
# Exception to raise when an error occurs executing a command.
command_error_exception = CmdError
# Milliseconds to wait for debug events in the main loop.
dwMilliseconds = 100
# History file name.
history_file = '.winappdbg_history'
# Confirm before quitting?
confirm_quit = True
# Valid plugin name characters.
    valid_plugin_name_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
                              'abcdefghijklmnopqrstuvwxyz' \
                              '0123456789' \
                              '_'
# Names of the registers.
segment_names = ( 'cs', 'ds', 'es', 'fs', 'gs' )
register_alias_64_to_32 = {
'eax':'Rax', 'ebx':'Rbx', 'ecx':'Rcx', 'edx':'Rdx',
'eip':'Rip', 'ebp':'Rbp', 'esp':'Rsp', 'esi':'Rsi', 'edi':'Rdi'
}
register_alias_64_to_16 = { 'ax':'Rax', 'bx':'Rbx', 'cx':'Rcx', 'dx':'Rdx' }
register_alias_64_to_8_low = { 'al':'Rax', 'bl':'Rbx', 'cl':'Rcx', 'dl':'Rdx' }
register_alias_64_to_8_high = { 'ah':'Rax', 'bh':'Rbx', 'ch':'Rcx', 'dh':'Rdx' }
register_alias_32_to_16 = { 'ax':'Eax', 'bx':'Ebx', 'cx':'Ecx', 'dx':'Edx' }
register_alias_32_to_8_low = { 'al':'Eax', 'bl':'Ebx', 'cl':'Ecx', 'dl':'Edx' }
register_alias_32_to_8_high = { 'ah':'Eax', 'bh':'Ebx', 'ch':'Ecx', 'dh':'Edx' }
register_aliases_full_32 = list(segment_names)
register_aliases_full_32.extend(compat.iterkeys(register_alias_32_to_16))
register_aliases_full_32.extend(compat.iterkeys(register_alias_32_to_8_low))
register_aliases_full_32.extend(compat.iterkeys(register_alias_32_to_8_high))
register_aliases_full_32 = tuple(register_aliases_full_32)
register_aliases_full_64 = list(segment_names)
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_32))
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_16))
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_8_low))
register_aliases_full_64.extend(compat.iterkeys(register_alias_64_to_8_high))
register_aliases_full_64 = tuple(register_aliases_full_64)
# Names of the control flow instructions.
jump_instructions = (
'jmp', 'jecxz', 'jcxz',
'ja', 'jnbe', 'jae', 'jnb', 'jb', 'jnae', 'jbe', 'jna', 'jc', 'je',
'jz', 'jnc', 'jne', 'jnz', 'jnp', 'jpo', 'jp', 'jpe', 'jg', 'jnle',
'jge', 'jnl', 'jl', 'jnge', 'jle', 'jng', 'jno', 'jns', 'jo', 'js'
)
call_instructions = ( 'call', 'ret', 'retn' )
loop_instructions = ( 'loop', 'loopz', 'loopnz', 'loope', 'loopne' )
control_flow_instructions = call_instructions + loop_instructions + \
jump_instructions
#------------------------------------------------------------------------------
# Instance variables
def __init__(self):
"""
Interactive console debugger.
@see: L{Debug.interactive}
"""
Cmd.__init__(self)
EventHandler.__init__(self)
# Quit the debugger when True.
self.debuggerExit = False
# Full path to the history file.
self.history_file_full_path = None
# Last executed command.
self.__lastcmd = ""
#------------------------------------------------------------------------------
# Debugger
# Use this Debug object.
def start_using_debugger(self, debug):
# Clear the previous Debug object.
self.stop_using_debugger()
# Keep the Debug object.
self.debug = debug
# Set ourselves as the event handler for the debugger.
self.prevHandler = debug.set_event_handler(self)
# Stop using the Debug object given by start_using_debugger().
# Circular references must be removed, or the destructors never get called.
def stop_using_debugger(self):
if hasattr(self, 'debug'):
debug = self.debug
debug.set_event_handler(self.prevHandler)
del self.prevHandler
del self.debug
return debug
return None
# Destroy the Debug object.
def destroy_debugger(self, autodetach = True):
debug = self.stop_using_debugger()
if debug is not None:
if not autodetach:
debug.kill_all(bIgnoreExceptions=True)
debug.lastEvent = None
debug.stop()
del debug
@property
def lastEvent(self):
return self.debug.lastEvent
def set_fake_last_event(self, process):
if self.lastEvent is None:
self.debug.lastEvent = DummyEvent(self.debug)
self.debug.lastEvent._process = process
self.debug.lastEvent._thread = process.get_thread(
process.get_thread_ids()[0])
self.debug.lastEvent._pid = process.get_pid()
self.debug.lastEvent._tid = self.lastEvent._thread.get_tid()
#------------------------------------------------------------------------------
# Input
# TODO
# * try to guess breakpoints when insufficient data is given
# * child Cmd instances will have to be used for other prompts, for example
# when assembling or editing memory - it may also be a good idea to think
# if it's possible to make the main Cmd instance also a child, instead of
# the debugger itself - probably the same goes for the EventHandler, maybe
# it can be used as a contained object rather than a parent class.
# Join a token list into an argument string.
def join_tokens(self, token_list):
return self.debug.system.argv_to_cmdline(token_list)
# Split an argument string into a token list.
def split_tokens(self, arg, min_count = 0, max_count = None):
token_list = self.debug.system.cmdline_to_argv(arg)
if len(token_list) < min_count:
raise CmdError("missing parameters.")
if max_count and len(token_list) > max_count:
raise CmdError("too many parameters.")
return token_list
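    # Illustrative: split_tokens('bp "my app.exe" 401000') yields
    # ['bp', 'my app.exe', '401000'] under Windows command line rules.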
# Token is a thread ID or name.
def input_thread(self, token):
targets = self.input_thread_list( [token] )
if len(targets) == 0:
raise CmdError("missing thread name or ID")
if len(targets) > 1:
msg = "more than one thread with that name:\n"
for tid in targets:
msg += "\t%d\n" % tid
msg = msg[:-len("\n")]
raise CmdError(msg)
return targets[0]
# Token list is a list of thread IDs or names.
def input_thread_list(self, token_list):
targets = set()
system = self.debug.system
for token in token_list:
try:
tid = self.input_integer(token)
if not system.has_thread(tid):
raise CmdError("thread not found (%d)" % tid)
targets.add(tid)
except ValueError:
found = set()
for process in system.iter_processes():
found.update( system.find_threads_by_name(token) )
if not found:
raise CmdError("thread not found (%s)" % token)
for thread in found:
targets.add( thread.get_tid() )
targets = list(targets)
targets.sort()
return targets
# Token is a process ID or name.
def input_process(self, token):
targets = self.input_process_list( [token] )
if len(targets) == 0:
raise CmdError("missing process name or ID")
if len(targets) > 1:
msg = "more than one process with that name:\n"
for pid in targets:
msg += "\t%d\n" % pid
msg = msg[:-len("\n")]
raise CmdError(msg)
return targets[0]
# Token list is a list of process IDs or names.
def input_process_list(self, token_list):
targets = set()
system = self.debug.system
for token in token_list:
try:
pid = self.input_integer(token)
if not system.has_process(pid):
raise CmdError("process not found (%d)" % pid)
targets.add(pid)
except ValueError:
found = system.find_processes_by_filename(token)
if not found:
raise CmdError("process not found (%s)" % token)
for (process, _) in found:
targets.add( process.get_pid() )
targets = list(targets)
targets.sort()
return targets
# Token is a command line to execute.
def input_command_line(self, command_line):
argv = self.debug.system.cmdline_to_argv(command_line)
if not argv:
raise CmdError("missing command line to execute")
fname = argv[0]
if not os.path.exists(fname):
try:
fname, _ = win32.SearchPath(None, fname, '.exe')
except WindowsError:
raise CmdError("file not found: %s" % fname)
argv[0] = fname
command_line = self.debug.system.argv_to_cmdline(argv)
return command_line
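    # Example: "notepad foo.txt" resolves "notepad" to a full .exe path via
    # SearchPath when the bare name doesn't exist as a file.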
# Token is an integer.
# Only hexadecimal format is supported.
def input_hexadecimal_integer(self, token):
return int(token, 0x10)
# Token is an integer.
# It can be in any supported format.
def input_integer(self, token):
return HexInput.integer(token)
## input_integer = input_hexadecimal_integer
# Token is an address.
# The address can be a integer, a label or a register.
def input_address(self, token, pid = None, tid = None):
address = None
if self.is_register(token):
if tid is None:
if self.lastEvent is None or pid != self.lastEvent.get_pid():
msg = "can't resolve register (%s) for unknown thread"
raise CmdError(msg % token)
tid = self.lastEvent.get_tid()
address = self.input_register(token, tid)
if address is None:
try:
address = self.input_hexadecimal_integer(token)
except ValueError:
if pid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
process = self.lastEvent.get_process()
elif self.lastEvent is not None and pid == self.lastEvent.get_pid():
process = self.lastEvent.get_process()
else:
try:
process = self.debug.system.get_process(pid)
except KeyError:
raise CmdError("process not found (%d)" % pid)
try:
address = process.resolve_label(token)
except Exception:
raise CmdError("unknown address (%s)" % token)
return address
# Token is an address range, or a single address.
# The addresses can be integers, labels or registers.
def input_address_range(self, token_list, pid = None, tid = None):
if len(token_list) == 2:
token_1, token_2 = token_list
address = self.input_address(token_1, pid, tid)
try:
size = self.input_integer(token_2)
except ValueError:
raise CmdError("bad address range: %s %s" % (token_1, token_2))
elif len(token_list) == 1:
token = token_list[0]
if '-' in token:
try:
token_1, token_2 = token.split('-')
except Exception:
raise CmdError("bad address range: %s" % token)
address = self.input_address(token_1, pid, tid)
size = self.input_address(token_2, pid, tid) - address
else:
address = self.input_address(token, pid, tid)
size = None
return address, size
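    # Accepted forms: two tokens "address size", a single "start-end" token
    # (e.g. "401000-402000"), or a single address (size defaults to None).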
# XXX TODO
# Support non-integer registers here.
def is_register(self, token):
if win32.arch == 'i386':
if token in self.register_aliases_full_32:
return True
token = token.title()
for (name, typ) in win32.CONTEXT._fields_:
if name == token:
return win32.sizeof(typ) == win32.sizeof(win32.DWORD)
elif win32.arch == 'amd64':
if token in self.register_aliases_full_64:
return True
token = token.title()
for (name, typ) in win32.CONTEXT._fields_:
if name == token:
return win32.sizeof(typ) == win32.sizeof(win32.DWORD64)
return False
# The token is a register name.
# Returns None if no register name is matched.
def input_register(self, token, tid = None):
if tid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
thread = self.lastEvent.get_thread()
else:
thread = self.debug.system.get_thread(tid)
ctx = thread.get_context()
token = token.lower()
title = token.title()
if title in ctx:
return ctx.get(title) # eax -> Eax
if ctx.arch == 'i386':
if token in self.segment_names:
return ctx.get( 'Seg%s' % title ) # cs -> SegCs
if token in self.register_alias_32_to_16:
return ctx.get( self.register_alias_32_to_16[token] ) & 0xFFFF
if token in self.register_alias_32_to_8_low:
return ctx.get( self.register_alias_32_to_8_low[token] ) & 0xFF
if token in self.register_alias_32_to_8_high:
return (ctx.get( self.register_alias_32_to_8_high[token] ) & 0xFF00) >> 8
elif ctx.arch == 'amd64':
if token in self.segment_names:
return ctx.get( 'Seg%s' % title ) # cs -> SegCs
if token in self.register_alias_64_to_32:
return ctx.get( self.register_alias_64_to_32[token] ) & 0xFFFFFFFF
if token in self.register_alias_64_to_16:
return ctx.get( self.register_alias_64_to_16[token] ) & 0xFFFF
if token in self.register_alias_64_to_8_low:
return ctx.get( self.register_alias_64_to_8_low[token] ) & 0xFF
if token in self.register_alias_64_to_8_high:
return (ctx.get( self.register_alias_64_to_8_high[token] ) & 0xFF00) >> 8
return None
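    # Example: with Eax = 0x12345678, "ax" reads 0x5678, "al" reads 0x78
    # and "ah" reads 0x56.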
# Token list contains an address or address range.
# The prefix is also parsed looking for process and thread IDs.
def input_full_address_range(self, token_list):
pid, tid = self.get_process_and_thread_ids_from_prefix()
address, size = self.input_address_range(token_list, pid, tid)
return pid, tid, address, size
# Token list contains a breakpoint.
def input_breakpoint(self, token_list):
pid, tid, address, size = self.input_full_address_range(token_list)
if not self.debug.is_debugee(pid):
raise CmdError("target process is not being debugged")
return pid, tid, address, size
# Token list contains a memory address, and optional size and process.
# Sets the results as the default for the next display command.
def input_display(self, token_list, default_size = 64):
pid, tid, address, size = self.input_full_address_range(token_list)
if not size:
size = default_size
next_address = HexOutput.integer(address + size)
self.default_display_target = next_address
return pid, tid, address, size
#------------------------------------------------------------------------------
# Output
# Tell the user a module was loaded.
def print_module_load(self, event):
mod = event.get_module()
base = mod.get_base()
name = mod.get_filename()
if not name:
name = ''
msg = "Loaded module (%s) %s"
msg = msg % (HexDump.address(base), name)
print(msg)
# Tell the user a module was unloaded.
def print_module_unload(self, event):
mod = event.get_module()
base = mod.get_base()
name = mod.get_filename()
if not name:
name = ''
msg = "Unloaded module (%s) %s"
msg = msg % (HexDump.address(base), name)
print(msg)
# Tell the user a process was started.
def print_process_start(self, event):
pid = event.get_pid()
start = event.get_start_address()
if start:
start = HexOutput.address(start)
print("Started process %d at %s" % (pid, start))
else:
print("Attached to process %d" % pid)
# Tell the user a thread was started.
def print_thread_start(self, event):
tid = event.get_tid()
start = event.get_start_address()
if start:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
start = event.get_process().get_label_at_address(start)
print("Started thread %d at %s" % (tid, start))
else:
print("Attached to thread %d" % tid)
# Tell the user a process has finished.
def print_process_end(self, event):
pid = event.get_pid()
code = event.get_exit_code()
print("Process %d terminated, exit code %d" % (pid, code))
# Tell the user a thread has finished.
def print_thread_end(self, event):
tid = event.get_tid()
code = event.get_exit_code()
print("Thread %d terminated, exit code %d" % (tid, code))
    # Print debug strings.
def print_debug_string(self, event):
tid = event.get_tid()
string = event.get_debug_string()
print("Thread %d says: %r" % (tid, string))
# Inform the user of any other debugging event.
def print_event(self, event):
code = HexDump.integer( event.get_event_code() )
name = event.get_event_name()
desc = event.get_event_description()
if code in desc:
print('')
print("%s: %s" % (name, desc))
else:
print('')
print("%s (%s): %s" % (name, code, desc))
self.print_event_location(event)
# Stop on exceptions and prompt for commands.
def print_exception(self, event):
address = HexDump.address( event.get_exception_address() )
code = HexDump.integer( event.get_exception_code() )
desc = event.get_exception_description()
if event.is_first_chance():
chance = 'first'
else:
chance = 'second'
if code in desc:
msg = "%s at address %s (%s chance)" % (desc, address, chance)
else:
msg = "%s (%s) at address %s (%s chance)" % (desc, code, address, chance)
print('')
print(msg)
self.print_event_location(event)
# Show the current location in the code.
def print_event_location(self, event):
process = event.get_process()
thread = event.get_thread()
self.print_current_location(process, thread)
# Show the current location in the code.
def print_breakpoint_location(self, event):
process = event.get_process()
thread = event.get_thread()
pc = event.get_exception_address()
self.print_current_location(process, thread, pc)
# Show the current location in any process and thread.
def print_current_location(self, process = None, thread = None, pc = None):
if not process:
if self.lastEvent is None:
raise CmdError("no current process set")
process = self.lastEvent.get_process()
if not thread:
if self.lastEvent is None:
raise CmdError("no current process set")
thread = self.lastEvent.get_thread()
thread.suspend()
try:
if pc is None:
pc = thread.get_pc()
ctx = thread.get_context()
finally:
thread.resume()
label = process.get_label_at_address(pc)
try:
disasm = process.disassemble(pc, 15)
except WindowsError:
disasm = None
except NotImplementedError:
disasm = None
print('')
        print(CrashDump.dump_registers(ctx))
print("%s:" % label)
if disasm:
print(CrashDump.dump_code_line(disasm[0], pc, bShowDump = True))
else:
try:
data = process.peek(pc, 15)
except Exception:
data = None
if data:
print('%s: %s' % (HexDump.address(pc), HexDump.hexblock_byte(data)))
else:
print('%s: ???' % HexDump.address(pc))
# Display memory contents using a given method.
def print_memory_display(self, arg, method):
if not arg:
arg = self.default_display_target
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_display(token_list)
label = self.get_process(pid).get_label_at_address(address)
data = self.read_memory(address, size, pid)
if data:
print("%s:" % label)
            print(method(data, address))
#------------------------------------------------------------------------------
# Debugging
# Get the process ID from the prefix or the last event.
def get_process_id_from_prefix(self):
if self.cmdprefix:
pid = self.input_process(self.cmdprefix)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
pid = self.lastEvent.get_pid()
return pid
# Get the thread ID from the prefix or the last event.
def get_thread_id_from_prefix(self):
if self.cmdprefix:
tid = self.input_thread(self.cmdprefix)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
tid = self.lastEvent.get_tid()
return tid
# Get the process from the prefix or the last event.
def get_process_from_prefix(self):
pid = self.get_process_id_from_prefix()
return self.get_process(pid)
# Get the thread from the prefix or the last event.
def get_thread_from_prefix(self):
tid = self.get_thread_id_from_prefix()
return self.get_thread(tid)
# Get the process and thread IDs from the prefix or the last event.
def get_process_and_thread_ids_from_prefix(self):
if self.cmdprefix:
try:
pid = self.input_process(self.cmdprefix)
tid = None
except CmdError:
try:
tid = self.input_thread(self.cmdprefix)
pid = self.debug.system.get_thread(tid).get_pid()
except CmdError:
msg = "unknown process or thread (%s)" % self.cmdprefix
raise CmdError(msg)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
pid = self.lastEvent.get_pid()
tid = self.lastEvent.get_tid()
return pid, tid
# Get the process and thread from the prefix or the last event.
def get_process_and_thread_from_prefix(self):
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
thread = self.get_thread(tid)
return process, thread
# Get the process object.
def get_process(self, pid = None):
if pid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
process = self.lastEvent.get_process()
elif self.lastEvent is not None and pid == self.lastEvent.get_pid():
process = self.lastEvent.get_process()
else:
try:
process = self.debug.system.get_process(pid)
except KeyError:
raise CmdError("process not found (%d)" % pid)
return process
# Get the thread object.
def get_thread(self, tid = None):
if tid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
thread = self.lastEvent.get_thread()
elif self.lastEvent is not None and tid == self.lastEvent.get_tid():
thread = self.lastEvent.get_thread()
else:
try:
thread = self.debug.system.get_thread(tid)
except KeyError:
raise CmdError("thread not found (%d)" % tid)
return thread
# Read the process memory.
def read_memory(self, address, size, pid = None):
process = self.get_process(pid)
try:
data = process.peek(address, size)
except WindowsError:
orig_address = HexOutput.integer(address)
next_address = HexOutput.integer(address + size)
msg = "error reading process %d, from %s to %s (%d bytes)"
msg = msg % (pid, orig_address, next_address, size)
raise CmdError(msg)
return data
# Write the process memory.
def write_memory(self, address, data, pid = None):
process = self.get_process(pid)
try:
process.write(address, data)
except WindowsError:
size = len(data)
orig_address = HexOutput.integer(address)
next_address = HexOutput.integer(address + size)
msg = "error reading process %d, from %s to %s (%d bytes)"
msg = msg % (pid, orig_address, next_address, size)
raise CmdError(msg)
# Change a register value.
def change_register(self, register, value, tid = None):
# Get the thread.
if tid is None:
if self.lastEvent is None:
raise CmdError("no current process set")
thread = self.lastEvent.get_thread()
else:
try:
thread = self.debug.system.get_thread(tid)
except KeyError:
raise CmdError("thread not found (%d)" % tid)
# Convert the value to integer type.
try:
value = self.input_integer(value)
except ValueError:
pid = thread.get_pid()
value = self.input_address(value, pid, tid)
# Suspend the thread.
# The finally clause ensures the thread is resumed before returning.
thread.suspend()
try:
# Get the current context.
ctx = thread.get_context()
# Register name matching is case insensitive.
register = register.lower()
            # Pick the alias tables that match the thread's architecture.
            if ctx.arch == 'amd64':
                alias_16 = self.register_alias_64_to_16
                alias_8_low = self.register_alias_64_to_8_low
                alias_8_high = self.register_alias_64_to_8_high
            else:
                alias_16 = self.register_alias_32_to_16
                alias_8_low = self.register_alias_32_to_8_low
                alias_8_high = self.register_alias_32_to_8_high
            # Full width integer registers.
            if register.title() in ctx:
                register = register.title() # eax -> Eax
            # Segment (16 bit) registers.
            if register in self.segment_names:
                register = 'Seg%s' % register.title() # cs -> SegCs
                value = value & 0xFFFF
            # 32 bit aliases of 64 bit registers.
            if ctx.arch == 'amd64' and register in self.register_alias_64_to_32:
                register = self.register_alias_64_to_32[register]
                previous = ctx.get(register) & ~0xFFFFFFFF
                value = (value & 0xFFFFFFFF) | previous
            # Integer 16 bits registers.
            if register in alias_16:
                register = alias_16[register]
                previous = ctx.get(register) & ~0xFFFF
                value = (value & 0xFFFF) | previous
            # Integer 8 bits registers (low part).
            if register in alias_8_low:
                register = alias_8_low[register]
                previous = ctx.get(register) & ~0xFF
                value = (value & 0xFF) | previous
            # Integer 8 bits registers (high part).
            if register in alias_8_high:
                register = alias_8_high[register]
                previous = ctx.get(register) & ~0xFF00
                value = ((value & 0xFF) << 8) | previous
# Set the new context.
ctx.__setitem__(register, value)
thread.set_context(ctx)
# Resume the thread.
finally:
thread.resume()
# Very crude way to find data within the process memory.
# TODO: Perhaps pfind.py can be integrated here instead.
def find_in_memory(self, query, process):
for mbi in process.get_memory_map():
if mbi.State != win32.MEM_COMMIT or mbi.Protect & win32.PAGE_GUARD:
continue
address = mbi.BaseAddress
size = mbi.RegionSize
try:
data = process.read(address, size)
            except WindowsError:
                msg = "*** Warning: read error at address %s"
                msg = msg % HexDump.address(address)
                print(msg)
                continue
width = min(len(query), 16)
p = data.find(query)
while p >= 0:
q = p + len(query)
d = data[ p : min(q, p + width) ]
h = HexDump.hexline(d, width = width)
a = HexDump.address(address + p)
print("%s: %s" % (a, h))
p = data.find(query, q)
# Kill a process.
def kill_process(self, pid):
process = self.debug.system.get_process(pid)
try:
process.kill()
if self.debug.is_debugee(pid):
self.debug.detach(pid)
print("Killed process (%d)" % pid)
except Exception:
print("Error trying to kill process (%d)" % pid)
# Kill a thread.
def kill_thread(self, tid):
thread = self.debug.system.get_thread(tid)
try:
thread.kill()
process = thread.get_process()
pid = process.get_pid()
if self.debug.is_debugee(pid) and not process.is_alive():
self.debug.detach(pid)
print("Killed thread (%d)" % tid)
except Exception:
print("Error trying to kill thread (%d)" % tid)
#------------------------------------------------------------------------------
# Command prompt input
# Prompt the user for commands.
def prompt_user(self):
while not self.debuggerExit:
try:
self.cmdloop()
break
except CmdError:
e = sys.exc_info()[1]
print("*** Error: %s" % str(e))
except Exception:
traceback.print_exc()
## self.debuggerExit = True
# Prompt the user for a YES/NO kind of question.
def ask_user(self, msg, prompt = "Are you sure? (y/N): "):
print(msg)
answer = raw_input(prompt)
answer = answer.strip()[:1].lower()
return answer == 'y'
# Autocomplete the given command when not ambiguous.
# Convert it to lowercase (so commands are seen as case insensitive).
def autocomplete(self, cmd):
cmd = cmd.lower()
completed = self.completenames(cmd)
if len(completed) == 1:
cmd = completed[0]
return cmd
# Get the help text for the given list of command methods.
# Note it's NOT a list of commands, but a list of actual method names.
# Each line of text is stripped and all lines are sorted.
# Repeated text lines are removed.
# Returns a single, possibly multiline, string.
def get_help(self, commands):
msg = set()
for name in commands:
if name != 'do_help':
try:
doc = getattr(self, name).__doc__.split('\n')
except Exception:
return ( "No help available when Python"
" is run with the -OO switch." )
for x in doc:
x = x.strip()
if x:
msg.add(' %s' % x)
msg = list(msg)
msg.sort()
msg = '\n'.join(msg)
return msg
# Parse the prefix and remove it from the command line.
def split_prefix(self, line):
prefix = None
if line.startswith('~'):
pos = line.find(' ')
if pos == 1:
pos = line.find(' ', pos + 1)
            if pos >= 0:
prefix = line[ 1 : pos ].strip()
line = line[ pos : ].strip()
return prefix, line
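    # Example: '~1234 k' yields prefix '1234' and line 'k'; '~ 1234 k'
    # (with a space after the tilde) is handled the same way.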
#------------------------------------------------------------------------------
# Cmd() hacks
# Header for help page.
doc_header = 'Available commands (type help * or help <command>)'
## # Read and write directly to stdin and stdout.
## # This prevents the use of raw_input and print.
## use_rawinput = False
@property
def prompt(self):
if self.lastEvent:
pid = self.lastEvent.get_pid()
tid = self.lastEvent.get_tid()
if self.debug.is_debugee(pid):
## return '~%d(%d)> ' % (tid, pid)
return '%d:%d> ' % (pid, tid)
return '> '
# Return a sorted list of method names.
# Only returns the methods that implement commands.
def get_names(self):
names = Cmd.get_names(self)
names = [ x for x in set(names) if x.startswith('do_') ]
names.sort()
return names
# Automatically autocomplete commands, even if Tab wasn't pressed.
# The prefix is removed from the line and stored in self.cmdprefix.
# Also implement the commands that consist of a symbol character.
def parseline(self, line):
self.cmdprefix, line = self.split_prefix(line)
line = line.strip()
if line:
if line[0] == '.':
line = 'plugin ' + line[1:]
elif line[0] == '#':
line = 'python ' + line[1:]
cmd, arg, line = Cmd.parseline(self, line)
if cmd:
cmd = self.autocomplete(cmd)
return cmd, arg, line
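    # Examples: '.example foo' becomes 'plugin example foo' and '#1+1'
    # becomes 'python 1+1'.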
## # Don't repeat the last executed command.
## def emptyline(self):
## pass
# Reset the defaults for some commands.
def preloop(self):
        pc = 'rip' if win32.arch == 'amd64' else 'eip'
        self.default_disasm_target = pc
        self.default_display_target = pc
self.last_display_command = self.do_db
# Put the prefix back in the command line.
def get_lastcmd(self):
return self.__lastcmd
def set_lastcmd(self, lastcmd):
if self.cmdprefix:
lastcmd = '~%s %s' % (self.cmdprefix, lastcmd)
self.__lastcmd = lastcmd
lastcmd = property(get_lastcmd, set_lastcmd)
# Quit the command prompt if the debuggerExit flag is on.
def postcmd(self, stop, line):
return stop or self.debuggerExit
#------------------------------------------------------------------------------
# Commands
    # Each command contains a docstring with its help text.
    # The help text consists of independent text lines,
    # where each line shows a command and its parameters.
    # Each command method has the help message for itself and all its aliases.
# Only the docstring for the "help" command is shown as-is.
# NOTE: Command methods MUST be all lowercase!
# Extended help command.
def do_help(self, arg):
"""
? - show the list of available commands
? * - show help for all commands
? <command> [command...] - show help for the given command(s)
help - show the list of available commands
help * - show help for all commands
help <command> [command...] - show help for the given command(s)
"""
if not arg:
Cmd.do_help(self, arg)
elif arg in ('?', 'help'):
# An easter egg :)
print(" Help! I need somebody...")
print(" Help! Not just anybody...")
print(" Help! You know, I need someone...")
print(" Heeelp!")
else:
if arg == '*':
commands = self.get_names()
commands = [ x for x in commands if x.startswith('do_') ]
else:
commands = set()
for x in arg.split(' '):
x = x.strip()
if x:
for n in self.completenames(x):
commands.add( 'do_%s' % n )
commands = list(commands)
commands.sort()
print(self.get_help(commands))
def do_shell(self, arg):
"""
! - spawn a system shell
shell - spawn a system shell
! <command> [arguments...] - execute a single shell command
shell <command> [arguments...] - execute a single shell command
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
# Try to use the environment to locate cmd.exe.
# If not found, it's usually OK to just use the filename,
# since cmd.exe is one of those "magic" programs that
# can be automatically found by CreateProcess.
shell = os.getenv('ComSpec', 'cmd.exe')
# When given a command, run it and return.
# When no command is given, spawn a shell.
if arg:
arg = '%s /c %s' % (shell, arg)
else:
arg = shell
process = self.debug.system.start_process(arg, bConsole = True)
process.wait()
# This hack fixes a bug in Python, the interpreter console is closing the
# stdin pipe when calling the exit() function (Ctrl+Z seems to work fine).
class _PythonExit(object):
def __repr__(self):
return "Use exit() or Ctrl-Z plus Return to exit"
def __call__(self):
raise SystemExit()
_python_exit = _PythonExit()
# Spawns a Python shell with some handy local variables and the winappdbg
# module already imported. Also the console banner is improved.
def _spawn_python_shell(self, arg):
import winappdbg
banner = ('Python %s on %s\nType "help", "copyright", '
'"credits" or "license" for more information.\n')
platform = winappdbg.version.lower()
platform = 'WinAppDbg %s' % platform
banner = banner % (sys.version, platform)
local = {}
local.update(__builtins__)
local.update({
'__name__' : '__console__',
'__doc__' : None,
'exit' : self._python_exit,
'self' : self,
'arg' : arg,
'winappdbg' : winappdbg,
})
try:
code.interact(banner=banner, local=local)
except SystemExit:
# We need to catch it so it doesn't kill our program.
pass
def do_python(self, arg):
"""
# - spawn a python interpreter
python - spawn a python interpreter
# <statement> - execute a single python statement
python <statement> - execute a single python statement
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
# When given a Python statement, execute it directly.
if arg:
try:
compat.exec_(arg, globals(), locals())
except Exception:
traceback.print_exc()
# When no statement is given, spawn a Python interpreter.
else:
try:
self._spawn_python_shell(arg)
except Exception:
e = sys.exc_info()[1]
raise CmdError(
"unhandled exception when running Python console: %s" % e)
# The plugins interface is quite simple.
#
# Just place a .py file with the plugin name in the "plugins" folder,
# for example "do_example.py" would implement the "example" command.
#
# The plugin must have a function named "do", which implements the
# command functionality exactly like the do_* methods of Cmd instances.
#
# The docstring for the "do" function will be parsed exactly like
# one of the debugger's commands - that is, each line is treated
# independently.
#
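    # A minimal example plugin (hypothetical "plugins/do_example.py"; the
    # command name and output below are illustrative, not part of the
    # shipped plugin set):
    #
    #     def do(self, arg):
    #         """
    #         .example <text> - echo the given text back
    #         """
    #         print("example plugin called with: %r" % arg)
    #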
def do_plugin(self, arg):
"""
[~prefix] .<name> [arguments] - run a plugin command
[~prefix] plugin <name> [arguments] - run a plugin command
"""
pos = arg.find(' ')
if pos < 0:
name = arg
arg = ''
else:
name = arg[:pos]
arg = arg[pos:].strip()
if not name:
raise CmdError("missing plugin name")
for c in name:
if c not in self.valid_plugin_name_chars:
raise CmdError("invalid plugin name: %r" % name)
name = 'winappdbg.plugins.do_%s' % name
try:
plugin = __import__(name)
components = name.split('.')
for comp in components[1:]:
plugin = getattr(plugin, comp)
reload(plugin)
except ImportError:
raise CmdError("plugin not found: %s" % name)
try:
return plugin.do(self, arg)
except CmdError:
raise
except Exception:
e = sys.exc_info()[1]
## traceback.print_exc(e) # XXX DEBUG
raise CmdError("unhandled exception in plugin: %s" % e)
def do_quit(self, arg):
"""
quit - close the debugging session
q - close the debugging session
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.confirm_quit:
count = self.debug.get_debugee_count()
if count > 0:
if count == 1:
msg = "There's a program still running."
else:
msg = "There are %s programs still running." % count
if not self.ask_user(msg):
return False
self.debuggerExit = True
return True
do_q = do_quit
def do_attach(self, arg):
"""
attach <target> [target...] - attach to the given process(es)
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
targets = self.input_process_list( self.split_tokens(arg, 1) )
if not targets:
print("Error: missing parameters")
else:
debug = self.debug
for pid in targets:
try:
debug.attach(pid)
print("Attached to process (%d)" % pid)
except Exception:
print("Error: can't attach to process (%d)" % pid)
def do_detach(self, arg):
"""
[~process] detach - detach from the current process
detach - detach from the current process
detach <target> [target...] - detach from the given process(es)
"""
debug = self.debug
token_list = self.split_tokens(arg)
if self.cmdprefix:
token_list.insert(0, self.cmdprefix)
targets = self.input_process_list(token_list)
if not targets:
if self.lastEvent is None:
raise CmdError("no current process set")
targets = [ self.lastEvent.get_pid() ]
for pid in targets:
try:
debug.detach(pid)
print("Detached from process (%d)" % pid)
except Exception:
print("Error: can't detach from process (%d)" % pid)
def do_windowed(self, arg):
"""
windowed <target> [arguments...] - run a windowed program for debugging
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
cmdline = self.input_command_line(arg)
try:
            process = self.debug.execl(cmdline,
bConsole = False,
bFollow = self.options.follow)
print("Spawned process (%d)" % process.get_pid())
except Exception:
raise CmdError("can't execute")
self.set_fake_last_event(process)
def do_console(self, arg):
"""
console <target> [arguments...] - run a console program for debugging
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
cmdline = self.input_command_line(arg)
try:
            process = self.debug.execl(cmdline,
bConsole = True,
bFollow = self.options.follow)
print("Spawned process (%d)" % process.get_pid())
except Exception:
raise CmdError("can't execute")
self.set_fake_last_event(process)
def do_continue(self, arg):
"""
continue - continue execution
g - continue execution
go - continue execution
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.debug.get_debugee_count() > 0:
return True
do_g = do_continue
do_go = do_continue
def do_gh(self, arg):
"""
gh - go with exception handled
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.lastEvent:
self.lastEvent.continueStatus = win32.DBG_EXCEPTION_HANDLED
return self.do_go(arg)
def do_gn(self, arg):
"""
gn - go with exception not handled
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.lastEvent:
self.lastEvent.continueStatus = win32.DBG_EXCEPTION_NOT_HANDLED
return self.do_go(arg)
def do_refresh(self, arg):
"""
refresh - refresh the list of running processes and threads
[~process] refresh - refresh the list of running threads
"""
if arg:
raise CmdError("too many arguments")
if self.cmdprefix:
process = self.get_process_from_prefix()
process.scan()
else:
self.debug.system.scan()
def do_processlist(self, arg):
"""
pl - show the processes being debugged
processlist - show the processes being debugged
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
system = self.debug.system
pid_list = self.debug.get_debugee_pids()
if pid_list:
print("Process ID File name")
for pid in pid_list:
if pid == 0:
filename = "System Idle Process"
elif pid == 4:
filename = "System"
else:
filename = system.get_process(pid).get_filename()
filename = PathOperations.pathname_to_filename(filename)
print("%-12d %s" % (pid, filename))
do_pl = do_processlist
def do_threadlist(self, arg):
"""
tl - show the threads being debugged
threadlist - show the threads being debugged
"""
if arg:
raise CmdError("too many arguments")
if self.cmdprefix:
process = self.get_process_from_prefix()
for thread in process.iter_threads():
tid = thread.get_tid()
name = thread.get_name()
print("%-12d %s" % (tid, name))
else:
system = self.debug.system
pid_list = self.debug.get_debugee_pids()
if pid_list:
print("Thread ID Thread name")
for pid in pid_list:
process = system.get_process(pid)
for thread in process.iter_threads():
tid = thread.get_tid()
name = thread.get_name()
print("%-12d %s" % (tid, name))
do_tl = do_threadlist
def do_kill(self, arg):
"""
[~process] kill - kill a process
[~thread] kill - kill a thread
kill - kill the current process
kill * - kill all debugged processes
kill <processes and/or threads...> - kill the given processes and threads
"""
if arg:
if arg == '*':
target_pids = self.debug.get_debugee_pids()
target_tids = list()
else:
target_pids = set()
target_tids = set()
if self.cmdprefix:
pid, tid = self.get_process_and_thread_ids_from_prefix()
                    if tid is None:
                        target_pids.add(pid)
                    else:
                        target_tids.add(tid)
for token in self.split_tokens(arg):
try:
pid = self.input_process(token)
target_pids.add(pid)
except CmdError:
try:
                            tid = self.input_thread(token)
                            target_tids.add(tid)
except CmdError:
msg = "unknown process or thread (%s)" % token
raise CmdError(msg)
target_pids = list(target_pids)
target_tids = list(target_tids)
target_pids.sort()
target_tids.sort()
msg = "You are about to kill %d processes and %d threads."
msg = msg % ( len(target_pids), len(target_tids) )
if self.ask_user(msg):
for pid in target_pids:
self.kill_process(pid)
for tid in target_tids:
self.kill_thread(tid)
else:
if self.cmdprefix:
pid, tid = self.get_process_and_thread_ids_from_prefix()
if tid is None:
if self.lastEvent is not None and pid == self.lastEvent.get_pid():
msg = "You are about to kill the current process."
else:
msg = "You are about to kill process %d." % pid
if self.ask_user(msg):
self.kill_process(pid)
else:
if self.lastEvent is not None and tid == self.lastEvent.get_tid():
msg = "You are about to kill the current thread."
else:
msg = "You are about to kill thread %d." % tid
if self.ask_user(msg):
self.kill_thread(tid)
else:
if self.lastEvent is None:
raise CmdError("no current process set")
pid = self.lastEvent.get_pid()
if self.ask_user("You are about to kill the current process."):
self.kill_process(pid)
# TODO: create hidden threads using undocumented API calls.
def do_modload(self, arg):
"""
[~process] modload <filename.dll> - load a DLL module
"""
filename = self.split_tokens(arg, 1, 1)[0]
process = self.get_process_from_prefix()
try:
process.inject_dll(filename, bWait=False)
except RuntimeError:
print("Can't inject module: %r" % filename)
# TODO: modunload
def do_stack(self, arg):
"""
[~thread] k - show the stack trace
[~thread] stack - show the stack trace
"""
if arg: # XXX TODO add depth parameter
raise CmdError("too many arguments")
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
thread = process.get_thread(tid)
try:
stack_trace = thread.get_stack_trace_with_labels()
if stack_trace:
                print(CrashDump.dump_stack_trace_with_labels(stack_trace))
else:
print("No stack trace available for thread (%d)" % tid)
except WindowsError:
print("Can't get stack trace for thread (%d)" % tid)
do_k = do_stack
def do_break(self, arg):
"""
break - force a debug break in all debugees
break <process> [process...] - force a debug break
"""
debug = self.debug
system = debug.system
targets = self.input_process_list( self.split_tokens(arg) )
if not targets:
targets = debug.get_debugee_pids()
targets.sort()
if self.lastEvent:
current = self.lastEvent.get_pid()
else:
current = None
for pid in targets:
if pid != current and debug.is_debugee(pid):
process = system.get_process(pid)
try:
process.debug_break()
except WindowsError:
print("Can't force a debug break on process (%d)")
def do_step(self, arg):
"""
p - step on the current assembly instruction
next - step on the current assembly instruction
step - step on the current assembly instruction
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if self.lastEvent is None:
raise CmdError("no current process set")
if arg: # XXX this check is to be removed
raise CmdError("too many arguments")
pid = self.lastEvent.get_pid()
thread = self.lastEvent.get_thread()
pc = thread.get_pc()
code = thread.disassemble(pc, 16)[0]
size = code[1]
opcode = code[2].lower()
if ' ' in opcode:
opcode = opcode[ : opcode.find(' ') ]
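        # Control flow may not fall through to pc + size, so single-step it;
        # anything else gets a one-shot breakpoint at the next instruction.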
if opcode in self.jump_instructions or opcode in ('int', 'ret', 'retn'):
return self.do_trace(arg)
address = pc + size
        ## print(hex(pc), hex(address), size)  # XXX DEBUG
self.debug.stalk_at(pid, address)
return True
do_p = do_step
do_next = do_step
def do_trace(self, arg):
"""
t - trace at the current assembly instruction
trace - trace at the current assembly instruction
"""
if arg: # XXX this check is to be removed
raise CmdError("too many arguments")
if self.lastEvent is None:
raise CmdError("no current thread set")
self.lastEvent.get_thread().set_tf()
return True
do_t = do_trace
def do_bp(self, arg):
"""
[~process] bp <address> - set a code breakpoint
"""
pid = self.get_process_id_from_prefix()
if not self.debug.is_debugee(pid):
raise CmdError("target process is not being debugged")
process = self.get_process(pid)
token_list = self.split_tokens(arg, 1, 1)
try:
address = self.input_address(token_list[0], pid)
deferred = False
except Exception:
address = token_list[0]
deferred = True
if not address:
address = token_list[0]
deferred = True
self.debug.break_at(pid, address)
if deferred:
print("Deferred breakpoint set at %s" % address)
else:
print("Breakpoint set at %s" % address)
def do_ba(self, arg):
"""
[~thread] ba <a|w|e> <1|2|4|8> <address> - set hardware breakpoint
"""
debug = self.debug
thread = self.get_thread_from_prefix()
pid = thread.get_pid()
tid = thread.get_tid()
if not debug.is_debugee(pid):
raise CmdError("target thread is not being debugged")
token_list = self.split_tokens(arg, 3, 3)
access = token_list[0].lower()
size = token_list[1]
address = token_list[2]
if access == 'a':
access = debug.BP_BREAK_ON_ACCESS
elif access == 'w':
access = debug.BP_BREAK_ON_WRITE
elif access == 'e':
access = debug.BP_BREAK_ON_EXECUTION
else:
raise CmdError("bad access type: %s" % token_list[0])
if size == '1':
size = debug.BP_WATCH_BYTE
elif size == '2':
size = debug.BP_WATCH_WORD
elif size == '4':
size = debug.BP_WATCH_DWORD
elif size == '8':
size = debug.BP_WATCH_QWORD
else:
raise CmdError("bad breakpoint size: %s" % size)
address = self.input_address(address, pid)
if debug.has_hardware_breakpoint(tid, address):
debug.erase_hardware_breakpoint(tid, address)
debug.define_hardware_breakpoint(tid, address, access, size)
debug.enable_hardware_breakpoint(tid, address)
def do_bm(self, arg):
"""
[~process] bm <address-address> - set memory breakpoint
"""
pid = self.get_process_id_from_prefix()
if not self.debug.is_debugee(pid):
raise CmdError("target process is not being debugged")
process = self.get_process(pid)
token_list = self.split_tokens(arg, 1, 2)
        address, size = self.input_address_range(token_list, pid)
self.debug.watch_buffer(pid, address, size)
def do_bl(self, arg):
"""
bl - list the breakpoints for the current process
bl * - list the breakpoints for all processes
[~process] bl - list the breakpoints for the given process
bl <process> [process...] - list the breakpoints for each given process
"""
debug = self.debug
if arg == '*':
if self.cmdprefix:
raise CmdError("prefix not supported")
            targets = debug.get_debugee_pids()
else:
targets = self.input_process_list( self.split_tokens(arg) )
if self.cmdprefix:
targets.insert(0, self.input_process(self.cmdprefix))
if not targets:
if self.lastEvent is None:
raise CmdError("no current process is set")
targets = [ self.lastEvent.get_pid() ]
for pid in targets:
bplist = debug.get_process_code_breakpoints(pid)
printed_process_banner = False
if bplist:
if not printed_process_banner:
print("Process %d:" % pid)
printed_process_banner = True
for bp in bplist:
address = repr(bp)[1:-1].replace('remote address ','')
print(" %s" % address)
dbplist = debug.get_process_deferred_code_breakpoints(pid)
if dbplist:
if not printed_process_banner:
print("Process %d:" % pid)
printed_process_banner = True
for (label, action, oneshot) in dbplist:
if oneshot:
address = " Deferred unconditional one-shot" \
" code breakpoint at %s"
else:
address = " Deferred unconditional" \
" code breakpoint at %s"
address = address % label
print(" %s" % address)
bplist = debug.get_process_page_breakpoints(pid)
if bplist:
if not printed_process_banner:
print("Process %d:" % pid)
printed_process_banner = True
for bp in bplist:
address = repr(bp)[1:-1].replace('remote address ','')
print(" %s" % address)
for tid in debug.system.get_process(pid).iter_thread_ids():
bplist = debug.get_thread_hardware_breakpoints(tid)
if bplist:
print("Thread %d:" % tid)
for bp in bplist:
address = repr(bp)[1:-1].replace('remote address ','')
print(" %s" % address)
def do_bo(self, arg):
"""
[~process] bo <address> - make a code breakpoint one-shot
[~thread] bo <address> - make a hardware breakpoint one-shot
[~process] bo <address-address> - make a memory breakpoint one-shot
[~process] bo <address> <size> - make a memory breakpoint one-shot
"""
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_breakpoint(token_list)
debug = self.debug
found = False
if size is None:
if tid is not None:
if debug.has_hardware_breakpoint(tid, address):
debug.enable_one_shot_hardware_breakpoint(tid, address)
found = True
if pid is not None:
if debug.has_code_breakpoint(pid, address):
debug.enable_one_shot_code_breakpoint(pid, address)
found = True
else:
if debug.has_page_breakpoint(pid, address):
debug.enable_one_shot_page_breakpoint(pid, address)
found = True
if not found:
print("Error: breakpoint not found.")
def do_be(self, arg):
"""
[~process] be <address> - enable a code breakpoint
[~thread] be <address> - enable a hardware breakpoint
[~process] be <address-address> - enable a memory breakpoint
[~process] be <address> <size> - enable a memory breakpoint
"""
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_breakpoint(token_list)
debug = self.debug
found = False
if size is None:
if tid is not None:
if debug.has_hardware_breakpoint(tid, address):
debug.enable_hardware_breakpoint(tid, address)
found = True
if pid is not None:
if debug.has_code_breakpoint(pid, address):
debug.enable_code_breakpoint(pid, address)
found = True
else:
if debug.has_page_breakpoint(pid, address):
debug.enable_page_breakpoint(pid, address)
found = True
if not found:
print("Error: breakpoint not found.")
def do_bd(self, arg):
"""
[~process] bd <address> - disable a code breakpoint
[~thread] bd <address> - disable a hardware breakpoint
[~process] bd <address-address> - disable a memory breakpoint
[~process] bd <address> <size> - disable a memory breakpoint
"""
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_breakpoint(token_list)
debug = self.debug
found = False
if size is None:
if tid is not None:
if debug.has_hardware_breakpoint(tid, address):
debug.disable_hardware_breakpoint(tid, address)
found = True
if pid is not None:
if debug.has_code_breakpoint(pid, address):
debug.disable_code_breakpoint(pid, address)
found = True
else:
if debug.has_page_breakpoint(pid, address):
debug.disable_page_breakpoint(pid, address)
found = True
if not found:
print("Error: breakpoint not found.")
def do_bc(self, arg):
"""
[~process] bc <address> - clear a code breakpoint
[~thread] bc <address> - clear a hardware breakpoint
[~process] bc <address-address> - clear a memory breakpoint
[~process] bc <address> <size> - clear a memory breakpoint
"""
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_breakpoint(token_list)
debug = self.debug
found = False
if size is None:
if tid is not None:
if debug.has_hardware_breakpoint(tid, address):
debug.dont_watch_variable(tid, address)
found = True
if pid is not None:
if debug.has_code_breakpoint(pid, address):
debug.dont_break_at(pid, address)
found = True
else:
if debug.has_page_breakpoint(pid, address):
debug.dont_watch_buffer(pid, address, size)
found = True
if not found:
print("Error: breakpoint not found.")
def do_disassemble(self, arg):
"""
[~thread] u [register] - show code disassembly
[~process] u [address] - show code disassembly
[~thread] disassemble [register] - show code disassembly
[~process] disassemble [address] - show code disassembly
"""
if not arg:
arg = self.default_disasm_target
token_list = self.split_tokens(arg, 1, 1)
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
address = self.input_address(token_list[0], pid, tid)
try:
code = process.disassemble(address, 15*8)[:8]
except Exception:
msg = "can't disassemble address %s"
msg = msg % HexDump.address(address)
raise CmdError(msg)
if code:
label = process.get_label_at_address(address)
last_code = code[-1]
next_address = last_code[0] + last_code[1]
next_address = HexOutput.integer(next_address)
self.default_disasm_target = next_address
print("%s:" % label)
## print(CrashDump.dump_code(code))
for line in code:
print(CrashDump.dump_code_line(line, bShowDump = False))
do_u = do_disassemble
def do_search(self, arg):
"""
[~process] s [address-address] <search string>
[~process] search [address-address] <search string>
"""
token_list = self.split_tokens(arg, 1, 3)
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
if len(token_list) == 1:
pattern = token_list[0]
minAddr = None
maxAddr = None
else:
pattern = token_list[-1]
addr, size = self.input_address_range(token_list[:-1], pid, tid)
minAddr = addr
maxAddr = addr + size
        matches = process.search_bytes(pattern, minAddr, maxAddr)
if process.get_bits() == 32:
addr_width = 8
else:
addr_width = 16
# TODO: need a prettier output here!
        for addr in matches:
print(HexDump.address(addr, addr_width))
do_s = do_search
def do_searchhex(self, arg):
"""
[~process] sh [address-address] <hexadecimal pattern>
[~process] searchhex [address-address] <hexadecimal pattern>
"""
token_list = self.split_tokens(arg, 1, 3)
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
if len(token_list) == 1:
pattern = token_list[0]
minAddr = None
maxAddr = None
else:
pattern = token_list[-1]
addr, size = self.input_address_range(token_list[:-1], pid, tid)
minAddr = addr
maxAddr = addr + size
        matches = process.search_hexa(pattern, minAddr, maxAddr)
if process.get_bits() == 32:
addr_width = 8
else:
addr_width = 16
        for addr, data in matches:
            print(HexDump.hexblock(data, addr, addr_width))
do_sh = do_searchhex
## def do_strings(self, arg):
## """
## [~process] strings - extract ASCII strings from memory
## """
## if arg:
## raise CmdError("too many arguments")
## pid, tid = self.get_process_and_thread_ids_from_prefix()
## process = self.get_process(pid)
## for addr, size, data in process.strings():
## print("%s: %r" % (HexDump.address(addr), data)
def do_d(self, arg):
"""
[~thread] d <register> - show memory contents
[~thread] d <register-register> - show memory contents
[~thread] d <register> <size> - show memory contents
[~process] d <address> - show memory contents
[~process] d <address-address> - show memory contents
[~process] d <address> <size> - show memory contents
"""
return self.last_display_command(arg)
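    # Example: after "db eip", a bare "d" repeats the previous display
    # command at the address where the last dump ended (see input_display).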
def do_db(self, arg):
"""
[~thread] db <register> - show memory contents as bytes
[~thread] db <register-register> - show memory contents as bytes
[~thread] db <register> <size> - show memory contents as bytes
[~process] db <address> - show memory contents as bytes
[~process] db <address-address> - show memory contents as bytes
[~process] db <address> <size> - show memory contents as bytes
"""
self.print_memory_display(arg, HexDump.hexblock)
self.last_display_command = self.do_db
def do_dw(self, arg):
"""
[~thread] dw <register> - show memory contents as words
[~thread] dw <register-register> - show memory contents as words
[~thread] dw <register> <size> - show memory contents as words
[~process] dw <address> - show memory contents as words
[~process] dw <address-address> - show memory contents as words
[~process] dw <address> <size> - show memory contents as words
"""
self.print_memory_display(arg, HexDump.hexblock_word)
self.last_display_command = self.do_dw
def do_dd(self, arg):
"""
[~thread] dd <register> - show memory contents as dwords
[~thread] dd <register-register> - show memory contents as dwords
[~thread] dd <register> <size> - show memory contents as dwords
[~process] dd <address> - show memory contents as dwords
[~process] dd <address-address> - show memory contents as dwords
[~process] dd <address> <size> - show memory contents as dwords
"""
self.print_memory_display(arg, HexDump.hexblock_dword)
self.last_display_command = self.do_dd
def do_dq(self, arg):
"""
[~thread] dq <register> - show memory contents as qwords
[~thread] dq <register-register> - show memory contents as qwords
[~thread] dq <register> <size> - show memory contents as qwords
[~process] dq <address> - show memory contents as qwords
[~process] dq <address-address> - show memory contents as qwords
[~process] dq <address> <size> - show memory contents as qwords
"""
self.print_memory_display(arg, HexDump.hexblock_qword)
self.last_display_command = self.do_dq
# XXX TODO
# Change the way the default is used with ds and du
def do_ds(self, arg):
"""
[~thread] ds <register> - show memory contents as ANSI string
[~process] ds <address> - show memory contents as ANSI string
"""
if not arg:
arg = self.default_display_target
token_list = self.split_tokens(arg, 1, 1)
pid, tid, address, size = self.input_display(token_list, 256)
process = self.get_process(pid)
data = process.peek_string(address, False, size)
if data:
print(repr(data))
self.last_display_command = self.do_ds
def do_du(self, arg):
"""
[~thread] du <register> - show memory contents as Unicode string
[~process] du <address> - show memory contents as Unicode string
"""
if not arg:
arg = self.default_display_target
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_display(token_list, 256)
process = self.get_process(pid)
data = process.peek_string(address, True, size)
if data:
print(repr(data))
self.last_display_command = self.do_du
def do_register(self, arg):
"""
[~thread] r - print the value of all registers
[~thread] r <register> - print the value of a register
[~thread] r <register>=<value> - change the value of a register
[~thread] register - print the value of all registers
[~thread] register <register> - print the value of a register
[~thread] register <register>=<value> - change the value of a register
"""
arg = arg.strip()
if not arg:
self.print_current_location()
else:
equ = arg.find('=')
if equ >= 0:
register = arg[:equ].strip()
value = arg[equ+1:].strip()
if not value:
value = '0'
self.change_register(register, value)
else:
value = self.input_register(arg)
if value is None:
raise CmdError("unknown register: %s" % arg)
try:
label = None
thread = self.get_thread_from_prefix()
process = thread.get_process()
module = process.get_module_at_address(value)
if module:
label = module.get_label_at_address(value)
except RuntimeError:
label = None
reg = arg.upper()
val = HexDump.address(value)
if label:
print("%s: %s (%s)" % (reg, val, label))
else:
print("%s: %s" % (reg, val))
do_r = do_register
def do_eb(self, arg):
"""
[~process] eb <address> <data> - write the data to the specified address
"""
# TODO
# data parameter should be optional, use a child Cmd here
pid = self.get_process_id_from_prefix()
token_list = self.split_tokens(arg, 2)
address = self.input_address(token_list[0], pid)
data = HexInput.hexadecimal(' '.join(token_list[1:]))
self.write_memory(address, data, pid)
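# Illustrative usage of the eb command (address and bytes are hypothetical):
#     eb 00401000 90 90 90    -> write three NOP bytes at address 0x00401000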
# XXX TODO
# add ew, ed and eq here
def do_find(self, arg):
"""
[~process] f <string> - find the string in the process memory
[~process] find <string> - find the string in the process memory
"""
if not arg:
raise CmdError("missing parameter: string")
process = self.get_process_from_prefix()
self.find_in_memory(arg, process)
do_f = do_find
def do_memory(self, arg):
"""
[~process] m - show the process memory map
[~process] memory - show the process memory map
"""
if arg: # TODO: take min and max addresses
raise CmdError("too many arguments")
process = self.get_process_from_prefix()
try:
memoryMap = process.get_memory_map()
mappedFilenames = process.get_mapped_filenames()
print('')
print(CrashDump.dump_memory_map(memoryMap, mappedFilenames))
except WindowsError:
msg = "can't get memory information for process (%d)"
raise CmdError(msg % process.get_pid())
do_m = do_memory
#------------------------------------------------------------------------------
# Event handling
# TODO
# * add configurable stop/don't stop behavior on events and exceptions
# Stop for all events, unless stated otherwise.
def event(self, event):
self.print_event(event)
self.prompt_user()
# Stop for all exceptions, unless stated otherwise.
def exception(self, event):
self.print_exception(event)
self.prompt_user()
# Stop for breakpoint exceptions.
def breakpoint(self, event):
if hasattr(event, 'breakpoint') and event.breakpoint:
self.print_breakpoint_location(event)
else:
self.print_exception(event)
self.prompt_user()
# Stop for WOW64 breakpoint exceptions.
def wow64_breakpoint(self, event):
self.print_exception(event)
self.prompt_user()
# Stop for single step exceptions.
def single_step(self, event):
if event.debug.is_tracing(event.get_tid()):
self.print_breakpoint_location(event)
else:
self.print_exception(event)
self.prompt_user()
# Don't stop for C++ exceptions.
def ms_vc_exception(self, event):
self.print_exception(event)
event.continueStatus = win32.DBG_CONTINUE
# Don't stop for process start.
def create_process(self, event):
self.print_process_start(event)
self.print_thread_start(event)
self.print_module_load(event)
# Don't stop for process exit.
def exit_process(self, event):
self.print_process_end(event)
# Don't stop for thread creation.
def create_thread(self, event):
self.print_thread_start(event)
# Don't stop for thread exit.
def exit_thread(self, event):
self.print_thread_end(event)
# Don't stop for DLL load.
def load_dll(self, event):
self.print_module_load(event)
# Don't stop for DLL unload.
def unload_dll(self, event):
self.print_module_unload(event)
# Don't stop for debug strings.
def output_string(self, event):
self.print_debug_string(event)
#------------------------------------------------------------------------------
# History file
def load_history(self):
global readline
if readline is None:
try:
import readline
except ImportError:
return
if self.history_file_full_path is None:
folder = os.environ.get('USERPROFILE', '')
if not folder:
folder = os.environ.get('HOME', '')
if not folder:
folder = os.path.split(sys.argv[0])[0]  # directory containing the script
if not folder:
folder = os.path.curdir
self.history_file_full_path = os.path.join(folder,
self.history_file)
try:
if os.path.exists(self.history_file_full_path):
readline.read_history_file(self.history_file_full_path)
except IOError:
e = sys.exc_info()[1]
warnings.warn("Cannot load history file, reason: %s" % str(e))
def save_history(self):
if self.history_file_full_path is not None:
global readline
if readline is None:
try:
import readline
except ImportError:
return
try:
readline.write_history_file(self.history_file_full_path)
except IOError:
e = sys.exc_info()[1]
warnings.warn("Cannot save history file, reason: %s" % str(e))
#------------------------------------------------------------------------------
# Main loop
# Debugging loop.
def loop(self):
self.debuggerExit = False
debug = self.debug
# Stop on the initial event, if any.
if self.lastEvent is not None:
self.cmdqueue.append('r')
self.prompt_user()
# Loop until the debugger is told to quit.
while not self.debuggerExit:
try:
# If for some reason the last event wasn't continued,
# continue it here. This won't be done more than once
# for a given Event instance, though.
try:
debug.cont()
# On error, show the command prompt.
except Exception:
traceback.print_exc()
self.prompt_user()
# While debugees are attached, handle debug events.
# Some debug events may cause the command prompt to be shown.
if self.debug.get_debugee_count() > 0:
try:
# Get the next debug event.
debug.wait()
# Dispatch the debug event.
try:
debug.dispatch()
# Continue the debug event.
finally:
debug.cont()
# On error, show the command prompt.
except Exception:
traceback.print_exc()
self.prompt_user()
# While no debugees are attached, show the command prompt.
else:
self.prompt_user()
# When the user presses Ctrl-C send a debug break to all debugees.
except KeyboardInterrupt:
success = False
try:
print("*** User requested debug break")
system = debug.system
for pid in debug.get_debugee_pids():
try:
system.get_process(pid).debug_break()
success = True
except:
traceback.print_exc()
except:
traceback.print_exc()
if not success:
raise # This should never happen!
| apache-2.0 |
rgerkin/pyNeuroML | pyneuroml/tune/NeuroMLSimulation.py | 1 | 5357 | '''
A class for running a single instance of a NeuroML model by generating a
LEMS file and using pyNeuroML to run in a chosen simulator
'''
import sys
import time
from pyneuroml import pynml
from pyneuroml.lems import generate_lems_file_for_neuroml
try:
import pyelectro # Not used here, just for checking installation
except ImportError:
print('>> Note: pyelectro from https://github.com/pgleeson/pyelectro is required!')
exit()
try:
import neurotune # Not used here, just for checking installation
except ImportError:
print('>> Note: neurotune from https://github.com/pgleeson/neurotune is required!')
exit()
class NeuroMLSimulation(object):
def __init__(self,
reference,
neuroml_file,
target,
sim_time=1000,
dt=0.05,
simulator='jNeuroML',
generate_dir = './',
cleanup = True,
nml_doc = None):
self.sim_time = sim_time
self.dt = dt
self.simulator = simulator
self.generate_dir = generate_dir if generate_dir.endswith('/') else generate_dir+'/'
self.reference = reference
self.target = target
self.neuroml_file = neuroml_file
self.nml_doc = nml_doc
self.cleanup = cleanup
self.already_run = False
def show(self):
"""
Plot the result of the simulation once it has been run (via go())
"""
from matplotlib import pyplot as plt
if self.already_run:
for ref in self.volts.keys():
plt.plot(self.t, self.volts[ref], label=ref)
plt.title("Simulation voltage vs time")
plt.legend()
plt.xlabel("Time [ms]")
plt.ylabel("Voltage [mV]")
else:
pynml.print_comment("Run the simulation with go() before plotting.", True)
plt.show()
def go(self):
lems_file_name = 'LEMS_%s.xml'%(self.reference)
generate_lems_file_for_neuroml(self.reference,
self.neuroml_file,
self.target,
self.sim_time,
self.dt,
lems_file_name = lems_file_name,
target_dir = self.generate_dir,
nml_doc = self.nml_doc)
pynml.print_comment_v("Running a simulation of %s ms with timestep %s ms: %s"%(self.sim_time, self.dt, lems_file_name))
self.already_run = True
start = time.time()
if self.simulator == 'jNeuroML':
results = pynml.run_lems_with_jneuroml(lems_file_name,
nogui=True,
load_saved_data=True,
plot=False,
exec_in_dir = self.generate_dir,
verbose=False,
cleanup=self.cleanup)
elif self.simulator == 'jNeuroML_NEURON':
results = pynml.run_lems_with_jneuroml_neuron(lems_file_name,
nogui=True,
load_saved_data=True,
plot=False,
exec_in_dir = self.generate_dir,
verbose=False,
cleanup=self.cleanup)
else:
pynml.print_comment_v('Unsupported simulator: %s'%self.simulator)
exit()
secs = time.time()-start
pynml.print_comment_v("Ran simulation in %s in %f seconds (%f mins)\n\n"%(self.simulator, secs, secs/60.0))
self.t = [t*1000 for t in results['t']]
self.volts = {}
for key in results.keys():
if key != 't':
self.volts[key] = [v*1000 for v in results[key]]
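# After go() the recorded traces can be read directly; a minimal sketch
# (the trace id below is hypothetical and depends on the NeuroML model):
#     sim = NeuroMLSimulation('TestHH', 'HHCellNetwork.net.nml', 'HHCellNetwork')
#     sim.go()
#     times_ms = sim.t                    # time points in ms
#     v_mv = sim.volts['hhpop[0]/v']      # one membrane potential trace in mV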
if __name__ == '__main__':
sim_time = 700
dt = 0.05
if len(sys.argv) == 2 and sys.argv[1] == '-net':
sim = NeuroMLSimulation('TestNet',
'../../examples/test_data/simplenet.nml',
'simplenet',
sim_time,
dt,
'jNeuroML',
'temp/')
sim.go()
sim.show()
else:
sim = NeuroMLSimulation('TestHH',
'../../examples/test_data/HHCellNetwork.net.nml',
'HHCellNetwork',
sim_time,
dt,
'jNeuroML',
'temp')
sim.go()
sim.show()
| lgpl-3.0 |
openhatch/new-mini-tasks | vendor/packages/Django/django/contrib/gis/geos/point.py | 224 | 4351 | from ctypes import c_uint
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos import prototypes as capi
from django.utils import six
from django.utils.six.moves import xrange
class Point(GEOSGeometry):
_minlength = 2
_maxlength = 3
def __init__(self, x, y=None, z=None, srid=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
>>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, six.integer_types + (float,)) and isinstance(y, six.integer_types + (float,)):
# Here X, Y, and (optionally) Z were passed in individually, as parameters.
if isinstance(z, six.integer_types + (float,)):
ndim = 3
coords = [x, y, z]
else:
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
point = self._create_point(ndim, coords)
# Initializing using the address returned from the GEOS
# createPoint factory.
super(Point, self).__init__(point, srid=srid)
def _create_point(self, ndim, coords):
"""
Create a coordinate sequence, set X, Y, [Z], and create point
"""
if ndim < 2 or ndim > 3:
raise TypeError('Invalid point dimension: %s' % str(ndim))
cs = capi.create_cs(c_uint(1), c_uint(ndim))
i = iter(coords)
capi.cs_setx(cs, 0, next(i))
capi.cs_sety(cs, 0, next(i))
if ndim == 3: capi.cs_setz(cs, 0, next(i))
return capi.create_point(cs)
def _set_list(self, length, items):
ptr = self._create_point(length, items)
if ptr:
capi.destroy_geom(self.ptr)
self._ptr = ptr
self._set_cs()
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._cs.setOrdinate(index, 0, value)
def __iter__(self):
"Allows iteration over coordinates of this Point."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of dimensions for this Point (either 0, 2 or 3)."
if self.empty: return 0
if self.hasz: return 3
else: return 2
def _get_single_external(self, index):
if index == 0:
return self.x
elif index == 1:
return self.y
elif index == 2:
return self.z
_get_single_internal = _get_single_external
def get_x(self):
"Returns the X component of the Point."
return self._cs.getOrdinate(0, 0)
def set_x(self, value):
"Sets the X component of the Point."
self._cs.setOrdinate(0, 0, value)
def get_y(self):
"Returns the Y component of the Point."
return self._cs.getOrdinate(1, 0)
def set_y(self, value):
"Sets the Y component of the Point."
self._cs.setOrdinate(1, 0, value)
def get_z(self):
"Returns the Z component of the Point."
if self.hasz:
return self._cs.getOrdinate(2, 0)
else:
return None
def set_z(self, value):
"Sets the Z component of the Point."
if self.hasz:
self._cs.setOrdinate(2, 0, value)
else:
raise GEOSException('Cannot set Z on 2D Point.')
# X, Y, Z properties
x = property(get_x, set_x)
y = property(get_y, set_y)
z = property(get_z, set_z)
### Tuple setting and retrieval routines. ###
def get_coords(self):
"Returns a tuple of the point."
return self._cs.tuple
def set_coords(self, tup):
"Sets the coordinates of the point with the given tuple."
self._cs[0] = tup
# The tuple and coords properties
tuple = property(get_coords, set_coords)
coords = tuple
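# Illustrative usage of the Point API defined above (not part of the
# original module):
#     >>> pnt = Point(5, 23)
#     >>> pnt.x, pnt.y
#     (5.0, 23.0)
#     >>> pnt.coords = (1.0, 2.0)
#     >>> pnt.tuple
#     (1.0, 2.0)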
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/IPython/terminal/pt_inputhooks/wx.py | 8 | 5441 | """Enable wxPython to be used interactively with prompt_toolkit
"""
from __future__ import absolute_import
import sys
import signal
import time
from timeit import default_timer as clock
import wx
def inputhook_wx1(context):
"""Run the wx event loop by processing pending events only.
This approach seems to work, but its performance is not great as it
relies on having PyOS_InputHook called regularly.
"""
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
# Make a temporary event loop and process system events until
# there are no more waiting, then allow idle events (which
# will also deal with pending or posted wx events.)
evtloop = wx.EventLoop()
ea = wx.EventLoopActivator(evtloop)
while evtloop.Pending():
evtloop.Dispatch()
app.ProcessIdle()
del ea
except KeyboardInterrupt:
pass
return 0
class EventLoopTimer(wx.Timer):
def __init__(self, func):
self.func = func
wx.Timer.__init__(self)
def Notify(self):
self.func()
class EventLoopRunner(object):
def Run(self, time, input_is_ready):
self.input_is_ready = input_is_ready
self.evtloop = wx.EventLoop()
self.timer = EventLoopTimer(self.check_stdin)
self.timer.Start(time)
self.evtloop.Run()
def check_stdin(self):
if self.input_is_ready():
self.timer.Stop()
self.evtloop.Exit()
def inputhook_wx2(context):
"""Run the wx event loop, polling for stdin.
This version runs the wx eventloop for an undetermined amount of time,
during which it periodically checks to see if anything is ready on
stdin. If anything is ready on stdin, the event loop exits.
The argument to elr.Run controls how often the event loop looks at stdin.
This determines the responsiveness at the keyboard. A setting of 1000
enables a user to type at most 1 char per second. I have found that a
setting of 10 gives good keyboard response. We can shorten it further,
but eventually performance would suffer from calling select/kbhit too
often.
"""
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
elr = EventLoopRunner()
# As this time is made shorter, keyboard response improves, but idle
# CPU load goes up. 10 ms seems like a good compromise.
elr.Run(time=10, # CHANGE time here to control polling interval
input_is_ready=context.input_is_ready)
except KeyboardInterrupt:
pass
return 0
def inputhook_wx3(context):
"""Run the wx event loop by processing pending events only.
This is like inputhook_wx1, but it keeps processing pending events
until stdin is ready. After processing all pending events, a call to
time.sleep is inserted. This is needed; otherwise CPU usage stays at 100%.
The sleep time should be tuned for best performance.
"""
# We need to protect against a user pressing Control-C when IPython is
# idle and this is running. We trap KeyboardInterrupt and pass.
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
# The import of wx on Linux sets the handler for signal.SIGINT
# to 0. This is a bug in wx or gtk. We fix by just setting it
# back to the Python default.
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
evtloop = wx.EventLoop()
ea = wx.EventLoopActivator(evtloop)
t = clock()
while not context.input_is_ready():
while evtloop.Pending():
t = clock()
evtloop.Dispatch()
app.ProcessIdle()
# We need to sleep at this point to keep the idle CPU load
# low. However, if we sleep too long, GUI response is poor. As
# a compromise, we watch how often GUI events are being processed
# and switch between a short and long sleep time. Here are some
# stats useful in helping to tune this.
# time CPU load
# 0.001 13%
# 0.005 3%
# 0.01 1.5%
# 0.05 0.5%
used_time = clock() - t
if used_time > 10.0:
# print 'Sleep for 1 s' # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print 'Sleep for 0.05 s' # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
del ea
except KeyboardInterrupt:
pass
return 0
if sys.platform == 'darwin':
# On OSX, evtloop.Pending() always returns True, regardless of there being
# any events pending. As such we can't use implementations 1 or 3 of the
# inputhook as those depend on a pending/dispatch loop.
inputhook = inputhook_wx2
else:
# This is our default implementation
inputhook = inputhook_wx3
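# Sketch of how prompt_toolkit is expected to drive this hook; the context
# object with an input_is_ready() callable is an assumption based on the
# signatures above:
#     from IPython.terminal.pt_inputhooks.wx import inputhook
#     inputhook(context)  # pumps wx events until input_is_ready() is True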
| gpl-3.0 |
bearstech/ansible | test/units/module_utils/test_network_common.py | 31 | 5437 | # -*- coding: utf-8 -*-
#
# (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.module_utils.network_common import to_list, sort_list
from ansible.module_utils.network_common import dict_diff, dict_merge
from ansible.module_utils.network_common import conditional, Template
class TestModuleUtilsNetworkCommon(unittest.TestCase):
def test_to_list(self):
for scalar in ('string', 1, True, False, None):
self.assertTrue(isinstance(to_list(scalar), list))
for container in ([1, 2, 3], {'one': 1}):
self.assertTrue(isinstance(to_list(container), list))
test_list = [1, 2, 3]
self.assertNotEqual(id(test_list), id(to_list(test_list)))
def test_sort(self):
data = [3, 1, 2]
self.assertEqual([1, 2, 3], sort_list(data))
string_data = '123'
self.assertEqual(string_data, sort_list(string_data))
def test_dict_diff(self):
base = dict(obj2=dict(), b1=True, b2=False, b3=False,
one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
l1=[1, 3], l2=[1, 2, 3], l4=[4],
nested=dict(n1=dict(n2=2)))
other = dict(b1=True, b2=False, b3=True, b4=True,
one=1, three=4, four=4, obj1=dict(key1=2),
l1=[2, 1], l2=[3, 2, 1], l3=[1],
nested=dict(n1=dict(n2=2, n3=3)))
result = dict_diff(base, other)
# string assertions
self.assertNotIn('one', result)
self.assertNotIn('two', result)
self.assertEqual(result['three'], 4)
self.assertEqual(result['four'], 4)
# dict assertions
self.assertIn('obj1', result)
self.assertIn('key1', result['obj1'])
self.assertNotIn('key2', result['obj1'])
# list assertions
self.assertEqual(result['l1'], [2, 1])
self.assertNotIn('l2', result)
self.assertEqual(result['l3'], [1])
self.assertNotIn('l4', result)
# nested assertions
self.assertIn('obj1', result)
self.assertEqual(result['obj1']['key1'], 2)
self.assertNotIn('key2', result['obj1'])
# bool assertions
self.assertNotIn('b1', result)
self.assertNotIn('b2', result)
self.assertTrue(result['b3'])
self.assertTrue(result['b4'])
def test_dict_merge(self):
base = dict(obj2=dict(), b1=True, b2=False, b3=False,
one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
l1=[1, 3], l2=[1, 2, 3], l4=[4],
nested=dict(n1=dict(n2=2)))
other = dict(b1=True, b2=False, b3=True, b4=True,
one=1, three=4, four=4, obj1=dict(key1=2),
l1=[2, 1], l2=[3, 2, 1], l3=[1],
nested=dict(n1=dict(n2=2, n3=3)))
result = dict_merge(base, other)
# string assertions
self.assertIn('one', result)
self.assertIn('two', result)
self.assertEqual(result['three'], 4)
self.assertEqual(result['four'], 4)
# dict assertions
self.assertIn('obj1', result)
self.assertIn('key1', result['obj1'])
self.assertIn('key2', result['obj1'])
# list assertions
self.assertEqual(result['l1'], [1, 2, 3])
self.assertIn('l2', result)
self.assertEqual(result['l3'], [1])
self.assertIn('l4', result)
# nested assertions
self.assertIn('obj1', result)
self.assertEqual(result['obj1']['key1'], 2)
self.assertIn('key2', result['obj1'])
# bool assertions
self.assertIn('b1', result)
self.assertIn('b2', result)
self.assertTrue(result['b3'])
self.assertTrue(result['b4'])
def test_conditional(self):
self.assertTrue(conditional(10, 10))
self.assertTrue(conditional('10', '10'))
self.assertTrue(conditional('foo', 'foo'))
self.assertTrue(conditional(True, True))
self.assertTrue(conditional(False, False))
self.assertTrue(conditional(None, None))
self.assertTrue(conditional("ge(1)", 1))
self.assertTrue(conditional("gt(1)", 2))
self.assertTrue(conditional("le(2)", 2))
self.assertTrue(conditional("lt(3)", 2))
self.assertTrue(conditional("eq(1)", 1))
self.assertTrue(conditional("neq(0)", 1))
self.assertTrue(conditional("min(1)", 1))
self.assertTrue(conditional("max(1)", 1))
self.assertTrue(conditional("exactly(1)", 1))
def test_template(self):
tmpl = Template()
self.assertEqual('foo', tmpl('{{ test }}', {'test': 'foo'}))
| gpl-3.0 |
heke123/chromium-crosswalk | build/android/buildbot/bb_device_status_check.py | 7 | 14834 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A class to keep track of devices across builds and report state."""
import argparse
import json
import logging
import os
import psutil
import re
import signal
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import devil_chromium
from devil import devil_env
from devil.android import battery_utils
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_list
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.constants import exit_codes
from devil.utils import lsusb
from devil.utils import reset_usb
from devil.utils import run_tests_helper
from pylib.constants import host_paths
_RE_DEVICE_ID = re.compile(r'Device ID = (\d+)')
def KillAllAdb():
def GetAllAdb():
for p in psutil.process_iter():
try:
if 'adb' in p.name:
yield p
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
for sig in [signal.SIGTERM, signal.SIGQUIT, signal.SIGKILL]:
for p in GetAllAdb():
try:
logging.info('kill %d %d (%s [%s])', sig, p.pid, p.name,
' '.join(p.cmdline))
p.send_signal(sig)
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
for p in GetAllAdb():
try:
logging.error('Unable to kill %d (%s [%s])', p.pid, p.name,
' '.join(p.cmdline))
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
def _IsBlacklisted(serial, blacklist):
return blacklist and serial in blacklist.Read()
def _BatteryStatus(device, blacklist):
battery_info = {}
try:
battery = battery_utils.BatteryUtils(device)
battery_info = battery.GetBatteryInfo(timeout=5)
battery_level = int(battery_info.get('level', 100))
if battery_level < 15:
logging.error('Critically low battery level (%d)', battery_level)
battery = battery_utils.BatteryUtils(device)
if not battery.GetCharging():
battery.SetCharging(True)
if blacklist:
blacklist.Extend([device.adb.GetDeviceSerial()], reason='low_battery')
except device_errors.CommandFailedError:
logging.exception('Failed to get battery information for %s',
str(device))
return battery_info
def _IMEISlice(device):
imei_slice = ''
try:
for l in device.RunShellCommand(['dumpsys', 'iphonesubinfo'],
check_return=True, timeout=5):
m = _RE_DEVICE_ID.match(l)
if m:
imei_slice = m.group(1)[-6:]
except device_errors.CommandFailedError:
logging.exception('Failed to get IMEI slice for %s', str(device))
return imei_slice
def DeviceStatus(devices, blacklist):
"""Generates status information for the given devices.
Args:
devices: The devices to generate status for.
blacklist: The current device blacklist.
Returns:
A dict of the following form:
{
'<serial>': {
'serial': '<serial>',
'adb_status': str,
'usb_status': bool,
'blacklisted': bool,
# only if the device is connected and not blacklisted
'type': ro.build.product,
'build': ro.build.id,
'build_detail': ro.build.fingerprint,
'battery': {
...
},
'imei_slice': str,
'wifi_ip': str,
},
...
}
"""
adb_devices = {
a[0].GetDeviceSerial(): a
for a in adb_wrapper.AdbWrapper.Devices(desired_state=None, long_list=True)
}
usb_devices = set(lsusb.get_android_devices())
def blacklisting_device_status(device):
serial = device.adb.GetDeviceSerial()
adb_status = (
adb_devices[serial][1] if serial in adb_devices
else 'missing')
usb_status = bool(serial in usb_devices)
device_status = {
'serial': serial,
'adb_status': adb_status,
'usb_status': usb_status,
}
if not _IsBlacklisted(serial, blacklist):
if adb_status == 'device':
try:
build_product = device.build_product
build_id = device.build_id
build_fingerprint = device.GetProp('ro.build.fingerprint', cache=True)
wifi_ip = device.GetProp('dhcp.wlan0.ipaddress')
battery_info = _BatteryStatus(device, blacklist)
imei_slice = _IMEISlice(device)
if (device.product_name == 'mantaray' and
battery_info.get('AC powered', None) != 'true'):
logging.error('Mantaray device not connected to AC power.')
device_status.update({
'ro.build.product': build_product,
'ro.build.id': build_id,
'ro.build.fingerprint': build_fingerprint,
'battery': battery_info,
'imei_slice': imei_slice,
'wifi_ip': wifi_ip,
# TODO(jbudorick): Remove these once no clients depend on them.
'type': build_product,
'build': build_id,
'build_detail': build_fingerprint,
})
except device_errors.CommandFailedError:
logging.exception('Failure while getting device status for %s.',
str(device))
if blacklist:
blacklist.Extend([serial], reason='status_check_failure')
except device_errors.CommandTimeoutError:
logging.exception('Timeout while getting device status for %s.',
str(device))
if blacklist:
blacklist.Extend([serial], reason='status_check_timeout')
elif blacklist:
blacklist.Extend([serial],
reason=adb_status if usb_status else 'offline')
device_status['blacklisted'] = _IsBlacklisted(serial, blacklist)
return device_status
parallel_devices = device_utils.DeviceUtils.parallel(devices)
statuses = parallel_devices.pMap(blacklisting_device_status).pGet(None)
return statuses
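# Illustrative consumption of DeviceStatus output (field access only; the
# filtering policy below is hypothetical):
#     statuses = DeviceStatus(devices, blacklist)
#     unhealthy = [s['serial'] for s in statuses
#                  if s['adb_status'] != 'device' or s['blacklisted']]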
def RecoverDevices(devices, blacklist):
"""Attempts to recover any inoperable devices in the provided list.
Args:
devices: The list of devices to attempt to recover.
blacklist: The current device blacklist, which will be used and
then reset.
Returns:
Nothing.
"""
statuses = DeviceStatus(devices, blacklist)
should_restart_usb = set(
status['serial'] for status in statuses
if (not status['usb_status']
or status['adb_status'] in ('offline', 'missing')))
should_restart_adb = should_restart_usb.union(set(
status['serial'] for status in statuses
if status['adb_status'] == 'unauthorized'))
should_reboot_device = should_restart_adb.union(set(
status['serial'] for status in statuses
if status['blacklisted']))
logging.debug('Should restart USB for:')
for d in should_restart_usb:
logging.debug(' %s', d)
logging.debug('Should restart ADB for:')
for d in should_restart_adb:
logging.debug(' %s', d)
logging.debug('Should reboot:')
for d in should_reboot_device:
logging.debug(' %s', d)
if blacklist:
blacklist.Reset()
if should_restart_adb:
KillAllAdb()
for serial in should_restart_usb:
try:
reset_usb.reset_android_usb(serial)
except IOError:
logging.exception('Unable to reset USB for %s.', serial)
if blacklist:
blacklist.Extend([serial], reason='usb_failure')
except device_errors.DeviceUnreachableError:
logging.exception('Unable to reset USB for %s.', serial)
if blacklist:
blacklist.Extend([serial], reason='offline')
def blacklisting_recovery(device):
if _IsBlacklisted(device.adb.GetDeviceSerial(), blacklist):
logging.debug('%s is blacklisted, skipping recovery.', str(device))
return
if str(device) in should_reboot_device:
try:
device.WaitUntilFullyBooted(retries=0)
return
except (device_errors.CommandTimeoutError,
device_errors.CommandFailedError):
logging.exception('Failure while waiting for %s. '
'Attempting to recover.', str(device))
try:
try:
device.Reboot(block=False, timeout=5, retries=0)
except device_errors.CommandTimeoutError:
logging.warning('Timed out while attempting to reboot %s normally. '
'Attempting alternative reboot.', str(device))
# The device drops offline before we can grab the exit code, so
# we don't check for status.
device.adb.Root()
device.adb.Shell('echo b > /proc/sysrq-trigger', expect_status=None,
timeout=5, retries=0)
except device_errors.CommandFailedError:
logging.exception('Failed to reboot %s.', str(device))
if blacklist:
blacklist.Extend([device.adb.GetDeviceSerial()],
reason='reboot_failure')
except device_errors.CommandTimeoutError:
logging.exception('Timed out while rebooting %s.', str(device))
if blacklist:
blacklist.Extend([device.adb.GetDeviceSerial()],
reason='reboot_timeout')
try:
device.WaitUntilFullyBooted(retries=0)
except device_errors.CommandFailedError:
logging.exception('Failure while waiting for %s.', str(device))
if blacklist:
blacklist.Extend([device.adb.GetDeviceSerial()],
reason='reboot_failure')
except device_errors.CommandTimeoutError:
logging.exception('Timed out while waiting for %s.', str(device))
if blacklist:
blacklist.Extend([device.adb.GetDeviceSerial()],
reason='reboot_timeout')
device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_recovery)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--out-dir',
help='Directory where the device path is stored',
default=os.path.join(host_paths.DIR_SOURCE_ROOT, 'out'))
parser.add_argument('--restart-usb', action='store_true',
help='DEPRECATED. '
'This script now always tries to reset USB.')
parser.add_argument('--json-output',
help='Output JSON information into a specified file.')
parser.add_argument('--adb-path',
help='Absolute path to the adb binary to use.')
parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
parser.add_argument('--known-devices-file', action='append', default=[],
dest='known_devices_files',
help='Path to known device lists.')
parser.add_argument('-v', '--verbose', action='count', default=1,
help='Log more information.')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose)
devil_custom_deps = None
if args.adb_path:
devil_custom_deps = {
'adb': {
devil_env.GetPlatform(): [args.adb_path],
},
}
devil_chromium.Initialize(custom_deps=devil_custom_deps)
blacklist = (device_blacklist.Blacklist(args.blacklist_file)
if args.blacklist_file
else None)
last_devices_path = os.path.join(
args.out_dir, device_list.LAST_DEVICES_FILENAME)
args.known_devices_files.append(last_devices_path)
expected_devices = set()
try:
for path in args.known_devices_files:
if os.path.exists(path):
expected_devices.update(device_list.GetPersistentDeviceList(path))
except IOError:
logging.warning('Problem reading %s, skipping.', path)
logging.info('Expected devices:')
for device in expected_devices:
logging.info(' %s', device)
usb_devices = set(lsusb.get_android_devices())
devices = [device_utils.DeviceUtils(s)
for s in expected_devices.union(usb_devices)]
RecoverDevices(devices, blacklist)
statuses = DeviceStatus(devices, blacklist)
# Log the state of all devices.
for status in statuses:
logging.info(status['serial'])
adb_status = status.get('adb_status')
blacklisted = status.get('blacklisted')
logging.info(' USB status: %s',
'online' if status.get('usb_status') else 'offline')
logging.info(' ADB status: %s', adb_status)
logging.info(' Blacklisted: %s', str(blacklisted))
if adb_status == 'device' and not blacklisted:
logging.info(' Device type: %s', status.get('ro.build.product'))
logging.info(' OS build: %s', status.get('ro.build.id'))
logging.info(' OS build fingerprint: %s',
status.get('ro.build.fingerprint'))
logging.info(' Battery state:')
for k, v in status.get('battery', {}).iteritems():
logging.info(' %s: %s', k, v)
logging.info(' IMEI slice: %s', status.get('imei_slice'))
logging.info(' WiFi IP: %s', status.get('wifi_ip'))
# Update the last devices file(s).
for path in args.known_devices_files:
device_list.WritePersistentDeviceList(
path, [status['serial'] for status in statuses])
# Write device info to file for buildbot info display.
if os.path.exists('/home/chrome-bot'):
with open('/home/chrome-bot/.adb_device_info', 'w') as f:
for status in statuses:
try:
if status['adb_status'] == 'device':
f.write('{serial} {adb_status} {build_product} {build_id} '
'{temperature:.1f}C {level}%\n'.format(
serial=status['serial'],
adb_status=status['adb_status'],
build_product=status['type'],
build_id=status['build'],
temperature=float(status['battery']['temperature']) / 10,
level=status['battery']['level']
))
elif status.get('usb_status', False):
f.write('{serial} {adb_status}\n'.format(
serial=status['serial'],
adb_status=status['adb_status']
))
else:
f.write('{serial} offline\n'.format(
serial=status['serial']
))
except Exception: # pylint: disable=broad-except
pass
# Dump the device statuses to JSON.
if args.json_output:
with open(args.json_output, 'wb') as f:
f.write(json.dumps(statuses, indent=4))
live_devices = [status['serial'] for status in statuses
if (status['adb_status'] == 'device'
and not _IsBlacklisted(status['serial'], blacklist))]
# If all devices failed, or if there are no devices, it's an infra error.
return 0 if live_devices else exit_codes.INFRA
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
RonnyPfannschmidt/pytest | testing/test_faulthandler.py | 3 | 5123 | import io
import sys
import pytest
from _pytest.pytester import Pytester
def test_enabled(pytester: Pytester) -> None:
"""Test single crashing test displays a traceback."""
pytester.makepyfile(
"""
import faulthandler
def test_crash():
faulthandler._sigabrt()
"""
)
result = pytester.runpytest_subprocess()
result.stderr.fnmatch_lines(["*Fatal Python error*"])
assert result.ret != 0
def setup_crashing_test(pytester: Pytester) -> None:
pytester.makepyfile(
"""
import faulthandler
import atexit
def test_ok():
atexit.register(faulthandler._sigabrt)
"""
)
def test_crash_during_shutdown_captured(pytester: Pytester) -> None:
"""
Re-enable faulthandler if pytest encountered it enabled during configure.
We should then be able to see crashes during interpreter shutdown.
"""
setup_crashing_test(pytester)
args = (sys.executable, "-Xfaulthandler", "-mpytest")
result = pytester.run(*args)
result.stderr.fnmatch_lines(["*Fatal Python error*"])
assert result.ret != 0
def test_crash_during_shutdown_not_captured(pytester: Pytester) -> None:
"""
Check that pytest leaves faulthandler disabled if it was not enabled during configure.
This prevents us from seeing crashes during interpreter shutdown (see #8260).
"""
setup_crashing_test(pytester)
args = (sys.executable, "-mpytest")
result = pytester.run(*args)
result.stderr.no_fnmatch_line("*Fatal Python error*")
assert result.ret != 0
def test_disabled(pytester: Pytester) -> None:
"""Test option to disable fault handler in the command line."""
pytester.makepyfile(
"""
import faulthandler
def test_disabled():
assert not faulthandler.is_enabled()
"""
)
result = pytester.runpytest_subprocess("-p", "no:faulthandler")
result.stdout.fnmatch_lines(["*1 passed*"])
assert result.ret == 0
@pytest.mark.parametrize(
"enabled",
[
pytest.param(
True, marks=pytest.mark.skip(reason="sometimes crashes on CI (#7022)")
),
False,
],
)
def test_timeout(pytester: Pytester, enabled: bool) -> None:
"""Test option to dump tracebacks after a certain timeout.
If faulthandler is disabled, no traceback will be dumped.
"""
pytester.makepyfile(
"""
import os, time
def test_timeout():
time.sleep(1 if "CI" in os.environ else 0.1)
"""
)
pytester.makeini(
"""
[pytest]
faulthandler_timeout = 0.01
"""
)
args = ["-p", "no:faulthandler"] if not enabled else []
result = pytester.runpytest_subprocess(*args)
tb_output = "most recent call first"
if enabled:
result.stderr.fnmatch_lines(["*%s*" % tb_output])
else:
assert tb_output not in result.stderr.str()
result.stdout.fnmatch_lines(["*1 passed*"])
assert result.ret == 0
@pytest.mark.parametrize("hook_name", ["pytest_enter_pdb", "pytest_exception_interact"])
def test_cancel_timeout_on_hook(monkeypatch, hook_name) -> None:
"""Make sure that we are cancelling any scheduled traceback dumping due
to timeout before entering pdb (pytest-dev/pytest-faulthandler#12) or any
other interactive exception (pytest-dev/pytest-faulthandler#14)."""
import faulthandler
from _pytest import faulthandler as faulthandler_plugin
called = []
monkeypatch.setattr(
faulthandler, "cancel_dump_traceback_later", lambda: called.append(1)
)
# call our hook explicitly, we can trust that pytest will call the hook
# for us at the appropriate moment
hook_func = getattr(faulthandler_plugin, hook_name)
hook_func()
assert called == [1]
def test_already_initialized_crash(pytester: Pytester) -> None:
"""Even if faulthandler is already initialized, we still dump tracebacks on crashes (#8258)."""
pytester.makepyfile(
"""
def test():
import faulthandler
faulthandler._sigabrt()
"""
)
result = pytester.run(
sys.executable,
"-X",
"faulthandler",
"-mpytest",
pytester.path,
)
result.stderr.fnmatch_lines(["*Fatal Python error*"])
assert result.ret != 0
def test_get_stderr_fileno_invalid_fd() -> None:
"""Test for faulthandler being able to handle invalid file descriptors for stderr (#8249)."""
from _pytest.faulthandler import get_stderr_fileno
class StdErrWrapper(io.StringIO):
"""
Mimic ``twisted.logger.LoggingFile`` to simulate returning an invalid file descriptor.
https://github.com/twisted/twisted/blob/twisted-20.3.0/src/twisted/logger/_io.py#L132-L139
"""
def fileno(self):
return -1
wrapper = StdErrWrapper()
with pytest.MonkeyPatch.context() as mp:
mp.setattr("sys.stderr", wrapper)
# Even when the stderr wrapper signals an invalid file descriptor,
# ``_get_stderr_fileno()`` should return the real one.
assert get_stderr_fileno() == 2
| mit |
shiftcontrol/UnityOpenCV | opencv/tests/swig_python/highgui/match.py | 3 | 1348 | """
This script compares two images and decides, given a threshold,
whether the two images are "equal enough"
"""
# import the necessary things for OpenCV
from cv import *
from highgui import *
import frames
import sys
import os
PREFIX=os.path.join(os.environ["srcdir"],"../../opencv_extra/testdata/python/images/")
DisplayImages=False
if DisplayImages:
videowindow="video"
referencewindow="reference"
cvNamedWindow(videowindow,CV_WINDOW_AUTOSIZE)
cvNamedWindow(referencewindow,CV_WINDOW_AUTOSIZE)
# returns True/False if match/non-match
def match( image, index, thres ):
# load image from comparison set
QCIFcompare=cvLoadImage(PREFIX+frames.QCIF[index])
if QCIFcompare is None:
print "Couldn't open image "+PREFIX+frames.QCIF[index]+" for comparison!"
sys.exit(1)
# resize comparison image to input image dimensions
size=cvSize(image.width,image.height)
compare=cvCreateImage(size,IPL_DEPTH_8U,image.nChannels)
cvResize(QCIFcompare,compare)
# compare images
diff=cvNorm( image, compare, CV_RELATIVE_L2 )
if DisplayImages:
cvShowImage(videowindow,image)
cvShowImage(referencewindow,compare)
if diff<=thres:
cvWaitKey(200)
else:
print "index==",index,": max==",thres," is==",diff
cvWaitKey(5000)
cvReleaseImage(QCIFcompare)
cvReleaseImage(compare)
if diff<=thres:
return True
else:
return False
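# Illustrative usage (frame index and threshold are hypothetical):
#     img = cvLoadImage("captured_frame.png")
#     if match(img, 0, 0.02):
#         print "frame 0 matches its reference within threshold"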
| gpl-3.0 |
klaus385/openpilot | selfdrive/crash.py | 2 | 1230 | """Install exception handler for process crash."""
import os
import sys
from selfdrive.version import version, dirty
from selfdrive.swaglog import cloudlog
if os.getenv("NOLOG") or os.getenv("NOCRASH"):
def capture_exception(*exc_info):
pass
def bind_user(**kwargs):
pass
def bind_extra(**kwargs):
pass
def install():
pass
else:
from raven import Client
from raven.transport.http import HTTPTransport
client = Client('https://1994756b5e6f41cf939a4c65de45f4f2:[email protected]/77924',
install_sys_hook=False, transport=HTTPTransport, release=version, tags={'dirty': dirty})
def capture_exception(*args, **kwargs):
client.captureException(*args, **kwargs)
cloudlog.error("crash", exc_info=kwargs.get('exc_info', 1))
def bind_user(**kwargs):
client.user_context(kwargs)
def bind_extra(**kwargs):
client.extra_context(kwargs)
def install():
# installs a sys.excepthook
__excepthook__ = sys.excepthook
def handle_exception(*exc_info):
if exc_info[0] not in (KeyboardInterrupt, SystemExit):
capture_exception(exc_info=exc_info)
__excepthook__(*exc_info)
sys.excepthook = handle_exception
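# Typical wiring from a process entry point (illustrative sketch; the
# identifier below is hypothetical):
#     import crash
#     crash.install()                       # route uncaught exceptions to Sentry
#     crash.bind_user(id="some-device-id")  # attach user context to reports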
| mit |
Azure/azure-sdk-for-python | sdk/powerbiembedded/azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/operation_py3.py | 1 | 1163 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Operation(Model):
"""Operation.
:param name: The name of the operation being performed on this particular
object. This name should match the action name that appears in RBAC / the
event service.
:type name: str
:param display:
:type display: ~azure.mgmt.powerbiembedded.models.Display
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'Display'},
}
def __init__(self, *, name: str=None, display=None, **kwargs) -> None:
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
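# Illustrative construction (the operation name is hypothetical):
#     op = Operation(name='Microsoft.PowerBI/workspaceCollections/read')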
| mit |
Anderson0026/mapproxy | mapproxy/script/conf/app.py | 1 | 6606 | # -:- encoding: utf-8 -:-
# This file is part of the MapProxy project.
# Copyright (C) 2013 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import sys
import os
import optparse
import logging
import textwrap
import datetime
import xml.etree.ElementTree
import yaml
from contextlib import contextmanager
from cStringIO import StringIO
from .sources import sources
from .layers import layers
from .caches import caches
from .seeds import seeds
from .utils import update_config, MapProxyYAMLDumper, download_capabilities
from mapproxy.config.loader import load_configuration
from mapproxy.util.ext.wmsparse import parse_capabilities
def setup_logging(level=logging.INFO):
mapproxy_log = logging.getLogger('mapproxy')
mapproxy_log.setLevel(level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"[%(asctime)s] %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
mapproxy_log.addHandler(ch)
def write_header(f, capabilities):
print >>f, '# MapProxy configuration automatically generated from:'
print >>f, '# %s' % capabilities
print >>f, '#'
print >>f, '# NOTE: The generated configuration can be highly inefficient,'
print >>f, '# especially when multiple layers and caches are requested at once.'
print >>f, '# Make sure you understand the generated configuration!'
print >>f, '#'
print >>f, '# Created on %s with:' % datetime.datetime.now()
print >>f, ' \\\n'.join(textwrap.wrap(' '.join(sys.argv), initial_indent='# ', subsequent_indent='# '))
print >>f, ''
@contextmanager
def file_or_stdout(name):
if name == '-':
yield sys.stdout
else:
with open(name, 'wb') as f:
yield f
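# Illustrative CLI invocation handled by config_command below (URL and
# filenames are hypothetical):
#     mapproxy-util autoconfig \
#         --capabilities http://example.org/service?REQUEST=GetCapabilities \
#         --output mapproxy.yaml --output-seed seed.yaml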
def config_command(args):
parser = optparse.OptionParser("usage: %prog autoconfig [options]")
parser.add_option('--capabilities',
help="URL or filename of WMS 1.1.1/1.3.0 capabilities document")
parser.add_option('--output', help="filename for created MapProxy config [default: -]", default="-")
parser.add_option('--output-seed', help="filename for created seeding config")
parser.add_option('--base', help='base config to include in created MapProxy config')
parser.add_option('--overwrite',
help='YAML file with overwrites for the created MapProxy config')
parser.add_option('--overwrite-seed',
help='YAML file with overwrites for the created seeding config')
parser.add_option('--force', default=False, action='store_true',
help="overwrite existing files")
options, args = parser.parse_args(args)
if not options.capabilities:
parser.print_help()
print >>sys.stderr, "\nERROR: --capabilities required"
return 2
if not options.output and not options.output_seed:
parser.print_help()
print >>sys.stderr, "\nERROR: --output and/or --output-seed required"
return 2
if not options.force:
if options.output and options.output != '-' and os.path.exists(options.output):
print >>sys.stderr, "\nERROR: %s already exists, use --force to overwrite" % options.output
return 2
if options.output_seed and options.output_seed != '-' and os.path.exists(options.output_seed):
print >>sys.stderr, "\nERROR: %s already exists, use --force to overwrite" % options.output_seed
return 2
log = logging.getLogger('mapproxy_conf_cmd')
log.addHandler(logging.StreamHandler())
setup_logging(logging.WARNING)
srs_grids = {}
if options.base:
base = load_configuration(options.base)
for name, grid_conf in base.grids.iteritems():
if name.startswith('GLOBAL_'):
continue
srs_grids[grid_conf.tile_grid().srs.srs_code] = name
cap_doc = options.capabilities
if cap_doc.startswith(('http://', 'https://')):
cap_doc = download_capabilities(options.capabilities).read()
else:
cap_doc = open(cap_doc, 'rb').read()
try:
cap = parse_capabilities(StringIO(cap_doc))
except (xml.etree.ElementTree.ParseError, ValueError), ex:
print >>sys.stderr, ex
print >>sys.stderr, cap_doc[:1000] + ('...' if len(cap_doc) > 1000 else '')
return 3
overwrite = None
if options.overwrite:
with open(options.overwrite, 'rb') as f:
overwrite = yaml.load(f)
overwrite_seed = None
if options.overwrite_seed:
with open(options.overwrite_seed, 'rb') as f:
overwrite_seed = yaml.load(f)
conf = {}
if options.base:
conf['base'] = os.path.abspath(options.base)
conf['services'] = {'wms': {'md': {'title': cap.metadata()['title']}}}
if overwrite:
conf['services'] = update_config(conf['services'], overwrite.pop('service', {}))
conf['sources'] = sources(cap)
if overwrite:
conf['sources'] = update_config(conf['sources'], overwrite.pop('sources', {}))
conf['caches'] = caches(cap, conf['sources'], srs_grids=srs_grids)
if overwrite:
conf['caches'] = update_config(conf['caches'], overwrite.pop('caches', {}))
conf['layers'] = layers(cap, conf['caches'])
if overwrite:
conf['layers'] = update_config(conf['layers'], overwrite.pop('layers', {}))
if overwrite:
conf = update_config(conf, overwrite)
seed_conf = {}
seed_conf['seeds'], seed_conf['cleanups'] = seeds(cap, conf['caches'])
if overwrite_seed:
seed_conf = update_config(seed_conf, overwrite_seed)
if options.output:
with file_or_stdout(options.output) as f:
write_header(f, options.capabilities)
yaml.dump(conf, f, default_flow_style=False, Dumper=MapProxyYAMLDumper)
if options.output_seed:
with file_or_stdout(options.output_seed) as f:
write_header(f, options.capabilities)
yaml.dump(seed_conf, f, default_flow_style=False, Dumper=MapProxyYAMLDumper)
return 0
| apache-2.0 |