| repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (981 distinct values) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (15 distinct values) |
|---|---|---|---|---|---|
yewang15215/django
|
tests/auth_tests/test_basic.py
|
12
|
7419
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.contrib.auth import get_user, get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.core.exceptions import ImproperlyConfigured
from django.db import IntegrityError
from django.http import HttpRequest
from django.test import TestCase, override_settings
from django.utils import translation
from .models import CustomUser
class BasicTestCase(TestCase):
def test_user(self):
"Users can be created and can set their password"
u = User.objects.create_user('testuser', '[email protected]', 'testpw')
self.assertTrue(u.has_usable_password())
self.assertFalse(u.check_password('bad'))
self.assertTrue(u.check_password('testpw'))
# Check we can manually set an unusable password
u.set_unusable_password()
u.save()
self.assertFalse(u.check_password('testpw'))
self.assertFalse(u.has_usable_password())
u.set_password('testpw')
self.assertTrue(u.check_password('testpw'))
u.set_password(None)
self.assertFalse(u.has_usable_password())
# Check username getter
self.assertEqual(u.get_username(), 'testuser')
# Check authentication/permissions
self.assertFalse(u.is_anonymous)
self.assertTrue(u.is_authenticated)
self.assertFalse(u.is_staff)
self.assertTrue(u.is_active)
self.assertFalse(u.is_superuser)
# Check API-based user creation with no password
u2 = User.objects.create_user('testuser2', '[email protected]')
self.assertFalse(u2.has_usable_password())
def test_unicode_username(self):
User.objects.create_user('jörg')
User.objects.create_user('Григорий')
# Two equivalent unicode normalized usernames should be duplicates
omega_username = 'iamtheΩ' # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = 'iamtheΩ' # U+2126 OHM SIGN
User.objects.create_user(ohm_username)
with self.assertRaises(IntegrityError):
User.objects.create_user(omega_username)
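# A minimal sketch of why the two usernames above collide, assuming the
# default NFKC username normalization (AbstractBaseUser.normalize_username):
# U+2126 OHM SIGN normalizes to U+03A9 GREEK CAPITAL LETTER OMEGA, e.g.
#   import unicodedata
#   unicodedata.normalize('NFKC', u'\u2126') == u'\u03a9'  # True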
def test_is_anonymous_authenticated_method_deprecation(self):
deprecation_message = (
'Using user.is_authenticated() and user.is_anonymous() as a '
'method is deprecated. Remove the parentheses to use it as an '
'attribute.'
)
u = User.objects.create_user('testuser', '[email protected]', 'testpw')
# Backwards-compatibility callables
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertFalse(u.is_anonymous())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertTrue(u.is_authenticated())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
def test_user_no_email(self):
"Users can be created without an email"
u = User.objects.create_user('testuser1')
self.assertEqual(u.email, '')
u2 = User.objects.create_user('testuser2', email='')
self.assertEqual(u2.email, '')
u3 = User.objects.create_user('testuser3', email=None)
self.assertEqual(u3.email, '')
def test_anonymous_user(self):
"Check the properties of the anonymous user"
a = AnonymousUser()
self.assertIsNone(a.pk)
self.assertEqual(a.username, '')
self.assertEqual(a.get_username(), '')
self.assertTrue(a.is_anonymous)
self.assertFalse(a.is_authenticated)
self.assertFalse(a.is_staff)
self.assertFalse(a.is_active)
self.assertFalse(a.is_superuser)
self.assertEqual(a.groups.all().count(), 0)
self.assertEqual(a.user_permissions.all().count(), 0)
def test_anonymous_user_is_anonymous_authenticated_method_deprecation(self):
a = AnonymousUser()
deprecation_message = (
'Using user.is_authenticated() and user.is_anonymous() as a '
'method is deprecated. Remove the parentheses to use it as an '
'attribute.'
)
# Backwards-compatibility callables
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
self.assertTrue(a.is_anonymous())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
self.assertFalse(a.is_authenticated())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
def test_superuser(self):
"Check the creation and properties of a superuser"
super = User.objects.create_superuser('super', '[email protected]', 'super')
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
def test_get_user_model(self):
"The current user model can be retrieved"
self.assertEqual(get_user_model(), User)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
def test_swappable_user(self):
"The current user model can be swapped out for another"
self.assertEqual(get_user_model(), CustomUser)
with self.assertRaises(AttributeError):
User.objects.all()
@override_settings(AUTH_USER_MODEL='badsetting')
def test_swappable_user_bad_setting(self):
"The alternate user setting must point to something in the format app.model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_swappable_user_nonexistent_model(self):
"The current user model must point to an installed model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
def test_user_verbose_names_translatable(self):
"Default User model verbose names are translatable (#19945)"
with translation.override('en'):
self.assertEqual(User._meta.verbose_name, 'user')
self.assertEqual(User._meta.verbose_name_plural, 'users')
with translation.override('es'):
self.assertEqual(User._meta.verbose_name, 'usuario')
self.assertEqual(User._meta.verbose_name_plural, 'usuarios')
class TestGetUser(TestCase):
def test_get_user_anonymous(self):
request = HttpRequest()
request.session = self.client.session
user = get_user(request)
self.assertIsInstance(user, AnonymousUser)
def test_get_user(self):
created_user = User.objects.create_user('testuser', '[email protected]', 'testpw')
self.client.login(username='testuser', password='testpw')
request = HttpRequest()
request.session = self.client.session
user = get_user(request)
self.assertIsInstance(user, User)
self.assertEqual(user.username, created_user.username)
|
bsd-3-clause
|
vdemeester/docker-py
|
tests/integration/regression_test.py
|
4
|
2232
|
import io
import random
import docker
import six
from .base import BaseAPIIntegrationTest, TEST_IMG
import pytest
class TestRegressions(BaseAPIIntegrationTest):
def test_443_handle_nonchunked_response_in_stream(self):
dfile = io.BytesIO()
with pytest.raises(docker.errors.APIError) as exc:
for line in self.client.build(fileobj=dfile, tag="a/b/c"):
pass
assert exc.value.is_error()
dfile.close()
def test_542_truncate_ids_client_side(self):
self.client.start(
self.client.create_container(TEST_IMG, ['true'])
)
result = self.client.containers(all=True, trunc=True)
assert len(result[0]['Id']) == 12
def test_647_support_doubleslash_in_image_names(self):
with pytest.raises(docker.errors.APIError):
self.client.inspect_image('gensokyo.jp//kirisame')
def test_649_handle_timeout_value_none(self):
self.client.timeout = None
ctnr = self.client.create_container(TEST_IMG, ['sleep', '2'])
self.client.start(ctnr)
self.client.stop(ctnr)
def test_715_handle_user_param_as_int_value(self):
ctnr = self.client.create_container(TEST_IMG, ['id', '-u'], user=1000)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
assert logs == '1000\n'
def test_792_explicit_port_protocol(self):
tcp_port, udp_port = random.sample(range(9999, 32000), 2)
ctnr = self.client.create_container(
TEST_IMG, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
host_config=self.client.create_host_config(
port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port}
)
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
assert self.client.port(
ctnr, 2000
)[0]['HostPort'] == six.text_type(tcp_port)
assert self.client.port(
ctnr, '2000/tcp'
)[0]['HostPort'] == six.text_type(tcp_port)
assert self.client.port(
ctnr, '2000/udp'
)[0]['HostPort'] == six.text_type(udp_port)
|
apache-2.0
|
Workday/OpenFrame
|
tools/telemetry/catapult_base/refactor/offset_token.py
|
16
|
3155
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import itertools
import token
import tokenize
def _Pairwise(iterable):
"""s -> (None, s0), (s0, s1), (s1, s2), (s2, s3), ..."""
a, b = itertools.tee(iterable)
a = itertools.chain((None,), a)
return itertools.izip(a, b)
class OffsetToken(object):
"""A Python token with a relative position.
A token is represented by a type defined in Python's token module, a string
representing the content, and an offset. Using relative positions makes it
easy to insert and remove tokens.
"""
def __init__(self, token_type, string, offset):
self._type = token_type
self._string = string
self._offset = offset
@property
def type(self):
return self._type
@property
def type_name(self):
return token.tok_name[self._type]
@property
def string(self):
return self._string
@string.setter
def string(self, value):
self._string = value
@property
def offset(self):
return self._offset
def __str__(self):
return str((self.type_name, self.string, self.offset))
def Tokenize(f):
"""Read tokens from a file-like object.
Args:
f: Any object that has a readline method.
Returns:
A collections.deque containing OffsetTokens. Deques are cheaper and easier
to manipulate sequentially than lists.
"""
f.seek(0)
tokenize_tokens = tokenize.generate_tokens(f.readline)
offset_tokens = collections.deque()
for prev_token, next_token in _Pairwise(tokenize_tokens):
token_type, string, (srow, scol), _, _ = next_token
if not prev_token:
offset_tokens.append(OffsetToken(token_type, string, (0, 0)))
else:
erow, ecol = prev_token[3]
if erow == srow:
offset_tokens.append(OffsetToken(token_type, string, (0, scol-ecol)))
else:
offset_tokens.append(OffsetToken(token_type, string, (srow-erow, scol)))
return offset_tokens
def Untokenize(offset_tokens):
"""Return the string representation of an iterable of OffsetTokens."""
# Make a copy. Don't modify the original.
offset_tokens = collections.deque(offset_tokens)
# Strip leading NL tokens.
while offset_tokens[0].type == tokenize.NL:
offset_tokens.popleft()
# Strip leading vertical whitespace.
first_token = offset_tokens.popleft()
# Take care not to modify the existing token. Create a new one in its place.
first_token = OffsetToken(first_token.type, first_token.string,
(0, first_token.offset[1]))
offset_tokens.appendleft(first_token)
# Convert OffsetTokens to tokenize tokens.
tokenize_tokens = []
row = 1
col = 0
for t in offset_tokens:
offset_row, offset_col = t.offset
if offset_row == 0:
col += offset_col
else:
row += offset_row
col = offset_col
tokenize_tokens.append((t.type, t.string, (row, col), (row, col), None))
# tokenize can't handle whitespace before line continuations.
# So add a space.
return tokenize.untokenize(tokenize_tokens).replace('\\\n', ' \\\n')
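# A minimal usage sketch of the Tokenize/Untokenize round trip above (an
# illustrative assumption, Python 2 style to match itertools.izip):
#   import StringIO
#   tokens = Tokenize(StringIO.StringIO('x = 1\ny = 2\n'))
#   tokens[0].string = 'renamed'   # edit a token in place
#   print Untokenize(tokens)       # roughly 'renamed = 1\ny = 2\n'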
|
bsd-3-clause
|
sippy/voiptests
|
test_cases/reinv_brkn2.py
|
1
|
2000
|
# Copyright (c) 2016 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from test_cases.reinv_fail import a_test_reinv_fail
from test_cases.reinvite import b_test_reinvite
class a_test_reinv_brkn2(a_test_reinv_fail):
cld = 'bob_reinv_brkn2'
cli = 'alice_reinv_brkn2'
def reinvite(self, ua):
if not self.connect_done or self.disconnect_done:
return
sdp_body_bak = ua.lSDP
ua.lSDP = sdp_body_bak.getCopy()
for sect in ua.lSDP.content.sections:
sect.c_header = None
rval = a_test_reinv_fail.reinvite(self, ua)
ua.lSDP = sdp_body_bak
return rval
class b_test_reinv_brkn2(b_test_reinvite):
cli = 'bob_reinv_brkn2'
|
bsd-2-clause
|
akosyakov/intellij-community
|
python/lib/Lib/stat.py
|
145
|
1667
|
"""Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# XXX Strictly speaking, this module may have to be adapted for each POSIX
# implementation; in practice, however, the numeric constants used by
# stat() are almost universal (even for stat() emulations on non-UNIX
# systems like MS-DOS).
# Indices for stat struct members in tuple returned by os.stat()
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
return mode & 07777
def S_IFMT(mode):
return mode & 0170000
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0040000
S_IFCHR = 0020000
S_IFBLK = 0060000
S_IFREG = 0100000
S_IFIFO = 0010000
S_IFLNK = 0120000
S_IFSOCK = 0140000
# Functions to test for each file type
def S_ISDIR(mode):
return S_IFMT(mode) == S_IFDIR
def S_ISCHR(mode):
return S_IFMT(mode) == S_IFCHR
def S_ISBLK(mode):
return S_IFMT(mode) == S_IFBLK
def S_ISREG(mode):
return S_IFMT(mode) == S_IFREG
def S_ISFIFO(mode):
return S_IFMT(mode) == S_IFIFO
def S_ISLNK(mode):
return S_IFMT(mode) == S_IFLNK
def S_ISSOCK(mode):
return S_IFMT(mode) == S_IFSOCK
# Names for permission bits
S_ISUID = 04000
S_ISGID = 02000
S_ENFMT = S_ISGID
S_ISVTX = 01000
S_IREAD = 00400
S_IWRITE = 00200
S_IEXEC = 00100
S_IRWXU = 00700
S_IRUSR = 00400
S_IWUSR = 00200
S_IXUSR = 00100
S_IRWXG = 00070
S_IRGRP = 00040
S_IWGRP = 00020
S_IXGRP = 00010
S_IRWXO = 00007
S_IROTH = 00004
S_IWOTH = 00002
S_IXOTH = 00001
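# A minimal usage sketch (illustrative, following the suggested
# "from stat import *" style; the path is an arbitrary example):
#   import os
#   st_mode = os.stat('.')[ST_MODE]
#   S_ISDIR(st_mode)         # True for a directory
#   oct(S_IMODE(st_mode))    # permission bits only, e.g. '0755'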
|
apache-2.0
|
gameduell/duell
|
bin/win/python2.7.9/Lib/site-packages/pip/_vendor/requests/packages/chardet/langbulgarianmodel.py
|
2965
|
12784
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified based on win1251BulgarianCharToOrderMap, so
# only values < 64 are known to be valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
Latin5BulgarianModel = {
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = {
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
# flake8: noqa
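# Hedged note on how these model dicts are typically consumed (assumed from
# the chardet 2.x layout, not shown in this file): sbcsgroupprober.py wraps
# each model in a SingleByteCharSetProber, which maps every input byte
# through 'charToOrderMap' and scores consecutive order pairs (orders < 64,
# per the comment above) against 'precedenceMatrix':
#   from .sbcharsetprober import SingleByteCharSetProber
#   prober = SingleByteCharSetProber(Win1251BulgarianModel)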
|
bsd-2-clause
|
mringel/ThinkStats2
|
code/timeseries.py
|
66
|
18035
|
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
trim = max(1, n/10)
return np.mean(sorted(t)[trim:n-trim])
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transactions: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
"""Compute the correlation matrix between prices for difference qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
frisat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
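# Hedged note (an assumption about newer pandas, not part of the original
# code): pandas.rolling_mean and pandas.ewma were removed in later pandas
# releases; the equivalent modern calls would be
#   reindexed.ppg.rolling(30).mean()
#   reindexed.ppg.ewm(span=30).mean()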
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
|
gpl-3.0
|
spacewalkproject/spacewalk
|
java/scripts/api/managechannel.py
|
16
|
2371
|
#!/usr/bin/python
"""
Script to :
- create unique channels for given users
- Push Content to the same for each user
"""
import os
import xmlrpclib
# Setup
SATELLITE_HOST = "test10-64.rhndev.redhat.com"
SATELLITE_URL = "http://%s/rpc/api" % SATELLITE_HOST
SATELLITE_LOGIN_HASH ={'prad03':'redhat', 'prad02' : 'redhat'}
SUFFIX_HASH = {'prad03' : '03', 'prad02' : '02'}
CHANNEL_INFO = {'label' : 'channel-',
'name' : 'channel-',
'summary' : 'dummy channel',
'archLabel' : 'channel-ia32',
'parentLabel' : ''}
PKG_CONTENT_DIR = '/tmp/upload/'
client = xmlrpclib.Server(SATELLITE_URL, verbose=0)
def getKeys(users):
"""
Generate session key for each user
"""
keylist = {}
for login,password in users.items():
sessionkey = client.auth.login(login, password)
keylist[login] = sessionkey
return keylist
def createChannels(keylist, info):
"""
Create unique channels per user
"""
channel_list = {}
for login,key in keylist.items():
# create channel under each org
# Channel label,name should be unique
label = info['label'] + SUFFIX_HASH[login]
name = info['name'] + SUFFIX_HASH[login]
try:
print "Creating Channel: ",label
client.channel.software.create(key, label, name, \
info['summary'], info['archLabel'], \
info['parentLabel'])
except xmlrpclib.Fault, e:
print e
channel_list[login] = label
return channel_list
def pushContent(users, channels):
"""
Invoke rhnpush to push packages to channels
"""
for login,password in users.items():
print "Pushing Content to %s" % channels[login]
push_cmd = 'rhnpush --server=%s/APP --username=%s --password=%s \
--dir=%s --channel=%s -vvvv --tolerant --nosig' % \
(SATELLITE_HOST, login, password, PKG_CONTENT_DIR, \
channels[login])
os.system(push_cmd)
def main():
# Create Session keys
keys = getKeys(SATELLITE_LOGIN_HASH)
# Create channels
channel_list = createChannels(keys, CHANNEL_INFO)
# push content to channels
pushContent(SATELLITE_LOGIN_HASH, channel_list)
if __name__ == '__main__':
main()
|
gpl-2.0
|
moreati/django
|
django/utils/jslex.py
|
335
|
7778
|
"""JsLex: a lexer for Javascript"""
# Originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
import re
class Tok(object):
"""
A specification for a token class.
"""
num = 0
def __init__(self, name, regex, next=None):
self.id = Tok.num
Tok.num += 1
self.name = name
self.regex = regex
self.next = next
def literals(choices, prefix="", suffix=""):
"""
Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually.
"""
return "|".join(prefix + re.escape(c) + suffix for c in choices.split())
class Lexer(object):
"""
A generic multi-state regex-based lexer.
"""
def __init__(self, states, first):
self.regexes = {}
self.toks = {}
for state, rules in states.items():
parts = []
for tok in rules:
groupid = "t%d" % tok.id
self.toks[groupid] = tok
parts.append("(?P<%s>%s)" % (groupid, tok.regex))
self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE)
self.state = first
def lex(self, text):
"""
Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).
"""
end = len(text)
state = self.state
regexes = self.regexes
toks = self.toks
start = 0
while start < end:
for match in regexes[state].finditer(text, start):
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
start += len(toktext)
yield (tok.name, toktext)
if tok.next:
state = tok.next
break
self.state = state
class JsLexer(Lexer):
"""
A Javascript lexer
>>> lexer = JsLexer()
>>> list(lexer.lex("a = 1"))
[('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]
This doesn't properly handle non-ASCII characters in the Javascript source.
"""
# Because these tokens are matched as alternatives in a regex, longer
# possibilities must appear in the list before shorter ones, for example,
# '>>' before '>'.
#
# Note that we don't have to detect malformed Javascript, only properly
# lex correct Javascript, so much of this is simplified.
# Details of Javascript lexical structure are taken from
# http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
# A useful explanation of automatic semicolon insertion is at
# http://inimino.org/~inimino/blog/javascript_semicolons
both_before = [
Tok("comment", r"/\*(.|\n)*?\*/"),
Tok("linecomment", r"//.*?$"),
Tok("ws", r"\s+"),
Tok("keyword", literals("""
break case catch class const continue debugger
default delete do else enum export extends
finally for function if import in instanceof
new return super switch this throw try typeof
var void while with
""", suffix=r"\b"), next='reg'),
Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
Tok("id", r"""
([a-zA-Z_$ ]|\\u[0-9a-fA-Z]{4}) # first char
([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
""", next='div'),
Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
Tok("onum", r"0[0-7]+"),
Tok("dnum", r"""
( (0|[1-9][0-9]*) # DecimalIntegerLiteral
\. # dot
[0-9]* # DecimalDigits-opt
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
\. # dot
[0-9]+ # DecimalDigits
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
(0|[1-9][0-9]*) # DecimalIntegerLiteral
([eE][-+]?[0-9]+)? # ExponentPart-opt
)
""", next='div'),
Tok("punct", literals("""
>>>= === !== >>> <<= >>= <= >= == != << >> &&
|| += -= *= %= &= |= ^=
"""), next="reg"),
Tok("punct", literals("++ -- ) ]"), next='div'),
Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
]
both_after = [
Tok("other", r"."),
]
states = {
# slash will mean division
'div': both_before + [
Tok("punct", literals("/= /"), next='reg'),
] + both_after,
# slash will mean regex
'reg': both_before + [
Tok("regex",
r"""
/ # opening slash
# First character is..
( [^*\\/[] # anything but * \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)
# Following characters are same, except for excluding a star
( [^\\/[] # anything but \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)* # many times
/ # closing slash
[a-zA-Z0-9]* # trailing flags
""", next='div'),
] + both_after,
}
def __init__(self):
super(JsLexer, self).__init__(self.states, 'reg')
def prepare_js_for_gettext(js):
"""
Convert the Javascript source `js` into something resembling C for
xgettext.
What actually happens is that all the regex literals are replaced with
"REGEX".
"""
def escape_quotes(m):
"""Used in a regex to properly escape double quotes."""
s = m.group(0)
if s == '"':
return r'\"'
else:
return s
lexer = JsLexer()
c = []
for name, tok in lexer.lex(js):
if name == 'regex':
# C doesn't grok regexes, and they aren't needed for gettext,
# so just output a string instead.
tok = '"REGEX"'
elif name == 'string':
# C doesn't have single-quoted strings, so make all strings
# double-quoted.
if tok.startswith("'"):
guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
tok = '"' + guts + '"'
elif name == 'id':
# C can't deal with Unicode escapes in identifiers. We don't
# need them for gettext anyway, so replace them with something
# innocuous
tok = tok.replace("\\", "U")
c.append(tok)
return ''.join(c)
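# A minimal usage sketch (illustrative only, not part of the original
# module):
#   prepare_js_for_gettext("x = /ab+c/; s = 'hi'")
#   # roughly 'x = "REGEX"; s = "hi"': regex literals collapsed to "REGEX"
#   # and single-quoted strings re-quoted so xgettext can parse it as C.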
|
bsd-3-clause
|
puremourning/YouCompleteMe
|
python/ycm/client/completer_available_request.py
|
7
|
1464
|
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm.client.base_request import BaseRequest, BuildRequestData
class CompleterAvailableRequest( BaseRequest ):
def __init__( self, filetypes ):
super( CompleterAvailableRequest, self ).__init__()
self.filetypes = filetypes
self._response = None
def Start( self ):
request_data = BuildRequestData()
request_data.update( { 'filetypes': self.filetypes } )
self._response = self.PostDataToHandler( request_data,
'semantic_completion_available' )
def Response( self ):
return self._response
def SendCompleterAvailableRequest( filetypes ):
request = CompleterAvailableRequest( filetypes )
# This is a blocking call.
request.Start()
return request.Response()
|
gpl-3.0
|
rizar/attention-lvcsr
|
libs/blocks/tests/monitoring/test_monitored_quantity.py
|
2
|
1638
|
import numpy
import theano
from fuel.datasets import IterableDataset
from blocks.monitoring.evaluators import DatasetEvaluator
from blocks.monitoring.aggregation import MonitoredQuantity
from blocks.bricks.cost import CategoricalCrossEntropy
class CrossEntropy(MonitoredQuantity):
def __init__(self, **kwargs):
super(CrossEntropy, self).__init__(**kwargs)
def initialize(self):
self.total_cross_entropy, self.examples_seen = 0.0, 0
def accumulate(self, target, predicted):
import numpy
self.total_cross_entropy += -(target * numpy.log(predicted)).sum()
self.examples_seen += 1
def readout(self):
res = self.total_cross_entropy / self.examples_seen
return res
def test_dataset_evaluators():
X = theano.tensor.vector('X')
Y = theano.tensor.vector('Y')
data = [numpy.arange(1, 7, dtype=theano.config.floatX).reshape(3, 2),
numpy.arange(11, 17, dtype=theano.config.floatX).reshape(3, 2)]
data_stream = IterableDataset(dict(X=data[0],
Y=data[1])).get_example_stream()
validator = DatasetEvaluator([
CrossEntropy(requires=[X, Y],
name="monitored_cross_entropy0"),
# to test two identical quantities and make sure that state will be reset
CrossEntropy(requires=[X, Y],
name="monitored_cross_entropy1"),
CategoricalCrossEntropy().apply(X, Y), ])
values = validator.evaluate(data_stream)
numpy.testing.assert_allclose(
values['monitored_cross_entropy1'],
values['categoricalcrossentropy_apply_cost'])
|
mit
|
CatsAndDogsbvba/odoo
|
openerp/report/render/html2html/html2html.py
|
443
|
4238
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report.render.rml2pdf import utils
import copy
import base64
import cStringIO
import re
from reportlab.lib.utils import ImageReader
_regex = re.compile('\[\[(.+?)\]\]')
utils._regex = re.compile('\[\[\s*(.+?)\s*\]\]',re.DOTALL)
class html2html(object):
def __init__(self, html, localcontext):
self.localcontext = localcontext
self.etree = html
self._node = None
def render(self):
def process_text(node,new_node):
if new_node.tag in ['story','tr','section']:
new_node.attrib.clear()
for child in utils._child_get(node, self):
new_child = copy.deepcopy(child)
new_node.append(new_child)
if len(child):
for n in new_child:
new_child.text = utils._process_text(self, child.text)
new_child.tail = utils._process_text(self, child.tail)
new_child.remove(n)
process_text(child, new_child)
else:
if new_child.tag=='img' and new_child.get('name'):
if _regex.findall(new_child.get('name')) :
src = utils._process_text(self, new_child.get('name'))
if src :
new_child.set('src','data:image/gif;base64,%s'%src)
output = cStringIO.StringIO(base64.decodestring(src))
img = ImageReader(output)
(width,height) = img.getSize()
if not new_child.get('width'):
new_child.set('width',str(width))
if not new_child.get('height') :
new_child.set('height',str(height))
else :
new_child.getparent().remove(new_child)
new_child.text = utils._process_text(self, child.text)
new_child.tail = utils._process_text(self, child.tail)
self._node = copy.deepcopy(self.etree)
for n in self._node:
self._node.remove(n)
process_text(self.etree, self._node)
return self._node
def url_modify(self,root):
for n in root:
if (n.text.find('<a ')>=0 or n.text.find('<a')>=0) and n.text.find('href')>=0 and n.text.find('style')<=0 :
node = (n.tag=='span' and n.getparent().tag=='u') and n.getparent().getparent() or ((n.tag=='span') and n.getparent()) or n
style = node.get('color') and "style='color:%s; text-decoration: none;'"%node.get('color') or ''
if n.text.find('<a')>=0:
t = '<a '
else :
t = '<a '
href = n.text.split(t)[-1]
n.text = ' '.join([t,style,href])
self.url_modify(n)
return root
def parseString(node, localcontext = {}):
r = html2html(node, localcontext)
root = r.render()
root = r.url_modify(root)
return root
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
cnrat/fail2ban
|
fail2ban/tests/action_d/test_badips.py
|
19
|
2735
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import unittest
import sys
from ..dummyjail import DummyJail
from ..utils import CONFIG_DIR
if sys.version_info >= (2,7):
class BadIPsActionTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.jail = DummyJail()
self.jail.actions.add("test")
pythonModule = os.path.join(CONFIG_DIR, "action.d", "badips.py")
self.jail.actions.add("badips", pythonModule, initOpts={
'category': "ssh",
'banaction': "test",
})
self.action = self.jail.actions["badips"]
def tearDown(self):
"""Call after every test case."""
# Must cancel timer!
if self.action._timer:
self.action._timer.cancel()
def testCategory(self):
categories = self.action.getCategories()
self.assertTrue("ssh" in categories)
self.assertTrue(len(categories) >= 10)
self.assertRaises(
ValueError, setattr, self.action, "category",
"invalid-category")
# Not valid for reporting category...
self.assertRaises(
ValueError, setattr, self.action, "category", "mail")
# but valid for blacklisting.
self.action.bancategory = "mail"
def testScore(self):
self.assertRaises(ValueError, setattr, self.action, "score", -5)
self.action.score = 5
self.action.score = "5"
def testBanaction(self):
self.assertRaises(
ValueError, setattr, self.action, "banaction",
"invalid-action")
self.action.banaction = "test"
def testUpdateperiod(self):
self.assertRaises(
ValueError, setattr, self.action, "updateperiod", -50)
self.assertRaises(
ValueError, setattr, self.action, "updateperiod", 0)
self.action.updateperiod = 900
self.action.updateperiod = "900"
def testStart(self):
self.action.start()
self.assertTrue(len(self.action._bannedips) > 10)
def testStop(self):
self.testStart()
self.action.stop()
self.assertTrue(len(self.action._bannedips) == 0)
|
gpl-2.0
|
nemesisdesign/django
|
tests/null_queries/tests.py
|
55
|
2973
|
from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Inner, OuterA, OuterB, Poll
class NullQueriesTests(TestCase):
def test_none_as_null(self):
"""
Regression test for the use of None as a query value.
None is interpreted as an SQL NULL, but only in __exact and __iexact
queries.
Set up some initial polls and choices
"""
p1 = Poll(question='Why?')
p1.save()
c1 = Choice(poll=p1, choice='Because.')
c1.save()
c2 = Choice(poll=p1, choice='Why Not?')
c2.save()
# Exact query with value None returns nothing ("is NULL" in sql,
# but every 'id' field has a value).
self.assertQuerysetEqual(Choice.objects.filter(choice__exact=None), [])
# The same behavior for iexact query.
self.assertQuerysetEqual(Choice.objects.filter(choice__iexact=None), [])
# Excluding the previous result returns everything.
self.assertQuerysetEqual(
Choice.objects.exclude(choice=None).order_by('id'),
[
'<Choice: Choice: Because. in poll Q: Why? >',
'<Choice: Choice: Why Not? in poll Q: Why? >'
]
)
# Valid query, but fails because foo isn't a keyword
with self.assertRaises(FieldError):
Choice.objects.filter(foo__exact=None)
# Can't use None on anything other than __exact and __iexact
with self.assertRaises(ValueError):
Choice.objects.filter(id__gt=None)
# Related managers use __exact=None implicitly if the object hasn't been saved.
p2 = Poll(question="How?")
self.assertEqual(repr(p2.choice_set.all()), '<QuerySet []>')
def test_reverse_relations(self):
"""
Querying across reverse relations and then another relation should
insert outer joins correctly so as not to exclude results.
"""
obj = OuterA.objects.create()
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third=None),
['<OuterA: OuterA object>']
)
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third__data=None),
['<OuterA: OuterA object>']
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
Inner.objects.filter(first__inner__third=None),
['<Inner: Inner object>']
)
# Ticket #13815: check if <reverse>_isnull=False does not produce
# faulty empty lists
OuterB.objects.create(data="reverse")
self.assertQuerysetEqual(
OuterB.objects.filter(inner__isnull=False),
[]
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
OuterB.objects.exclude(inner__isnull=False),
['<OuterB: OuterB object>']
)
|
bsd-3-clause
|
renesugar/arrow
|
python/pyarrow/tests/test_jvm.py
|
5
|
13848
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import pyarrow as pa
import pyarrow.jvm as pa_jvm
import pytest
import six
import sys
import xml.etree.ElementTree as ET
jpype = pytest.importorskip("jpype")
@pytest.fixture(scope="session")
def root_allocator():
# This test requires Arrow Java to be built in the same source tree
pom_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..',
'java', 'pom.xml')
tree = ET.parse(pom_path)
version = tree.getroot().find(
'POM:version',
namespaces={
'POM': 'http://maven.apache.org/POM/4.0.0'
}).text
jar_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..',
'java', 'tools', 'target',
'arrow-tools-{}-jar-with-dependencies.jar'.format(version))
jar_path = os.getenv("ARROW_TOOLS_JAR", jar_path)
jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.class.path=" + jar_path)
return jpype.JPackage("org").apache.arrow.memory.RootAllocator(sys.maxsize)
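# Illustrative note (not part of the original test module): the fixture above
# defaults to the in-tree build of arrow-tools, but a prebuilt jar can be
# supplied via the environment variable it already reads, e.g.
#
#   ARROW_TOOLS_JAR=/path/to/arrow-tools-<version>-jar-with-dependencies.jar \
#       pytest python/pyarrow/tests/test_jvm.py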
def test_jvm_buffer(root_allocator):
# Create a buffer
jvm_buffer = root_allocator.buffer(8)
for i in range(8):
jvm_buffer.setByte(i, 8 - i)
# Convert to Python
buf = pa_jvm.jvm_buffer(jvm_buffer)
# Check its content
assert buf.to_pybytes() == b'\x08\x07\x06\x05\x04\x03\x02\x01'
def _jvm_field(jvm_spec):
om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')()
pojo_Field = jpype.JClass('org.apache.arrow.vector.types.pojo.Field')
return om.readValue(jvm_spec, pojo_Field)
def _jvm_schema(jvm_spec, metadata=None):
field = _jvm_field(jvm_spec)
schema_cls = jpype.JClass('org.apache.arrow.vector.types.pojo.Schema')
fields = jpype.JClass('java.util.ArrayList')()
fields.add(field)
if metadata:
dct = jpype.JClass('java.util.HashMap')()
for k, v in six.iteritems(metadata):
dct.put(k, v)
return schema_cls(fields, dct)
else:
return schema_cls(fields)
# In the following, we use the JSON serialization of the Field objects in Java.
# This ensures that we do not rely on the exact mechanics of constructing
# them via Java code, and it also lets us define them as test parameters
# without having to invoke the JVM.
#
# The specifications were created using:
#
# om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')()
# field = … # Code to instantiate the field
# jvm_spec = om.writeValueAsString(field)
@pytest.mark.parametrize('pa_type,jvm_spec', [
(pa.null(), '{"name":"null"}'),
(pa.bool_(), '{"name":"bool"}'),
(pa.int8(), '{"name":"int","bitWidth":8,"isSigned":true}'),
(pa.int16(), '{"name":"int","bitWidth":16,"isSigned":true}'),
(pa.int32(), '{"name":"int","bitWidth":32,"isSigned":true}'),
(pa.int64(), '{"name":"int","bitWidth":64,"isSigned":true}'),
(pa.uint8(), '{"name":"int","bitWidth":8,"isSigned":false}'),
(pa.uint16(), '{"name":"int","bitWidth":16,"isSigned":false}'),
(pa.uint32(), '{"name":"int","bitWidth":32,"isSigned":false}'),
(pa.uint64(), '{"name":"int","bitWidth":64,"isSigned":false}'),
(pa.float16(), '{"name":"floatingpoint","precision":"HALF"}'),
(pa.float32(), '{"name":"floatingpoint","precision":"SINGLE"}'),
(pa.float64(), '{"name":"floatingpoint","precision":"DOUBLE"}'),
(pa.time32('s'), '{"name":"time","unit":"SECOND","bitWidth":32}'),
(pa.time32('ms'), '{"name":"time","unit":"MILLISECOND","bitWidth":32}'),
(pa.time64('us'), '{"name":"time","unit":"MICROSECOND","bitWidth":64}'),
(pa.time64('ns'), '{"name":"time","unit":"NANOSECOND","bitWidth":64}'),
(pa.timestamp('s'), '{"name":"timestamp","unit":"SECOND",'
'"timezone":null}'),
(pa.timestamp('ms'), '{"name":"timestamp","unit":"MILLISECOND",'
'"timezone":null}'),
(pa.timestamp('us'), '{"name":"timestamp","unit":"MICROSECOND",'
'"timezone":null}'),
(pa.timestamp('ns'), '{"name":"timestamp","unit":"NANOSECOND",'
'"timezone":null}'),
(pa.timestamp('ns', tz='UTC'), '{"name":"timestamp","unit":"NANOSECOND"'
',"timezone":"UTC"}'),
(pa.timestamp('ns', tz='Europe/Paris'), '{"name":"timestamp",'
'"unit":"NANOSECOND","timezone":"Europe/Paris"}'),
(pa.date32(), '{"name":"date","unit":"DAY"}'),
(pa.date64(), '{"name":"date","unit":"MILLISECOND"}'),
(pa.decimal128(19, 4), '{"name":"decimal","precision":19,"scale":4}'),
(pa.string(), '{"name":"utf8"}'),
(pa.binary(), '{"name":"binary"}'),
(pa.binary(10), '{"name":"fixedsizebinary","byteWidth":10}'),
# TODO(ARROW-2609): complex types that have children
# pa.list_(pa.int32()),
# pa.struct([pa.field('a', pa.int32()),
# pa.field('b', pa.int8()),
# pa.field('c', pa.string())]),
# pa.union([pa.field('a', pa.binary(10)),
# pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),
# pa.union([pa.field('a', pa.binary(10)),
# pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
# TODO: DictionaryType requires a vector in the type
# pa.dictionary(pa.int32(), pa.array(['a', 'b', 'c'])),
])
@pytest.mark.parametrize('nullable', [True, False])
def test_jvm_types(root_allocator, pa_type, jvm_spec, nullable):
spec = {
'name': 'field_name',
'nullable': nullable,
'type': json.loads(jvm_spec),
# TODO: This needs to be set for complex types
'children': []
}
jvm_field = _jvm_field(json.dumps(spec))
result = pa_jvm.field(jvm_field)
expected_field = pa.field('field_name', pa_type, nullable=nullable)
assert result == expected_field
jvm_schema = _jvm_schema(json.dumps(spec))
result = pa_jvm.schema(jvm_schema)
assert result == pa.schema([expected_field])
# Schema with custom metadata
jvm_schema = _jvm_schema(json.dumps(spec), {'meta': 'data'})
result = pa_jvm.schema(jvm_schema)
assert result == pa.schema([expected_field], {'meta': 'data'})
# These test parameters mostly use an integer range as an input as this is
# often the only type that is understood by both Python and Java
# implementations of Arrow.
@pytest.mark.parametrize('pa_type,py_data,jvm_type', [
(pa.bool_(), [True, False, True, True], 'BitVector'),
(pa.uint8(), list(range(128)), 'UInt1Vector'),
(pa.uint16(), list(range(128)), 'UInt2Vector'),
(pa.int32(), list(range(128)), 'IntVector'),
(pa.int64(), list(range(128)), 'BigIntVector'),
(pa.float32(), list(range(128)), 'Float4Vector'),
(pa.float64(), list(range(128)), 'Float8Vector'),
(pa.timestamp('s'), list(range(128)), 'TimeStampSecVector'),
(pa.timestamp('ms'), list(range(128)), 'TimeStampMilliVector'),
(pa.timestamp('us'), list(range(128)), 'TimeStampMicroVector'),
(pa.timestamp('ns'), list(range(128)), 'TimeStampNanoVector'),
# TODO(ARROW-2605): These types miss a conversion from pure Python objects
# * pa.time32('s')
# * pa.time32('ms')
# * pa.time64('us')
# * pa.time64('ns')
(pa.date32(), list(range(128)), 'DateDayVector'),
(pa.date64(), list(range(128)), 'DateMilliVector'),
# TODO(ARROW-2606): pa.decimal128(19, 4)
])
def test_jvm_array(root_allocator, pa_type, py_data, jvm_type):
# Create vector
cls = "org.apache.arrow.vector.{}".format(jvm_type)
jvm_vector = jpype.JClass(cls)("vector", root_allocator)
jvm_vector.allocateNew(len(py_data))
for i, val in enumerate(py_data):
jvm_vector.setSafe(i, val)
jvm_vector.setValueCount(len(py_data))
py_array = pa.array(py_data, type=pa_type)
jvm_array = pa_jvm.array(jvm_vector)
assert py_array.equals(jvm_array)
# These test parameters mostly use an integer range as an input as this is
# often the only type that is understood by both Python and Java
# implementations of Arrow.
@pytest.mark.parametrize('pa_type,py_data,jvm_type,jvm_spec', [
# TODO: null
(pa.bool_(), [True, False, True, True], 'BitVector', '{"name":"bool"}'),
(
pa.uint8(),
list(range(128)),
'UInt1Vector',
'{"name":"int","bitWidth":8,"isSigned":false}'
),
(
pa.uint16(),
list(range(128)),
'UInt2Vector',
'{"name":"int","bitWidth":16,"isSigned":false}'
),
(
pa.uint32(),
list(range(128)),
'UInt4Vector',
'{"name":"int","bitWidth":32,"isSigned":false}'
),
(
pa.uint64(),
list(range(128)),
'UInt8Vector',
'{"name":"int","bitWidth":64,"isSigned":false}'
),
(
pa.int8(),
list(range(128)),
'TinyIntVector',
'{"name":"int","bitWidth":8,"isSigned":true}'
),
(
pa.int16(),
list(range(128)),
'SmallIntVector',
'{"name":"int","bitWidth":16,"isSigned":true}'
),
(
pa.int32(),
list(range(128)),
'IntVector',
'{"name":"int","bitWidth":32,"isSigned":true}'
),
(
pa.int64(),
list(range(128)),
'BigIntVector',
'{"name":"int","bitWidth":64,"isSigned":true}'
),
# TODO: float16
(
pa.float32(),
list(range(128)),
'Float4Vector',
'{"name":"floatingpoint","precision":"SINGLE"}'
),
(
pa.float64(),
list(range(128)),
'Float8Vector',
'{"name":"floatingpoint","precision":"DOUBLE"}'
),
(
pa.timestamp('s'),
list(range(128)),
'TimeStampSecVector',
'{"name":"timestamp","unit":"SECOND","timezone":null}'
),
(
pa.timestamp('ms'),
list(range(128)),
'TimeStampMilliVector',
'{"name":"timestamp","unit":"MILLISECOND","timezone":null}'
),
(
pa.timestamp('us'),
list(range(128)),
'TimeStampMicroVector',
'{"name":"timestamp","unit":"MICROSECOND","timezone":null}'
),
(
pa.timestamp('ns'),
list(range(128)),
'TimeStampNanoVector',
'{"name":"timestamp","unit":"NANOSECOND","timezone":null}'
),
# TODO(ARROW-2605): These types miss a conversion from pure Python objects
# * pa.time32('s')
# * pa.time32('ms')
# * pa.time64('us')
# * pa.time64('ns')
(
pa.date32(),
list(range(128)),
'DateDayVector',
'{"name":"date","unit":"DAY"}'
),
(
pa.date64(),
list(range(128)),
'DateMilliVector',
'{"name":"date","unit":"MILLISECOND"}'
),
# TODO(ARROW-2606): pa.decimal128(19, 4)
])
def test_jvm_record_batch(root_allocator, pa_type, py_data, jvm_type,
jvm_spec):
# Create vector
cls = "org.apache.arrow.vector.{}".format(jvm_type)
jvm_vector = jpype.JClass(cls)("vector", root_allocator)
jvm_vector.allocateNew(len(py_data))
for i, val in enumerate(py_data):
jvm_vector.setSafe(i, val)
jvm_vector.setValueCount(len(py_data))
# Create field
spec = {
'name': 'field_name',
'nullable': False,
'type': json.loads(jvm_spec),
# TODO: This needs to be set for complex types
'children': []
}
jvm_field = _jvm_field(json.dumps(spec))
# Create VectorSchemaRoot
jvm_fields = jpype.JClass('java.util.ArrayList')()
jvm_fields.add(jvm_field)
jvm_vectors = jpype.JClass('java.util.ArrayList')()
jvm_vectors.add(jvm_vector)
jvm_vsr = jpype.JClass('org.apache.arrow.vector.VectorSchemaRoot')
jvm_vsr = jvm_vsr(jvm_fields, jvm_vectors, len(py_data))
py_record_batch = pa.RecordBatch.from_arrays(
[pa.array(py_data, type=pa_type)],
['col']
)
jvm_record_batch = pa_jvm.record_batch(jvm_vsr)
assert py_record_batch.equals(jvm_record_batch)
def _string_to_varchar_holder(ra, string):
nvch_cls = "org.apache.arrow.vector.holders.NullableVarCharHolder"
holder = jpype.JClass(nvch_cls)()
if string is None:
holder.isSet = 0
else:
holder.isSet = 1
        value = jpype.JClass("java.lang.String")(string)
std_charsets = jpype.JClass("java.nio.charset.StandardCharsets")
bytes_ = value.getBytes(std_charsets.UTF_8)
holder.buffer = ra.buffer(len(bytes_))
holder.buffer.setBytes(0, bytes_, 0, len(bytes_))
holder.start = 0
holder.end = len(bytes_)
return holder
# TODO(ARROW-2607)
@pytest.mark.xfail(reason="from_buffers is only supported for "
"primitive arrays yet")
def test_jvm_string_array(root_allocator):
data = [u"string", None, u"töst"]
cls = "org.apache.arrow.vector.VarCharVector"
jvm_vector = jpype.JClass(cls)("vector", root_allocator)
jvm_vector.allocateNew()
for i, string in enumerate(data):
        holder = _string_to_varchar_holder(root_allocator, string)
jvm_vector.setSafe(i, holder)
jvm_vector.setValueCount(i + 1)
py_array = pa.array(data, type=pa.string())
jvm_array = pa_jvm.array(jvm_vector)
assert py_array.equals(jvm_array)
|
apache-2.0
|
ol-loginov/intellij-community
|
python/lib/Lib/distutils/command/install_egg_info.py
|
438
|
2587
|
"""distutils.command.install_egg_info
Implements the Distutils 'install_egg_info' command, for installing
a package's PKG-INFO metadata."""
from distutils.cmd import Command
from distutils import log, dir_util
import os, sys, re
class install_egg_info(Command):
"""Install an .egg-info file for the package"""
description = "Install package's PKG-INFO metadata as an .egg-info file"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',('install_dir','install_dir'))
basename = "%s-%s-py%s.egg-info" % (
to_filename(safe_name(self.distribution.get_name())),
to_filename(safe_version(self.distribution.get_version())),
sys.version[:3]
)
self.target = os.path.join(self.install_dir, basename)
self.outputs = [self.target]
def run(self):
target = self.target
if os.path.isdir(target) and not os.path.islink(target):
dir_util.remove_tree(target, dry_run=self.dry_run)
elif os.path.exists(target):
self.execute(os.unlink,(self.target,),"Removing "+target)
elif not os.path.isdir(self.install_dir):
self.execute(os.makedirs, (self.install_dir,),
"Creating "+self.install_dir)
log.info("Writing %s", target)
if not self.dry_run:
f = open(target, 'w')
self.distribution.metadata.write_pkg_file(f)
f.close()
def get_outputs(self):
return self.outputs
# The following routines are taken from setuptools' pkg_resources module and
# can be replaced by importing them from pkg_resources once it is included
# in the stdlib.
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
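# Illustrative examples (not part of the original module) of what the helpers
# above produce; the values are easy to verify interactively:
#
#   >>> safe_name('My Package!')
#   'My-Package-'
#   >>> safe_version('1.0 beta 2')
#   '1.0.beta.2'
#   >>> to_filename(safe_name('My Package!'))
#   'My_Package_'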
|
apache-2.0
|
pycroscopy/pycroscopy
|
tests/io/test_hdf_writer.py
|
1
|
36224
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import h5py
import numpy as np
import sys
sys.path.append("../../../pycroscopy/")
from pycroscopy.io.virtual_data import VirtualGroup, VirtualDataset
from pycroscopy.io.hdf_writer import HDFwriter
from pyUSID.io.hdf_utils import get_attr, get_h5_obj_refs # Until an elegant solution presents itself
class TestHDFWriter(unittest.TestCase):
@staticmethod
def __delete_existing_file(file_path):
if os.path.exists(file_path):
os.remove(file_path)
def test_init_invalid_input(self):
with self.assertRaises(TypeError):
_ = HDFwriter(4)
def test_init_path_non_existant_file_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
writer = HDFwriter(file_path)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_path_existing_file_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
# Existing h5 file
writer = HDFwriter(file_path)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_r_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='r')
# hdf handle but of mode r
with self.assertRaises(TypeError):
_ = HDFwriter(h5_f)
os.remove(file_path)
def test_init_h5_handle_r_plus_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='r+')
# open h5 file handle or mode r+
writer = HDFwriter(h5_f)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_w_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='w')
# open h5 file handle or mode w
writer = HDFwriter(h5_f)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_closed(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
# Existing h5 file but closed
with self.assertRaises(ValueError):
_ = HDFwriter(h5_f)
os.remove(file_path)
def test_simple_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dtype = np.uint16
dset_name = 'test'
data = np.random.randint(0, high=15, size=5, dtype=dtype)
microdset = VirtualDataset(dset_name, data)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertTrue(np.allclose(h5_d[()], data))
self.assertEqual(h5_d.dtype, dtype)
os.remove(file_path)
def test_simple_dset_write_success_more_options_02(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
data = np.random.rand(16, 1024)
dtype = data.dtype
compression = 'gzip'
chunking=(1, 1024)
microdset = VirtualDataset(dset_name, data, dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertTrue(np.allclose(h5_d[()], data))
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
os.remove(file_path)
def test_simple_dset_write_success_more_options_03(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
data = np.random.rand(16, 1024)
dtype = np.float16
compression = 'gzip'
chunking=(1, 1024)
microdset = VirtualDataset(dset_name, data, dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
self.assertTrue(np.all(h5_d[()] - data < 1E-3))
os.remove(file_path)
def test_empty_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (16, 1024)
microdset = VirtualDataset(dset_name, None, maxshape=maxshape)
writer = HDFwriter(h5_f)
h5_d = writer._create_empty_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, maxshape)
self.assertEqual(h5_d.maxshape, maxshape)
# dtype is assigned automatically by h5py. Not to be tested here
os.remove(file_path)
def test_empty_dset_write_success_w_options_02(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (16, 1024)
chunking = (1, 1024)
compression = 'gzip'
dtype = np.float16
microdset = VirtualDataset(dset_name, None, maxshape=maxshape,
dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_empty_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
self.assertEqual(h5_d.shape, maxshape)
self.assertEqual(h5_d.maxshape, maxshape)
os.remove(file_path)
def test_expandable_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (None, 1024)
data = np.random.rand(1, 1024)
microdset = VirtualDataset(dset_name, data, maxshape=maxshape)
writer = HDFwriter(h5_f)
h5_d = writer._create_resizeable_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertEqual(h5_d.maxshape, maxshape)
self.assertTrue(np.allclose(h5_d[()], data))
# Now test to make sure that the dataset can be expanded:
# TODO: add this to the example!
expansion_axis = 0
h5_d.resize(h5_d.shape[expansion_axis] + 1, axis=expansion_axis)
self.assertEqual(h5_d.shape, (data.shape[0]+1, data.shape[1]))
self.assertEqual(h5_d.maxshape, maxshape)
# Finally try checking to see if this new data is also present in the file
new_data = np.random.rand(1024)
h5_d[1] = new_data
data = np.vstack((np.squeeze(data), new_data))
self.assertTrue(np.allclose(h5_d[()], data))
os.remove(file_path)
# TODO: will have to check to see if the parent is correctly declared for the group
def test_group_create_non_indexed_simple_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = 'test'
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
h5_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_grp, h5py.Group)
self.assertEqual(h5_grp.parent, h5_f)
self.assertEqual(h5_grp.name, '/' + grp_name)
# self.assertEqual(len(h5_grp.items), 0)
os.remove(file_path)
def test_group_create_indexed_simple_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = 'test_'
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
h5_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_grp, h5py.Group)
self.assertEqual(h5_grp.parent, h5_f)
self.assertEqual(h5_grp.name, '/' + grp_name + '000')
# self.assertEqual(len(h5_grp.items), 0)
os.remove(file_path)
def test_group_create_root_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = ''
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
with self.assertRaises(ValueError):
_ = writer._create_group(h5_f, micro_group)
os.remove(file_path)
def test_group_create_indexed_nested_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
outer_grp_name = 'outer_'
micro_group = VirtualGroup(outer_grp_name)
writer = HDFwriter(h5_f)
h5_outer_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_outer_grp, h5py.Group)
self.assertEqual(h5_outer_grp.parent, h5_f)
self.assertEqual(h5_outer_grp.name, '/' + outer_grp_name + '000')
inner_grp_name = 'inner_'
micro_group = VirtualGroup(inner_grp_name)
h5_inner_grp = writer._create_group(h5_outer_grp, micro_group)
self.assertIsInstance(h5_inner_grp, h5py.Group)
self.assertEqual(h5_inner_grp.parent, h5_outer_grp)
self.assertEqual(h5_inner_grp.name, h5_outer_grp.name + '/' + inner_grp_name + '000')
os.remove(file_path)
def test_write_legal_reg_ref_multi_dim_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_legal_reg_ref_multi_dim_data_2nd_dim(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 3)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(None), slice(0, None, 2)),
'odd_rows': (slice(None), slice(1, None, 2))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:, 0:None:2], data[:, 1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_legal_reg_ref_one_dim_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2)),
'odd_rows': (slice(1, None, 2))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_generate_and_write_reg_ref_legal(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(2, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': ['row_1', 'row_2']}
if sys.version_info.major == 3:
with self.assertWarns(UserWarning):
writer._write_dset_attributes(h5_dset, attrs.copy())
else:
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels']) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[0], data[1]]
written_data = [h5_dset[h5_dset.attrs['row_1']], h5_dset[h5_dset.attrs['row_2']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(np.squeeze(exp), np.squeeze(act)))
os.remove(file_path)
def test_generate_and_write_reg_ref_illegal(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(3, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
# with self.assertWarns(UserWarning):
writer._write_dset_attributes(h5_dset, {'labels': ['row_1', 'row_2']})
self.assertEqual(len(h5_dset.attrs), 0)
h5_f.flush()
os.remove(file_path)
    def test_generate_and_write_reg_ref_illegal_types(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(2, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
# with self.assertWarns(UserWarning):
with self.assertRaises(TypeError):
writer._write_dset_attributes(h5_dset, {'labels': [1, np.arange(3)]})
os.remove(file_path)
def test_write_illegal_reg_ref_too_many_slices(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), slice(None), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None), slice(None))}}
with self.assertRaises(ValueError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_illegal_reg_ref_too_few_slices(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2)),
'odd_rows': (slice(1, None, 2))}}
with self.assertRaises(ValueError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_reg_ref_slice_dim_larger_than_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, 15, 2), slice(None)),
'odd_rows': (slice(1, 15, 2), slice(None))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_illegal_reg_ref_not_slice_objs(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), 15),
'odd_rows': (slice(1, None, 2), 'hello')}}
with self.assertRaises(TypeError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_simple_atts_reg_ref_to_dset(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
writer._write_dset_attributes(h5_dset, attrs.copy())
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_invalid_input(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
with self.assertRaises(TypeError):
_ = writer.write(np.arange(5))
def test_write_dset_under_root(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
micro_dset = VirtualDataset('test', data)
micro_dset.attrs = attrs.copy()
[h5_dset] = writer.write(micro_dset)
self.assertIsInstance(h5_dset, h5py.Dataset)
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_dset_under_existing_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
h5_g = writer._create_group(h5_f, VirtualGroup('test_group'))
self.assertIsInstance(h5_g, h5py.Group)
data = np.random.rand(5, 7)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
micro_dset = VirtualDataset('test', data, parent='/test_group')
micro_dset.attrs = attrs.copy()
[h5_dset] = writer.write(micro_dset)
self.assertIsInstance(h5_dset, h5py.Dataset)
self.assertEqual(h5_dset.parent, h5_g)
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_dset_under_invalid_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
with self.assertRaises(KeyError):
_ = writer.write(VirtualDataset('test', np.random.rand(5, 7), parent='/does_not_exist'))
os.remove(file_path)
def test_write_root(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
micro_group = VirtualGroup('')
micro_group.attrs = attrs
writer = HDFwriter(h5_f)
[ret_val] = writer.write(micro_group)
self.assertIsInstance(ret_val, h5py.File)
self.assertEqual(h5_f, ret_val)
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_f, key) == expected_val))
os.remove(file_path)
def test_write_single_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
micro_group = VirtualGroup('Test_')
micro_group.attrs = attrs
writer = HDFwriter(h5_f)
[h5_group] = writer.write(micro_group)
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_group, key) == expected_val))
os.remove(file_path)
def test_group_indexing_sequential(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
micro_group_0 = VirtualGroup('Test_', attrs={'att_1': 'string_val', 'att_2': 1.2345})
[h5_group_0] = writer.write(micro_group_0)
_ = writer.write(VirtualGroup('blah'))
self.assertIsInstance(h5_group_0, h5py.Group)
self.assertEqual(h5_group_0.name, '/Test_000')
for key, expected_val in micro_group_0.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_0, key) == expected_val))
micro_group_1 = VirtualGroup('Test_', attrs={'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']})
[h5_group_1] = writer.write(micro_group_1)
self.assertIsInstance(h5_group_1, h5py.Group)
self.assertEqual(h5_group_1.name, '/Test_001')
for key, expected_val in micro_group_1.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_1, key) == expected_val))
os.remove(file_path)
def test_group_indexing_simultaneous(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
micro_group_0 = VirtualGroup('Test_', attrs = {'att_1': 'string_val', 'att_2': 1.2345})
micro_group_1 = VirtualGroup('Test_', attrs={'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']})
root_group = VirtualGroup('', children=[VirtualGroup('blah'), micro_group_0,
VirtualGroup('meh'), micro_group_1])
writer = HDFwriter(h5_f)
h5_refs_list = writer.write(root_group)
[h5_group_1] = get_h5_obj_refs(['Test_001'], h5_refs_list)
[h5_group_0] = get_h5_obj_refs(['Test_000'], h5_refs_list)
self.assertIsInstance(h5_group_0, h5py.Group)
self.assertEqual(h5_group_0.name, '/Test_000')
for key, expected_val in micro_group_0.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_0, key) == expected_val))
self.assertIsInstance(h5_group_1, h5py.Group)
self.assertEqual(h5_group_1.name, '/Test_001')
for key, expected_val in micro_group_1.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_1, key) == expected_val))
os.remove(file_path)
def test_write_simple_tree(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
inner_dset_data = np.random.rand(5, 7)
inner_dset_attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
inner_dset = VirtualDataset('inner_dset', inner_dset_data)
inner_dset.attrs = inner_dset_attrs.copy()
attrs_inner_grp = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
inner_group = VirtualGroup('indexed_inner_group_')
inner_group.attrs = attrs_inner_grp
inner_group.add_children(inner_dset)
outer_dset_data = np.random.rand(5, 7)
outer_dset_attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
outer_dset = VirtualDataset('test', outer_dset_data, parent='/test_group')
outer_dset.attrs = outer_dset_attrs.copy()
attrs_outer_grp = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
outer_group = VirtualGroup('unindexed_outer_group')
outer_group.attrs = attrs_outer_grp
outer_group.add_children([inner_group, outer_dset])
writer = HDFwriter(h5_f)
h5_refs_list = writer.write(outer_group)
# I don't know of a more elegant way to do this:
[h5_outer_dset] = get_h5_obj_refs([outer_dset.name], h5_refs_list)
[h5_inner_dset] = get_h5_obj_refs([inner_dset.name], h5_refs_list)
[h5_outer_group] = get_h5_obj_refs([outer_group.name], h5_refs_list)
[h5_inner_group] = get_h5_obj_refs(['indexed_inner_group_000'], h5_refs_list)
self.assertIsInstance(h5_outer_dset, h5py.Dataset)
self.assertIsInstance(h5_inner_dset, h5py.Dataset)
self.assertIsInstance(h5_outer_group, h5py.Group)
self.assertIsInstance(h5_inner_group, h5py.Group)
# check assertions for the inner dataset first
self.assertEqual(h5_inner_dset.parent, h5_inner_group)
reg_ref = inner_dset_attrs.pop('labels')
self.assertEqual(len(h5_inner_dset.attrs), len(inner_dset_attrs) + 1 + len(reg_ref))
for key, expected_val in inner_dset_attrs.items():
self.assertTrue(np.all(get_attr(h5_inner_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_inner_dset, 'labels')]))
expected_data = [inner_dset_data[:None:2], inner_dset_data[1:None:2]]
written_data = [h5_inner_dset[h5_inner_dset.attrs['even_rows']], h5_inner_dset[h5_inner_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
# check assertions for the inner data group next:
self.assertEqual(h5_inner_group.parent, h5_outer_group)
for key, expected_val in attrs_inner_grp.items():
self.assertTrue(np.all(get_attr(h5_inner_group, key) == expected_val))
# check the outer dataset next:
self.assertEqual(h5_outer_dset.parent, h5_outer_group)
reg_ref = outer_dset_attrs.pop('labels')
self.assertEqual(len(h5_outer_dset.attrs), len(outer_dset_attrs) + 1 + len(reg_ref))
for key, expected_val in outer_dset_attrs.items():
self.assertTrue(np.all(get_attr(h5_outer_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_outer_dset, 'labels')]))
expected_data = [outer_dset_data[:None:2], outer_dset_data[1:None:2]]
written_data = [h5_outer_dset[h5_outer_dset.attrs['even_rows']],
h5_outer_dset[h5_outer_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
# Finally check the outer group:
self.assertEqual(h5_outer_group.parent, h5_f)
for key, expected_val in attrs_outer_grp.items():
self.assertTrue(np.all(get_attr(h5_outer_group, key) == expected_val))
os.remove(file_path)
if __name__ == '__main__':
unittest.main()
|
mit
|
gmalmquist/pants
|
src/python/pants/backend/python/interpreter_cache.py
|
5
|
8302
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pex.interpreter import PythonIdentity, PythonInterpreter
from pex.package import EggPackage, Package, SourcePackage
from pex.resolver import resolve
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir
# TODO(wickman) Create a safer version of this and add to twitter.common.dirutil
def _safe_link(src, dst):
try:
os.unlink(dst)
except OSError:
pass
os.symlink(src, dst)
class PythonInterpreterCache(object):
@staticmethod
def _matches(interpreter, filters):
return any(interpreter.identity.matches(filt) for filt in filters)
@classmethod
def _matching(cls, interpreters, filters):
for interpreter in interpreters:
if cls._matches(interpreter, filters):
yield interpreter
@classmethod
def select_interpreter(cls, compatibilities, allow_multiple=False):
"""Given a set of interpreters, either return them all if ``allow_multiple`` is ``True``;
otherwise, return the lowest compatible interpreter.
"""
if allow_multiple:
return compatibilities
return [min(compatibilities)] if compatibilities else []
def __init__(self, python_setup, python_repos, logger=None):
self._python_setup = python_setup
self._python_repos = python_repos
self._cache_dir = python_setup.interpreter_cache_dir
safe_mkdir(self._cache_dir)
self._interpreters = set()
self._logger = logger or (lambda msg: True)
self._default_filters = (python_setup.interpreter_requirement or b'',)
@property
def interpreters(self):
"""Returns the set of cached interpreters."""
return self._interpreters
def _interpreter_from_path(self, path, filters):
interpreter_dir = os.path.basename(path)
identity = PythonIdentity.from_path(interpreter_dir)
try:
executable = os.readlink(os.path.join(path, 'python'))
except OSError:
return None
interpreter = PythonInterpreter(executable, identity)
if self._matches(interpreter, filters):
return self._resolve(interpreter)
return None
def _setup_interpreter(self, interpreter, cache_target_path):
with safe_concurrent_creation(cache_target_path) as safe_path:
os.mkdir(safe_path) # Parent will already have been created by safe_concurrent_creation.
os.symlink(interpreter.binary, os.path.join(safe_path, 'python'))
return self._resolve(interpreter, safe_path)
def _setup_cached(self, filters):
"""Find all currently-cached interpreters."""
for interpreter_dir in os.listdir(self._cache_dir):
path = os.path.join(self._cache_dir, interpreter_dir)
pi = self._interpreter_from_path(path, filters)
if pi:
self._logger('Detected interpreter {}: {}'.format(pi.binary, str(pi.identity)))
self._interpreters.add(pi)
def _setup_paths(self, paths, filters):
"""Find interpreters under paths, and cache them."""
for interpreter in self._matching(PythonInterpreter.all(paths), filters):
identity_str = str(interpreter.identity)
cache_path = os.path.join(self._cache_dir, identity_str)
pi = self._interpreter_from_path(cache_path, filters)
if pi is None:
self._setup_interpreter(interpreter, cache_path)
pi = self._interpreter_from_path(cache_path, filters)
if pi is None:
continue
self._interpreters.add(pi)
def matched_interpreters(self, filters):
"""Given some filters, yield any interpreter that matches at least one of them.
:param filters: A sequence of strings that constrain the interpreter compatibility for this
cache, using the Requirement-style format, e.g. ``'CPython>=3', or just ['>=2.7','<3']``
for requirements agnostic to interpreter class.
"""
for match in self._matching(self._interpreters, filters):
yield match
def setup(self, paths=(), force=False, filters=(b'',)):
"""Sets up a cache of python interpreters.
    NB: Must be called prior to accessing the ``interpreters`` property or the ``matched_interpreters`` method.
:param paths: The paths to search for a python interpreter; the system ``PATH`` by default.
:param bool force: When ``True`` the interpreter cache is always re-built.
:param filters: A sequence of strings that constrain the interpreter compatibility for this
cache, using the Requirement-style format, e.g. ``'CPython>=3', or just ['>=2.7','<3']``
for requirements agnostic to interpreter class.
"""
filters = self._default_filters if not any(filters) else filters
setup_paths = paths or os.getenv('PATH').split(os.pathsep)
self._setup_cached(filters)
def unsatisfied_filters():
return filter(lambda filt: len(list(self._matching(self._interpreters, [filt]))) == 0, filters)
if force or len(unsatisfied_filters()) > 0:
self._setup_paths(setup_paths, filters)
for filt in unsatisfied_filters():
self._logger('No valid interpreters found for {}!'.format(filt))
matches = list(self.matched_interpreters(filters))
if len(matches) == 0:
self._logger('Found no valid interpreters!')
return matches
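  # Rough usage sketch (illustrative only; ``python_setup`` and ``python_repos``
  # are assumed to be the usual Pants subsystem instances):
  #
  #   cache = PythonInterpreterCache(python_setup, python_repos)
  #   interpreters = cache.setup(filters=('CPython>=2.7,<3',))
  #   if interpreters:
  #     interpreter = cache.select_interpreter(interpreters)[0]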
def _resolve(self, interpreter, interpreter_dir=None):
"""Resolve and cache an interpreter with a setuptools and wheel capability."""
interpreter = self._resolve_interpreter(interpreter, interpreter_dir,
self._python_setup.setuptools_requirement())
if interpreter:
return self._resolve_interpreter(interpreter, interpreter_dir,
self._python_setup.wheel_requirement())
def _resolve_interpreter(self, interpreter, interpreter_dir, requirement):
"""Given a :class:`PythonInterpreter` and a requirement, return an interpreter with the
capability of resolving that requirement or ``None`` if it's not possible to install a
suitable requirement.
If interpreter_dir is unspecified, operates on the default location.
"""
if interpreter.satisfies([requirement]):
return interpreter
if not interpreter_dir:
interpreter_dir = os.path.join(self._cache_dir, str(interpreter.identity))
target_link = os.path.join(interpreter_dir, requirement.key)
bdist = self._resolve_and_link(interpreter, requirement, target_link)
if bdist:
return interpreter.with_extra(bdist.name, bdist.raw_version, bdist.path)
else:
self._logger('Failed to resolve requirement {} for {}'.format(requirement, interpreter))
def _resolve_and_link(self, interpreter, requirement, target_link):
# Short-circuit if there is a local copy.
if os.path.exists(target_link) and os.path.exists(os.path.realpath(target_link)):
bdist = Package.from_href(os.path.realpath(target_link))
if bdist.satisfies(requirement):
return bdist
# Since we're resolving to bootstrap a bare interpreter, we won't have wheel available.
# Explicitly set the precedence to avoid resolution of wheels or distillation of sdists into
# wheels.
precedence = (EggPackage, SourcePackage)
distributions = resolve(requirements=[requirement],
fetchers=self._python_repos.get_fetchers(),
interpreter=interpreter,
context=self._python_repos.get_network_context(),
precedence=precedence)
if not distributions:
return None
assert len(distributions) == 1, ('Expected exactly 1 distribution to be resolved for {}, '
'found:\n\t{}'.format(requirement,
'\n\t'.join(map(str, distributions))))
dist_location = distributions[0].location
target_location = os.path.join(os.path.dirname(target_link), os.path.basename(dist_location))
shutil.move(dist_location, target_location)
_safe_link(target_location, target_link)
self._logger(' installed {}'.format(target_location))
return Package.from_href(target_location)
|
apache-2.0
|
Vauxoo/maintainer-tools
|
tools/set_repo_labels.py
|
13
|
2539
|
# -*- coding: utf-8 -*-
"""
Create and modify labels on GitHub so that all repositories share the same
labels and the same colors.
"""
from .github_login import login
REPO_TO_IGNORE = [
'odoo-community.org',
'community-data-files',
'contribute-md-template',
'website',
]
# here is the list of labels we need in each repo
all_labels = {
'7.0': '000000',
'8.0': '000000',
'bug': 'fc2929',
'duplicate': 'cccccc',
'enhancement': '84b6eb',
'help wanted': '159818',
'invalid': 'e6e6e6',
'question': 'cc317c',
'needs fixing': 'eb6420',
'needs review': 'fbca04',
'work in progress': '0052cc',
'wontfix': 'ffffff',
}
def main():
gh = login()
all_repos = gh.iter_user_repos('OCA')
for repo in all_repos:
if repo.name in REPO_TO_IGNORE:
continue
labels = repo.iter_labels()
existing_labels = dict((l.name, l.color) for l in labels)
to_create = []
to_change_color = []
for needed_label in all_labels:
if needed_label not in existing_labels.keys():
to_create.append(needed_label)
elif existing_labels[needed_label] != all_labels[needed_label]:
to_change_color.append(needed_label)
extra_labels = [l for l in existing_labels if l not in all_labels]
if to_create:
print ('Repo %s - Create %s missing labels'
% (repo.name, len(to_create)))
for label_name in to_create:
success = repo.create_label(label_name, all_labels[label_name])
if not success:
print ("Failed to create a label on '%s'!"
" Please check you access right to this repository."
% repo.name)
if to_change_color:
print ('Repo %s - Update %s labels with wrong color'
% (repo.name, len(to_change_color)))
for label_name in to_change_color:
success = repo.update_label(label_name, all_labels[label_name])
if not success:
print ("Failed to update a label on '%s'!"
" Please check you access right to this repository."
% repo.name)
if extra_labels:
print ('Repo %s - Found %s extra labels'
% (repo.name, len(extra_labels)))
for label_name in extra_labels:
                print(label_name)
if __name__ == '__main__':
main()
|
agpl-3.0
|
ppizarror/Hero-of-Antair
|
data/images/pil/ImageChops.py
|
2
|
7410
|
#
# The Python Imaging Library.
# $Id$
#
# standard channel operations
#
# History:
# 1996-03-24 fl Created
# 1996-08-13 fl Added logical operations (for "1" images)
# 2000-10-12 fl Added offset method (from Image.py)
#
# Copyright (c) 1997-2000 by Secret Labs AB
# Copyright (c) 1996-2000 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
##
# The <b>ImageChops</b> module contains a number of arithmetical image
# operations, called <i>channel operations</i> ("chops"). These can be
# used for various purposes, including special effects, image
# compositions, algorithmic painting, and more.
# <p>
# At this time, channel operations are only implemented for 8-bit
# images (e.g. "L" and "RGB").
# <p>
# Most channel operations take one or two image arguments and return
# a new image. Unless otherwise noted, the result of a channel
# operation is always clipped to the range 0 to MAX (which is 255 for
# all modes supported by the operations in this module).
##
##
# Return an image with the same size as the given image, but filled
# with the given pixel value.
#
# @param image Reference image.
# @param value Pixel value.
# @return An image object.
def constant(image, value):
"Fill a channel with a given grey level"
return Image.new("L", image.size, value)
##
# Copy image.
#
# @param image Source image.
# @return A copy of the source image.
def duplicate(image):
"Create a copy of a channel"
return image.copy()
##
# Inverts an image
# (MAX - image).
#
# @param image Source image.
# @return An image object.
def invert(image):
"Invert a channel"
image.load()
return image._new(image.im.chop_invert())
##
# Compare images, and return lighter pixel value
# (max(image1, image2)).
# <p>
# Compares the two images, pixel by pixel, and returns a new image
# containing the lighter values.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def lighter(image1, image2):
"Select the lighter pixels from each image"
image1.load()
image2.load()
return image1._new(image1.im.chop_lighter(image2.im))
##
# Compare images, and return darker pixel value
# (min(image1, image2)).
# <p>
# Compares the two images, pixel by pixel, and returns a new image
# containing the darker values.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def darker(image1, image2):
"Select the darker pixels from each image"
image1.load()
image2.load()
return image1._new(image1.im.chop_darker(image2.im))
##
# Calculate absolute difference
# (abs(image1 - image2)).
# <p>
# Returns the absolute value of the difference between the two images.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def difference(image1, image2):
"Subtract one image from another"
image1.load()
image2.load()
return image1._new(image1.im.chop_difference(image2.im))
##
# Superimpose positive images
# (image1 * image2 / MAX).
# <p>
# Superimposes two images on top of each other. If you multiply an
# image with a solid black image, the result is black. If you multiply
# with a solid white image, the image is unaffected.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def multiply(image1, image2):
"Superimpose two positive images"
image1.load()
image2.load()
return image1._new(image1.im.chop_multiply(image2.im))
##
# Superimpose negative images
# (MAX - ((MAX - image1) * (MAX - image2) / MAX)).
# <p>
# Superimposes two inverted images on top of each other.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def screen(image1, image2):
"Superimpose two negative images"
image1.load()
image2.load()
return image1._new(image1.im.chop_screen(image2.im))
##
# Add images
# ((image1 + image2) / scale + offset).
# <p>
# Adds two images, dividing the result by scale and adding the
# offset. If omitted, scale defaults to 1.0, and offset to 0.0.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def add(image1, image2, scale=1.0, offset=0):
"Add two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_add(image2.im, scale, offset))
##
# Subtract images
# ((image1 - image2) / scale + offset).
# <p>
# Subtracts two images, dividing the result by scale and adding the
# offset. If omitted, scale defaults to 1.0, and offset to 0.0.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def subtract(image1, image2, scale=1.0, offset=0):
"Subtract two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract(image2.im, scale, offset))
##
# Add images without clipping
# ((image1 + image2) % MAX).
# <p>
# Adds two images, without clipping the result.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def add_modulo(image1, image2):
"Add two images without clipping"
image1.load()
image2.load()
return image1._new(image1.im.chop_add_modulo(image2.im))
##
# Subtract images without clipping
# ((image1 - image2) % MAX).
# <p>
# Subtracts two images, without clipping the result.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def subtract_modulo(image1, image2):
"Subtract two images without clipping"
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract_modulo(image2.im))
##
# Logical AND
# (image1 and image2).
def logical_and(image1, image2):
"Logical and between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_and(image2.im))
##
# Logical OR
# (image1 or image2).
def logical_or(image1, image2):
"Logical or between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_or(image2.im))
##
# Logical XOR
# (image1 xor image2).
def logical_xor(image1, image2):
"Logical xor between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_xor(image2.im))
##
# Blend images using constant transparency weight.
# <p>
# Same as the <b>blend</b> function in the <b>Image</b> module.
def blend(image1, image2, alpha):
"Blend two images using a constant transparency weight"
return Image.blend(image1, image2, alpha)
##
# Create composite using transparency mask.
# <p>
# Same as the <b>composite</b> function in the <b>Image</b> module.
def composite(image1, image2, mask):
"Create composite image by blending images using a transparency mask"
return Image.composite(image1, image2, mask)
##
# Offset image data.
# <p>
# Returns a copy of the image where data has been offset by the given
# distances. Data wraps around the edges. If yoffset is omitted, it
# is assumed to be equal to xoffset.
#
# @param image Source image.
# @param xoffset The horizontal distance.
# @param yoffset The vertical distance. If omitted, both
# distances are set to the same value.
# @return An Image object.
def offset(image, xoffset, yoffset=None):
"Offset image in horizontal and/or vertical direction"
if yoffset is None:
yoffset = xoffset
image.load()
return image._new(image.im.offset(xoffset, yoffset))
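# Illustrative usage sketch, not part of the original module. The file names
# below are placeholders; any two same-sized "L" or "RGB" images will do.
if __name__ == "__main__":
    im1 = Image.open("a.png").convert("L")
    im2 = Image.open("b.png").convert("L")
    diff = difference(im1, im2)         # abs(im1 - im2), clipped to 0..255
    average = add(im1, im2, scale=2.0)  # (im1 + im2) / 2
    shifted = offset(im1, 10)           # wrap pixel data 10 pixels in x and y
    diff.save("diff.png")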
|
gpl-2.0
|
BMJHayward/numpy
|
numpy/polynomial/hermite_e.py
|
49
|
57120
|
"""
Objects for dealing with Hermite_e series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite_e series, including a `HermiteE` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermedomain` -- Hermite_e series default domain, [-1,1].
- `hermezero` -- Hermite_e series that evaluates identically to 0.
- `hermeone` -- Hermite_e series that evaluates identically to 1.
- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``.
- `hermeadd` -- add two Hermite_e series.
- `hermesub` -- subtract one Hermite_e series from another.
- `hermemul` -- multiply two Hermite_e series.
- `hermediv` -- divide one Hermite_e series by another.
- `hermeval` -- evaluate a Hermite_e series at given points.
- `hermeval2d` -- evaluate a 2D Hermite_e series at given points.
- `hermeval3d` -- evaluate a 3D Hermite_e series at given points.
- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product.
- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product.
Calculus
--------
- `hermeder` -- differentiate a Hermite_e series.
- `hermeint` -- integrate a Hermite_e series.
Misc Functions
--------------
- `hermefromroots` -- create a Hermite_e series with specified roots.
- `hermeroots` -- find the roots of a Hermite_e series.
- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials.
- `hermevander2d` -- Vandermonde-like matrix for 2D power series.
- `hermevander3d` -- Vandermonde-like matrix for 3D power series.
- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights.
- `hermeweight` -- Hermite_e weight function.
- `hermecompanion` -- symmetrized companion matrix in Hermite_e form.
- `hermefit` -- least-squares fit returning a Hermite_e series.
- `hermetrim` -- trim leading coefficients from a Hermite_e series.
- `hermeline` -- Hermite_e series of given straight line.
- `herme2poly` -- convert a Hermite_e series to a polynomial.
- `poly2herme` -- convert a polynomial to a Hermite_e series.
Classes
-------
- `HermiteE` -- A Hermite_e series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline',
'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv',
'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly',
'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim',
'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d',
'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion',
'hermegauss', 'hermeweight']
hermetrim = pu.trimcoef
def poly2herme(pol):
"""
poly2herme(pol)
Convert a polynomial to a Hermite series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Hermite series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Hermite
series.
See Also
--------
herme2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite_e import poly2herme
>>> poly2herme(np.arange(4))
array([ 2., 10., 2., 3.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = hermeadd(hermemulx(res), pol[i])
return res
def herme2poly(c):
"""
Convert a Hermite series to a polynomial.
Convert an array representing the coefficients of a Hermite series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Hermite series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2herme
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite_e import herme2poly
>>> herme2poly([ 2., 10., 2., 3.])
array([ 0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
if n == 2:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1*(i - 1))
c1 = polyadd(tmp, polymulx(c1))
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Hermite
hermedomain = np.array([-1, 1])
# Hermite coefficients representing zero.
hermezero = np.array([0])
# Hermite coefficients representing one.
hermeone = np.array([1])
# Hermite coefficients representing the identity x.
hermex = np.array([0, 1])
def hermeline(off, scl):
"""
Hermite series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Hermite series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeline, hermeval
>>> hermeval(0,hermeline(3, 2))
3.0
>>> hermeval(1,hermeline(3, 2))
5.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def hermefromroots(roots):
"""
Generate a HermiteE series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in HermiteE form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in HermiteE form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, hermfromroots,
chebfromroots.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
>>> coef = hermefromroots((-1, 0, 1))
>>> hermeval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = hermefromroots((-1j, 1j))
>>> hermeval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [hermeline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [hermemul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = hermemul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def hermeadd(c1, c2):
"""
Add one Hermite series to another.
Returns the sum of two Hermite series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Hermite series of their sum.
See Also
--------
hermesub, hermemul, hermediv, hermepow
Notes
-----
Unlike multiplication, division, etc., the sum of two Hermite series
is a Hermite series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeadd
>>> hermeadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermesub(c1, c2):
"""
Subtract one Hermite series from another.
Returns the difference of two Hermite series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their difference.
See Also
--------
hermeadd, hermemul, hermediv, hermepow
Notes
-----
Unlike multiplication, division, etc., the difference of two Hermite
series is a Hermite series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite_e import hermesub
>>> hermesub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermemulx(c):
"""Multiply a Hermite series by x.
Multiply the Hermite series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Hermite
polynomials in the form
.. math::
        xP_i(x) = P_{i + 1}(x) + iP_{i - 1}(x)
Examples
--------
>>> from numpy.polynomial.hermite_e import hermemulx
>>> hermemulx([1, 2, 3])
array([ 2., 7., 2., 3.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
prd[i + 1] = c[i]
prd[i - 1] += c[i]*i
return prd
def hermemul(c1, c2):
"""
Multiply one Hermite series by another.
Returns the product of two Hermite series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their product.
See Also
--------
hermeadd, hermesub, hermediv, hermepow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Hermite polynomial basis set. Thus, to express
the product as a Hermite series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermemul
>>> hermemul([1, 2, 3], [0, 1, 2])
array([ 14., 15., 28., 7., 6.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = hermesub(c[-i]*xs, c1*(nd - 1))
c1 = hermeadd(tmp, hermemulx(c1))
return hermeadd(c0, hermemulx(c1))
def hermediv(c1, c2):
"""
Divide one Hermite series by another.
Returns the quotient-with-remainder of two Hermite series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Hermite series coefficients representing the quotient and
remainder.
See Also
--------
hermeadd, hermesub, hermemul, hermepow
Notes
-----
In general, the (polynomial) division of one Hermite series by another
results in quotient and remainder terms that are not in the Hermite
polynomial basis set. Thus, to express these results as a Hermite
series, it is necessary to "reproject" the results onto the Hermite
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermediv
>>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 2.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = hermemul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def hermepow(c, pow, maxpower=16):
"""Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermeadd, hermesub, hermemul, hermediv
Examples
--------
>>> from numpy.polynomial.hermite_e import hermepow
>>> hermepow([1, 2, 3], 2)
array([ 23., 28., 46., 12., 9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = hermemul(prd, c)
return prd
def hermeder(c, m=1, scl=1, axis=0):
"""
Differentiate a Hermite_e series.
Returns the series coefficients `c` differentiated `m` times along
`axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2``
while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y)
+ 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1
is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Hermite series of the derivative.
See Also
--------
hermeint
Notes
-----
In general, the result of differentiating a Hermite series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeder
>>> hermeder([ 1., 1., 1., 1.])
array([ 1., 2., 3.])
>>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
return c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite_e series.
Returns the Hermite_e series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite_e series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
hermeder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeint
>>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
array([ 1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ])
>>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
array([ 2., 1., 1., 1.])
>>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
array([-1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - hermeval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermeval(x, c, tensor=True):
"""
Evaluate an HermiteE series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
hermeval2d, hermegrid2d, hermeval3d, hermegrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeval
>>> coef = [1,2,3]
>>> hermeval(1, coef)
3.0
>>> hermeval([[1,2],[3,4]], coef)
array([[ 3., 14.],
[ 31., 54.]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - c1*(nd - 1)
c1 = tmp + c1*x
return c0 + c1*x
def hermeval2d(x, y, c):
"""
Evaluate a 2-D HermiteE series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y)
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
hermeval, hermegrid2d, hermeval3d, hermegrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
    except Exception:
raise ValueError('x, y are incompatible')
c = hermeval(x, c)
c = hermeval(y, c, tensor=False)
return c
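# Illustrative sketch, not part of the original module: evaluating a small 2-D
# HermiteE series at two (x, y) points with the function defined above.
def _example_hermeval2d():
    x = np.array([0.0, 1.0])
    y = np.array([0.0, 2.0])
    c = np.array([[1.0, 2.0],
                  [3.0, 4.0]])      # c[i, j] multiplies He_i(x)*He_j(y)
    # He_0(t) = 1 and He_1(t) = t, so p(x, y) = 1 + 2*y + 3*x + 4*x*y here.
    return hermeval2d(x, y, c)      # array([  1.,  16.])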
def hermegrid2d(x, y, c):
"""
Evaluate a 2-D HermiteE series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \sum_{i,j} c_{i,j} * H_i(a) * H_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
    x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermeval, hermeval2d, hermeval3d, hermegrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
c = hermeval(x, c)
c = hermeval(y, c)
return c
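# Illustrative sketch, not part of the original module: unlike hermeval2d,
# hermegrid2d evaluates on the full Cartesian product of x and y, so x and y
# need not have the same shape and the result has shape x.shape + y.shape.
def _example_hermegrid2d():
    x = np.array([0.0, 1.0])
    y = np.array([0.0, 1.0, 2.0])
    c = np.array([[1.0, 2.0],
                  [3.0, 4.0]])           # p(x, y) = 1 + 2*y + 3*x + 4*x*y
    return hermegrid2d(x, y, c).shape    # (2, 3)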
def hermeval3d(x, y, z, c):
"""
Evaluate a 3-D Hermite_e series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
hermeval, hermeval2d, hermegrid2d, hermegrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
    except Exception:
raise ValueError('x, y, z are incompatible')
c = hermeval(x, c)
c = hermeval(y, c, tensor=False)
c = hermeval(z, c, tensor=False)
return c
def hermegrid3d(x, y, z, c):
"""
Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
hermeval, hermeval2d, hermegrid2d, hermeval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
c = hermeval(x, c)
c = hermeval(y, c)
c = hermeval(z, c)
return c
def hermevander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = He_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the HermiteE polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
``hermeval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of HermiteE series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding HermiteE polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermevander
>>> x = np.array([-1, 0, 1])
>>> hermevander(x, 3)
array([[ 1., -1., 0., 2.],
[ 1., 0., -1., -0.],
[ 1., 1., 0., -2.]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x - v[i-2]*(i - 1))
return np.rollaxis(v, 0, v.ndim)
def hermevander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = He_i(x) * He_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the HermiteE polynomials.
If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D HermiteE
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    hermevander, hermevander3d, hermeval2d, hermeval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = hermevander(x, degx)
vy = hermevander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
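# Illustrative sketch, not part of the original module: checking the identity
# described in the docstring above, np.dot(V, c.flat) matching
# hermeval2d(x, y, c) up to roundoff.
def _example_hermevander2d():
    x = np.array([0.0, 0.5, 1.0])
    y = np.array([1.0, 2.0, 3.0])
    c = np.arange(6.0).reshape(2, 3)     # degrees (1, 2) in (x, y)
    V = hermevander2d(x, y, [1, 2])
    return np.allclose(np.dot(V, c.ravel()), hermeval2d(x, y, c))   # True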
def hermevander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the HermiteE polynomials.
If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D HermiteE
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    hermevander, hermevander2d, hermeval2d, hermeval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = hermevander(x, degx)
vy = hermevander(y, degy)
vz = hermevander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def hermefit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Hermite series to data.
Return the coefficients of a HermiteE series of degree `deg` that is
the least squares fit to the data values `y` given at points `x`. If
`y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
multiple fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Hermite coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
    chebfit, legfit, hermfit, polyfit
hermeval : Evaluates a Hermite series.
hermevander : pseudo Vandermonde matrix of Hermite series.
hermeweight : HermiteE weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the HermiteE series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c`
are the coefficients to be solved for, and the elements of `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using HermiteE series are probably most useful when the data can
be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `hermeweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
    >>> from numpy.polynomial.hermite_e import hermefit, hermeval
>>> x = np.linspace(-10, 10)
>>> err = np.random.randn(len(x))/10
>>> y = hermeval(x, [1, 2, 3]) + err
>>> hermefit(x, y, 2)
array([ 1.01690445, 1.99951418, 2.99948696])
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = hermevander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def hermecompanion(c):
"""
Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is an HermiteE basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of HermiteE series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
    .. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1))))
scl = np.multiply.accumulate(scl)[::-1]
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(np.arange(1, n))
bot[...] = top
mat[:, -1] -= scl*c[:-1]/c[-1]
return mat
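# Illustrative sketch, not part of the original module: the eigenvalues of the
# scaled companion matrix recover the roots of the series, which is how
# hermeroots() below works.
def _example_hermecompanion():
    c = hermefromroots([-1.0, 0.0, 1.0])   # coefficients [0., 2., 0., 1.]
    m = hermecompanion(c)
    return np.sort(la.eigvals(m))          # approximately [-1., 0., 1.]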
def hermeroots(c):
"""
Compute the roots of a HermiteE series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * He_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, chebroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The HermiteE series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots
>>> coef = hermefromroots([-1, 0, 1])
>>> coef
array([ 0., 2., 0., 1.])
>>> hermeroots(coef)
array([-1., 0., 1.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = hermecompanion(c)
r = la.eigvals(m)
r.sort()
return r
def _normed_hermite_e_n(x, n):
"""
Evaluate a normalized HermiteE polynomial.
Compute the value of the normalized HermiteE polynomial of degree ``n``
at the points ``x``.
Parameters
----------
x : ndarray of double.
Points at which to evaluate the function
n : int
Degree of the normalized HermiteE function to be evaluated.
Returns
-------
values : ndarray
The shape of the return value is described above.
Notes
-----
.. versionadded:: 1.10.0
This function is needed for finding the Gauss points and integration
weights for high degrees. The values of the standard HermiteE functions
overflow when n >= 207.
"""
if n == 0:
return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(2*np.pi))
nd = float(n)
for i in range(n - 1):
tmp = c0
c0 = -c1*np.sqrt((nd - 1.)/nd)
c1 = tmp + c1*x*np.sqrt(1./nd)
nd = nd - 1.0
return c0 + c1*x
def hermegauss(deg):
"""
Gauss-HermiteE quadrature.
Computes the sample points and weights for Gauss-HermiteE quadrature.
These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-\infty, \infty]`
with the weight function :math:`f(x) = \exp(-x^2/2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
    .. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`He_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = hermecompanion(c)
x = la.eigvalsh(m)
x.sort()
# improve roots by one application of Newton
dy = _normed_hermite_e_n(x, ideg)
df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = _normed_hermite_e_n(x, ideg - 1)
fm /= np.abs(fm).max()
w = 1/(fm * fm)
# for Hermite_e we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= np.sqrt(2*np.pi) / w.sum()
return x, w
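# Illustrative sketch, not part of the original module: the sample points and
# weights integrate polynomials against exp(-x**2/2) over the whole real line,
# e.g. the integral of x**2 * exp(-x**2/2) dx equals sqrt(2*pi).
def _example_hermegauss():
    x, w = hermegauss(6)
    return np.dot(w, x**2)    # approximately sqrt(2*pi) ~ 2.5066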
def hermeweight(x):
"""Weight function of the Hermite_e polynomials.
The weight function is :math:`\exp(-x^2/2)` and the interval of
    integration is :math:`[-\infty, \infty]`. The HermiteE polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
    .. versionadded:: 1.7.0
"""
w = np.exp(-.5*x**2)
return w
#
# HermiteE series class
#
class HermiteE(ABCPolyBase):
"""An HermiteE series class.
The HermiteE class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
        HermiteE coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(x) + 3*He_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(hermeadd)
_sub = staticmethod(hermesub)
_mul = staticmethod(hermemul)
_div = staticmethod(hermediv)
_pow = staticmethod(hermepow)
_val = staticmethod(hermeval)
_int = staticmethod(hermeint)
_der = staticmethod(hermeder)
_fit = staticmethod(hermefit)
_line = staticmethod(hermeline)
_roots = staticmethod(hermeroots)
_fromroots = staticmethod(hermefromroots)
# Virtual properties
nickname = 'herme'
domain = np.array(hermedomain)
window = np.array(hermedomain)
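# Illustrative sketch, not part of the original module: basic use of the
# HermiteE convenience class defined above.
def _example_hermitee_class():
    p = HermiteE([1, 2, 3])    # 1*He_0(x) + 2*He_1(x) + 3*He_2(x)
    d = p.deriv()              # derivative, still a HermiteE instance
    return p(0.5), d.coef      # evaluate p at 0.5 and inspect d's coefficients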
|
bsd-3-clause
|
entomb/CouchPotatoServer
|
libs/xmpp/commands.py
|
200
|
16116
|
## $Id: commands.py,v 1.17 2007/08/28 09:54:15 normanr Exp $
## Ad-Hoc Command manager
## Mike Albon (c) 5th January 2005
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
"""This module is a ad-hoc command processor for xmpppy. It uses the plug-in mechanism like most of the core library. It depends on a DISCO browser manager.
There are 3 classes here, a command processor Commands like the Browser, and a command template plugin Command, and an example command.
To use this module:
Instansiate the module with the parent transport and disco browser manager as parameters.
'Plug in' commands using the command template.
The command feature must be added to existing disco replies where neccessary.
What it supplies:
Automatic command registration with the disco browser manager.
Automatic listing of commands in the public command list.
A means of handling requests, by redirection though the command manager.
"""
from protocol import *
from client import PlugIn
class Commands(PlugIn):
"""Commands is an ancestor of PlugIn and can be attached to any session.
The commands class provides a lookup and browse mechnism. It follows the same priciple of the Browser class, for Service Discovery to provide the list of commands, it adds the 'list' disco type to your existing disco handler function.
How it works:
The commands are added into the existing Browser on the correct nodes. When the command list is built the supplied discovery handler function needs to have a 'list' option in type. This then gets enumerated, all results returned as None are ignored.
The command executed is then called using it's Execute method. All session management is handled by the command itself.
"""
def __init__(self, browser):
"""Initialises class and sets up local variables"""
PlugIn.__init__(self)
DBG_LINE='commands'
self._exported_methods=[]
self._handlers={'':{}}
self._browser = browser
def plugin(self, owner):
"""Makes handlers within the session"""
# Plug into the session and the disco manager
# We only need get and set, results are not needed by a service provider, only a service user.
owner.RegisterHandler('iq',self._CommandHandler,typ='set',ns=NS_COMMANDS)
owner.RegisterHandler('iq',self._CommandHandler,typ='get',ns=NS_COMMANDS)
self._browser.setDiscoHandler(self._DiscoHandler,node=NS_COMMANDS,jid='')
def plugout(self):
"""Removes handlers from the session"""
# unPlug from the session and the disco manager
self._owner.UnregisterHandler('iq',self._CommandHandler,ns=NS_COMMANDS)
for jid in self._handlers:
self._browser.delDiscoHandler(self._DiscoHandler,node=NS_COMMANDS)
def _CommandHandler(self,conn,request):
"""The internal method to process the routing of command execution requests"""
# This is the command handler itself.
# We must:
# Pass on command execution to command handler
# (Do we need to keep session details here, or can that be done in the command?)
jid = str(request.getTo())
try:
node = request.getTagAttr('command','node')
except:
conn.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
if self._handlers.has_key(jid):
if self._handlers[jid].has_key(node):
self._handlers[jid][node]['execute'](conn,request)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
elif self._handlers[''].has_key(node):
self._handlers[''][node]['execute'](conn,request)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
def _DiscoHandler(self,conn,request,typ):
"""The internal method to process service discovery requests"""
# This is the disco manager handler.
if typ == 'items':
# We must:
# Generate a list of commands and return the list
# * This handler does not handle individual commands disco requests.
# Pseudo:
# Enumerate the 'item' disco of each command for the specified jid
# Build response and send
# To make this code easy to write we add a 'list' disco type; it returns a tuple, or None if not advertised
list = []
items = []
jid = str(request.getTo())
# Get specific jid based results
if self._handlers.has_key(jid):
for each in self._handlers[jid].keys():
items.append((jid,each))
else:
# Get generic results
for each in self._handlers[''].keys():
items.append(('',each))
if items != []:
for each in items:
i = self._handlers[each[0]][each[1]]['disco'](conn,request,'list')
if i != None:
list.append(Node(tag='item',attrs={'jid':i[0],'node':i[1],'name':i[2]}))
iq = request.buildReply('result')
if request.getQuerynode(): iq.setQuerynode(request.getQuerynode())
iq.setQueryPayload(list)
conn.send(iq)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
elif typ == 'info':
return {'ids':[{'category':'automation','type':'command-list'}],'features':[]}
def addCommand(self,name,cmddisco,cmdexecute,jid=''):
"""The method to call if adding a new command to the session, the requred parameters of cmddisco and cmdexecute are the methods to enable that command to be executed"""
# This command takes a command object and the name of the command for registration
# We must:
# Add item into disco
# Add item into command list
if not self._handlers.has_key(jid):
self._handlers[jid]={}
self._browser.setDiscoHandler(self._DiscoHandler,node=NS_COMMANDS,jid=jid)
if self._handlers[jid].has_key(name):
raise NameError,'Command Exists'
else:
self._handlers[jid][name]={'disco':cmddisco,'execute':cmdexecute}
# Need to add disco stuff here
self._browser.setDiscoHandler(cmddisco,node=name,jid=jid)
def delCommand(self,name,jid=''):
"""Removed command from the session"""
# This command takes a command object and the name used for registration
# We must:
# Remove item from disco
# Remove item from command list
if not self._handlers.has_key(jid):
raise NameError,'Jid not found'
if not self._handlers[jid].has_key(name):
raise NameError, 'Command not found'
else:
#Do disco removal here
command = self.getCommand(name,jid)['disco']
del self._handlers[jid][name]
self._browser.delDiscoHandler(command,node=name,jid=jid)
def getCommand(self,name,jid=''):
"""Returns the command tuple"""
# This gets the command object with name
# We must:
# Return item that matches this name
if not self._handlers.has_key(jid):
raise NameError,'Jid not found'
elif not self._handlers[jid].has_key(name):
raise NameError,'Command not found'
else:
return self._handlers[jid][name]
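# A minimal registration sketch for the Commands plug-in above. The disco and
# execute callables are hypothetical placeholders; any pair matching the
# signatures expected by _DiscoHandler and _CommandHandler will do. See the
# TestCommand class further down for a complete, working command.
def _example_register_command(commands_plugin):
    def my_disco(conn, request, typ):
        if typ == 'list':
            return (request.getTo(), 'mycommand', 'an example command')
        elif typ == 'items':
            return []
        elif typ == 'info':
            return {'ids': [{'category': 'automation', 'type': 'command-node'}], 'features': []}
    def my_execute(conn, request):
        # A real command would build and send a reply here, then raise NodeProcessed.
        pass
    commands_plugin.addCommand('mycommand', my_disco, my_execute)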
class Command_Handler_Prototype(PlugIn):
"""This is a prototype command handler, as each command uses a disco method
and execute method you can implement it any way you like, however this is
my first attempt at making a generic handler that you can hang process
stages on too. There is an example command below.
The parameters are as follows:
name : the name of the command within the jabber environment
description : the natural language description
discofeatures : the features supported by the command
initial : the initial command in the from of {'execute':commandname}
All stages set the 'actions' dictionary for each session to represent the possible options available.
"""
name = 'examplecommand'
count = 0
description = 'an example command'
discofeatures = [NS_COMMANDS,NS_DATA]
# This is the command template
def __init__(self,jid=''):
"""Set up the class"""
PlugIn.__init__(self)
DBG_LINE='command'
self.sessioncount = 0
self.sessions = {}
# Disco information for command list pre-formatted as a tuple
self.discoinfo = {'ids':[{'category':'automation','type':'command-node','name':self.description}],'features': self.discofeatures}
self._jid = jid
def plugin(self,owner):
"""Plug command into the commands class"""
# The owner in this instance is the Command Processor
self._commands = owner
self._owner = owner._owner
self._commands.addCommand(self.name,self._DiscoHandler,self.Execute,jid=self._jid)
def plugout(self):
"""Remove command from the commands class"""
self._commands.delCommand(self.name,self._jid)
def getSessionID(self):
"""Returns an id for the command session"""
self.count = self.count+1
return 'cmd-%s-%d'%(self.name,self.count)
def Execute(self,conn,request):
"""The method that handles all the commands, and routes them to the correct method for that stage."""
# New request or old?
try:
session = request.getTagAttr('command','sessionid')
except:
session = None
try:
action = request.getTagAttr('command','action')
except:
action = None
if action == None: action = 'execute'
# Check session is in session list
if self.sessions.has_key(session):
if self.sessions[session]['jid']==request.getFrom():
# Check action is valid
if self.sessions[session]['actions'].has_key(action):
# Execute next action
self.sessions[session]['actions'][action](conn,request)
else:
# Stage not presented as an option
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
else:
# Jid and session don't match. Go away imposter
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
elif session != None:
# Not on this sessionid you won't.
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
else:
# New session
self.initial[action](conn,request)
def _DiscoHandler(self,conn,request,type):
"""The handler for discovery events"""
if type == 'list':
return (request.getTo(),self.name,self.description)
elif type == 'items':
return []
elif type == 'info':
return self.discoinfo
class TestCommand(Command_Handler_Prototype):
""" Example class. You should read source if you wish to understate how it works.
Generally, it presents a "master" that giudes user through to calculate something.
"""
name = 'testcommand'
description = 'a noddy example command'
def __init__(self,jid=''):
""" Init internal constants. """
Command_Handler_Prototype.__init__(self,jid)
self.initial = {'execute':self.cmdFirstStage}
def cmdFirstStage(self,conn,request):
""" Determine """
# This is the only place this should be repeated as all other stages should have SessionIDs
try:
session = request.getTagAttr('command','sessionid')
except:
session = None
if session == None:
session = self.getSessionID()
self.sessions[session]={'jid':request.getFrom(),'actions':{'cancel':self.cmdCancel,'next':self.cmdSecondStage,'execute':self.cmdSecondStage},'data':{'type':None}}
# As this is the first stage we only send a form
reply = request.buildReply('result')
form = DataForm(title='Select type of operation',data=['Use the combobox to select the type of calculation you would like to do, then click Next',DataField(name='calctype',desc='Calculation Type',value=self.sessions[session]['data']['type'],options=[['circlediameter','Calculate the Diameter of a circle'],['circlearea','Calculate the area of a circle']],typ='list-single',required=1)])
replypayload = [Node('actions',attrs={'execute':'next'},payload=[Node('next')]),form]
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':session,'status':'executing'},payload=replypayload)
self._owner.send(reply)
raise NodeProcessed
def cmdSecondStage(self,conn,request):
form = DataForm(node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA))
self.sessions[request.getTagAttr('command','sessionid')]['data']['type']=form.getField('calctype').getValue()
self.sessions[request.getTagAttr('command','sessionid')]['actions']={'cancel':self.cmdCancel,None:self.cmdThirdStage,'previous':self.cmdFirstStage,'execute':self.cmdThirdStage,'next':self.cmdThirdStage}
# The form generation is split out to another method as it may be called by cmdThirdStage
self.cmdSecondStageReply(conn,request)
def cmdSecondStageReply(self,conn,request):
reply = request.buildReply('result')
form = DataForm(title = 'Enter the radius', data=['Enter the radius of the circle (numbers only)',DataField(desc='Radius',name='radius',typ='text-single')])
replypayload = [Node('actions',attrs={'execute':'complete'},payload=[Node('complete'),Node('prev')]),form]
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'executing'},payload=replypayload)
self._owner.send(reply)
raise NodeProcessed
def cmdThirdStage(self,conn,request):
form = DataForm(node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA))
try:
num = float(form.getField('radius').getValue())
except:
self.cmdSecondStageReply(conn,request)
from math import pi
if self.sessions[request.getTagAttr('command','sessionid')]['data']['type'] == 'circlearea':
result = (num**2)*pi
else:
result = num*2*pi
reply = request.buildReply('result')
form = DataForm(typ='result',data=[DataField(desc='result',name='result',value=result)])
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'completed'},payload=[form])
self._owner.send(reply)
raise NodeProcessed
def cmdCancel(self,conn,request):
reply = request.buildReply('result')
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'cancelled'})
self._owner.send(reply)
del self.sessions[request.getTagAttr('command','sessionid')]
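# A minimal wiring sketch, assuming the usual xmpppy plug-in flow: `client` is
# an already-connected xmpp Client and `browser` is a DISCO Browser that has
# been plugged into it. Both names are placeholders supplied by the caller.
def _example_plugin_flow(client, browser):
    commands = Commands(browser)
    commands.PlugIn(client)      # attach the iq handlers and the disco node
    test = TestCommand()
    test.PlugIn(commands)        # register 'testcommand' with the manager
    return commands, test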
|
gpl-3.0
|
yl565/statsmodels
|
statsmodels/stats/contingency_tables.py
|
4
|
43623
|
"""
Methods for analyzing two-way contingency tables (i.e. frequency
tables for observations that are cross-classified with respect to two
categorical variables).
The main classes are:
* Table : implements methods that can be applied to any two-way
contingency table.
* SquareTable : implements methods that can be applied to a square
two-way contingency table.
* Table2x2 : implements methods that can be applied to a 2x2
contingency table.
* StratifiedTable : implements methods that can be applied to a
collection of contingency tables.
Also contains functions for conducting McNemar's test and Cochran's Q
test.
Note that the inference procedures may depend on how the data were
sampled. In general the observed units are independent and
identically distributed.
"""
from __future__ import division
from statsmodels.tools.decorators import cache_readonly, resettable_cache
import numpy as np
from scipy import stats
import pandas as pd
from statsmodels import iolib
from statsmodels.tools.sm_exceptions import SingularMatrixWarning
def _make_df_square(table):
"""
Reindex a pandas DataFrame so that it becomes square, meaning that
the row and column indices contain the same values, in the same
order. The row and column index are extended to achieve this.
"""
if not isinstance(table, pd.DataFrame):
return table
# If the table is not square, make it square
if table.shape[0] != table.shape[1]:
ix = list(set(table.index) | set(table.columns))
table = table.reindex(ix, axis=0)
table = table.reindex(ix, axis=1)
# Ensures that the rows and columns are in the same order.
table = table.reindex(table.columns)
return table
class _Bunch(object):
def __repr__(self):
return "<bunch object containing statsmodels results>"
class Table(object):
"""
Analyses that can be performed on a two-way contingency table.
Parameters
----------
table : array-like
A contingency table.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Attributes
----------
table_orig : array-like
The original table is cached as `table_orig`.
marginal_probabilities : tuple of two ndarrays
The estimated row and column marginal distributions.
independence_probabilities : ndarray
Estimated cell probabilities under row/column independence.
fittedvalues : ndarray
Fitted values under independence.
resid_pearson : ndarray
The Pearson residuals under row/column independence.
standardized_resids : ndarray
Residuals for the independent row/column model with approximate
unit variance.
chi2_contribs : ndarray
The contribution of each cell to the chi^2 statistic.
local_logodds_ratios : ndarray
The local log odds ratios are calculated for each 2x2 subtable
formed from adjacent rows and columns.
local_oddsratios : ndarray
The local odds ratios are calculated from each 2x2 subtable
formed from adjacent rows and columns.
cumulative_log_oddsratios : ndarray
The cumulative log odds ratio at a given pair of thresholds is
calculated by reducing the table to a 2x2 table based on
dichotomizing the rows and columns at the given thresholds.
The table of cumulative log odds ratios presents all possible
cumulative log odds ratios that can be formed from a given
table.
cumulative_oddsratios : ndarray
The cumulative odds ratios are calculated by reducing the
table to a 2x2 table based on cutting the rows and columns at
a given point. The table of cumulative odds ratios presents
all possible cumulative odds ratios that can be formed from a
given table.
See also
--------
statsmodels.graphics.mosaicplot.mosaic
scipy.stats.chi2_contingency
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
References
----------
Definitions of residuals:
https://onlinecourses.science.psu.edu/stat504/node/86
"""
def __init__(self, table, shift_zeros=True):
self.table_orig = table
self.table = np.asarray(table, dtype=np.float64)
if shift_zeros and (self.table.min() == 0):
self.table = self.table + 0.5
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array-like
The raw data, from which a contingency table is constructed
using the first two columns.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Returns
-------
A Table instance.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
def test_nominal_association(self):
"""
Assess independence for nominal factors.
Assessment of independence between rows and columns using
chi^2 testing. The rows and columns are treated as nominal
(unordered) categorical variables.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
df : integer
The degrees of freedom of the reference distribution
pvalue : float
The p-value for the test.
"""
statistic = np.asarray(self.chi2_contribs).sum()
df = np.prod(np.asarray(self.table.shape) - 1)
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.df = df
b.pvalue = pvalue
return b
def test_ordinal_association(self, row_scores=None, col_scores=None):
"""
Assess independence between two ordinal variables.
This is the 'linear by linear' association test, which uses
weights or scores to target the test to have more power
against ordered alternatives.
Parameters
----------
row_scores : array-like
An array of numeric row scores
col_scores : array-like
An array of numeric column scores
Returns
-------
A bunch with the following attributes:
statistic : float
The test statistic.
null_mean : float
The expected value of the test statistic under the null
hypothesis.
null_sd : float
The standard deviation of the test statistic under the
null hypothesis.
zscore : float
The Z-score for the test statistic.
pvalue : float
The p-value for the test.
Notes
-----
The scores define the trend to which the test is most sensitive.
Using the default row and column scores gives the
Cochran-Armitage trend test.
"""
if row_scores is None:
row_scores = np.arange(self.table.shape[0])
if col_scores is None:
col_scores = np.arange(self.table.shape[1])
if len(row_scores) != self.table.shape[0]:
raise ValueError("The length of `row_scores` must match the first dimension of `table`.")
if len(col_scores) != self.table.shape[1]:
raise ValueError("The length of `col_scores` must match the second dimension of `table`.")
# The test statistic
statistic = np.dot(row_scores, np.dot(self.table, col_scores))
# Some needed quantities
n_obs = self.table.sum()
rtot = self.table.sum(1)
um = np.dot(row_scores, rtot)
u2m = np.dot(row_scores**2, rtot)
ctot = self.table.sum(0)
vn = np.dot(col_scores, ctot)
v2n = np.dot(col_scores**2, ctot)
# The null mean and variance of the test statistic
e_stat = um * vn / n_obs
v_stat = (u2m - um**2 / n_obs) * (v2n - vn**2 / n_obs) / (n_obs - 1)
sd_stat = np.sqrt(v_stat)
zscore = (statistic - e_stat) / sd_stat
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
b = _Bunch()
b.statistic = statistic
b.null_mean = e_stat
b.null_sd = sd_stat
b.zscore = zscore
b.pvalue = pvalue
return b
@cache_readonly
def marginal_probabilities(self):
# docstring for cached attributes in init above
n = self.table.sum()
row = self.table.sum(1) / n
col = self.table.sum(0) / n
if isinstance(self.table_orig, pd.DataFrame):
row = pd.Series(row, self.table_orig.index)
col = pd.Series(col, self.table_orig.columns)
return row, col
@cache_readonly
def independence_probabilities(self):
# docstring for cached attributes in init above
row, col = self.marginal_probabilities
itab = np.outer(row, col)
if isinstance(self.table_orig, pd.DataFrame):
itab = pd.DataFrame(itab, self.table_orig.index,
self.table_orig.columns)
return itab
@cache_readonly
def fittedvalues(self):
# docstring for cached attributes in init above
probs = self.independence_probabilities
fit = self.table.sum() * probs
return fit
@cache_readonly
def resid_pearson(self):
# docstring for cached attributes in init above
fit = self.fittedvalues
resids = (self.table - fit) / np.sqrt(fit)
return resids
@cache_readonly
def standardized_resids(self):
# docstring for cached attributes in init above
row, col = self.marginal_probabilities
sresids = self.resid_pearson / np.sqrt(np.outer(1 - row, 1 - col))
return sresids
@cache_readonly
def chi2_contribs(self):
# docstring for cached attributes in init above
return self.resid_pearson**2
@cache_readonly
def local_log_oddsratios(self):
# docstring for cached attributes in init above
ta = self.table.copy()
a = ta[0:-1, 0:-1]
b = ta[0:-1, 1:]
c = ta[1:, 0:-1]
d = ta[1:, 1:]
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def local_oddsratios(self):
# docstring for cached attributes in init above
return np.exp(self.local_log_oddsratios)
@cache_readonly
def cumulative_log_oddsratios(self):
# docstring for cached attributes in init above
ta = self.table.cumsum(0).cumsum(1)
a = ta[0:-1, 0:-1]
b = ta[0:-1, -1:] - a
c = ta[-1:, 0:-1] - a
d = ta[-1, -1] - (a + b + c)
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def cumulative_oddsratios(self):
# docstring for cached attributes in init above
return np.exp(self.cumulative_log_oddsratios)
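# A minimal usage sketch of the Table class on made-up counts: a chi^2 test of
# independence between the (nominal) row and column factors.
def _example_table_nominal_association():
    counts = np.asarray([[10, 20, 30],
                         [15, 25, 35],
                         [20, 30, 40]])
    tab = Table(counts, shift_zeros=False)
    rslt = tab.test_nominal_association()
    return rslt.statistic, rslt.df, rslt.pvalue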
class SquareTable(Table):
"""
Methods for analyzing a square contingency table.
Parameters
----------
table : array-like
A square contingency table, or DataFrame that is converted
to a square form.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
These methods should only be used when the rows and columns of the
table have the same categories. If `table` is provided as a
Pandas DataFrame, the row and column indices will be extended to
create a square table. Otherwise the table should be provided in
a square form, with the (implicit) row and column categories
appearing in the same order.
"""
def __init__(self, table, shift_zeros=True):
table = _make_df_square(table) # Non-pandas passes through
k1, k2 = table.shape
if k1 != k2:
raise ValueError('table must be square')
super(SquareTable, self).__init__(table, shift_zeros)
def symmetry(self, method="bowker"):
"""
Test for symmetry of a joint distribution.
This procedure tests the null hypothesis that the joint
distribution is symmetric around the main diagonal, that is
.. math::
p_{i, j} = p_{j, i} \quad \text{for all } i, j
Returns
-------
A bunch with attributes:
statistic : float
chisquare test statistic
pvalue : float
p-value of the test statistic based on chisquare distribution
df : int
degrees of freedom of the chisquare distribution
Notes
-----
The implementation is based on the SAS documentation. R includes
it in `mcnemar.test` if the table is not 2 by 2. However, a more
direct generalization of the McNemar test to larger tables is
provided by the homogeneity test (SquareTable.homogeneity).
The p-value is based on the chi-square distribution, which requires
that the sample size not be very small in order to be a good
approximation of the true distribution. For 2x2 contingency tables
the exact distribution can be obtained with `mcnemar`.
See Also
--------
mcnemar
homogeneity
"""
if method.lower() != "bowker":
raise ValueError("method for symmetry testing must be 'bowker'")
k = self.table.shape[0]
upp_idx = np.triu_indices(k, 1)
tril = self.table.T[upp_idx] # lower triangle in column order
triu = self.table[upp_idx] # upper triangle in row order
statistic = ((tril - triu)**2 / (tril + triu + 1e-20)).sum()
df = k * (k-1) / 2.
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
def homogeneity(self, method="stuart_maxwell"):
"""
Compare row and column marginal distributions.
Parameters
----------
method : string
Either 'stuart_maxwell' or 'bhapkar', leading to two different
estimates of the covariance matrix for the estimated
difference between the row margins and the column margins.
Returns a bunch with attributes:
statistic : float
The chi^2 test statistic
pvalue : float
The p-value of the test statistic
df : integer
The degrees of freedom of the reference distribution
Notes
-----
For a 2x2 table this is equivalent to McNemar's test. More
generally the procedure tests the null hypothesis that the
marginal distribution of the row factor is equal to the
marginal distribution of the column factor. For this to be
meaningful, the two factors must have the same sample space
(i.e. the same categories).
"""
if self.table.shape[0] < 1:
raise ValueError('table is empty')
elif self.table.shape[0] == 1:
b = _Bunch()
b.statistic = 0
b.pvalue = 1
b.df = 0
return b
method = method.lower()
if method not in ["bhapkar", "stuart_maxwell"]:
raise ValueError("method '%s' for homogeneity not known" % method)
n_obs = self.table.sum()
pr = self.table.astype(np.float64) / n_obs
# Compute margins, eliminate last row/column so there is no
# degeneracy
row = pr.sum(1)[0:-1]
col = pr.sum(0)[0:-1]
pr = pr[0:-1, 0:-1]
# The estimated difference between row and column margins.
d = col - row
# The degrees of freedom of the chi^2 reference distribution.
df = pr.shape[0]
if method == "bhapkar":
vmat = -(pr + pr.T) - np.outer(d, d)
dv = col + row - 2*np.diag(pr) - d**2
np.fill_diagonal(vmat, dv)
elif method == "stuart_maxwell":
vmat = -(pr + pr.T)
dv = row + col - 2*np.diag(pr)
np.fill_diagonal(vmat, dv)
try:
statistic = n_obs * np.dot(d, np.linalg.solve(vmat, d))
except np.linalg.LinAlgError:
import warnings
warnings.warn("Unable to invert covariance matrix",
SingularMatrixWarning)
b = _Bunch()
b.statistic = np.nan
b.pvalue = np.nan
b.df = df
return b
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
def summary(self, alpha=0.05, float_format="%.3f"):
"""
Produce a summary of the analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the interval.
float_format : string
Used to format numeric values in the table.
"""
fmt = float_format
headers = ["Statistic", "P-value", "DF"]
stubs = ["Symmetry", "Homogeneity"]
sy = self.symmetry()
hm = self.homogeneity()
data = [[fmt % sy.statistic, fmt % sy.pvalue, '%d' % sy.df],
[fmt % hm.statistic, fmt % hm.pvalue, '%d' % hm.df]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
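# A minimal usage sketch of SquareTable on made-up counts: Bowker's symmetry
# test and the Stuart-Maxwell test of marginal homogeneity.
def _example_square_table_tests():
    counts = np.asarray([[20, 10, 5],
                         [12, 30, 8],
                         [7, 9, 25]])
    st = SquareTable(counts, shift_zeros=False)
    sym = st.symmetry()
    hom = st.homogeneity(method="stuart_maxwell")
    return sym.pvalue, hom.pvalue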
class Table2x2(SquareTable):
"""
Analyses that can be performed on a 2x2 contingency table.
Parameters
----------
table : array-like
A 2x2 contingency table
shift_zeros : boolean
If true, 0.5 is added to all cells of the table if any cell is
equal to zero.
Attributes
----------
log_oddsratio : float
The log odds ratio of the table.
log_oddsratio_se : float
The asymptotic standard error of the estimated log odds ratio.
oddsratio : float
The odds ratio of the table.
riskratio : float
The ratio between the risk in the first row and the risk in
the second row. Column 0 is interpreted as containing the
number of occurrences of the event of interest.
log_riskratio : float
The estimated log risk ratio for the table.
log_riskratio_se : float
The standard error of the estimated log risk ratio for the
table.
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
Note that for the risk ratio, the analysis is not symmetric with
respect to the rows and columns of the contingency table. The two
rows define population subgroups, column 0 is the number of
'events', and column 1 is the number of 'non-events'.
"""
def __init__(self, table, shift_zeros=True):
if (table.ndim != 2) or (table.shape[0] != 2) or (table.shape[1] != 2):
raise ValueError("Table2x2 takes a 2x2 table as input.")
super(Table2x2, self).__init__(table, shift_zeros)
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array-like
The raw data, the first column defines the rows and the
second column defines the columns.
shift_zeros : boolean
If True, and if there are any zeros in the contingency
table, add 0.5 to all four cells of the table.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
@cache_readonly
def log_oddsratio(self):
# docstring for cached attributes in init above
f = self.table.flatten()
return np.dot(np.log(f), np.r_[1, -1, -1, 1])
@cache_readonly
def oddsratio(self):
# docstring for cached attributes in init above
return self.table[0, 0] * self.table[1, 1] / (self.table[0, 1] * self.table[1, 0])
@cache_readonly
def log_oddsratio_se(self):
# docstring for cached attributes in init above
return np.sqrt(np.sum(1 / self.table))
def oddsratio_pvalue(self, null=1):
"""
P-value for a hypothesis test about the odds ratio.
Parameters
----------
null : float
The null value of the odds ratio.
"""
return self.log_oddsratio_pvalue(np.log(null))
def log_oddsratio_pvalue(self, null=0):
"""
P-value for a hypothesis test about the log odds ratio.
Parameters
----------
null : float
The null value of the log odds ratio.
"""
zscore = (self.log_oddsratio - null) / self.log_oddsratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_oddsratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lor = self.log_oddsratio
se = self.log_oddsratio_se
lcb = lor - f * se
ucb = lor + f * se
return lcb, ucb
def oddsratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_oddsratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
@cache_readonly
def riskratio(self):
# docstring for cached attributes in init above
p = self.table[:, 0] / self.table.sum(1)
return p[0] / p[1]
@cache_readonly
def log_riskratio(self):
# docstring for cached attributes in init above
return np.log(self.riskratio)
@cache_readonly
def log_riskratio_se(self):
# docstring for cached attributes in init above
n = self.table.sum(1)
p = self.table[:, 0] / n
va = np.sum((1 - p) / (n*p))
return np.sqrt(va)
def riskratio_pvalue(self, null=1):
"""
p-value for a hypothesis test about the risk ratio.
Parameters
----------
null : float
The null value of the risk ratio.
"""
return self.log_riskratio_pvalue(np.log(null))
def log_riskratio_pvalue(self, null=0):
"""
p-value for a hypothesis test about the log risk ratio.
Parameters
----------
null : float
The null value of the log risk ratio.
"""
zscore = (self.log_riskratio - null) / self.log_riskratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the log risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lrr = self.log_riskratio
se = self.log_riskratio_se
lcb = lrr - f * se
ucb = lrr + f * se
return lcb, ucb
def riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_riskratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
Summarizes results for a 2x2 table analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the confidence
intervals.
float_format : string
Used to format the numeric values in the table.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if type(x) is str:
return x
return float_format % x
headers = ["Estimate", "SE", "LCB", "UCB", "p-value"]
stubs = ["Odds ratio", "Log odds ratio", "Risk ratio", "Log risk ratio"]
lcb1, ucb1 = self.oddsratio_confint(alpha, method)
lcb2, ucb2 = self.log_oddsratio_confint(alpha, method)
lcb3, ucb3 = self.riskratio_confint(alpha, method)
lcb4, ucb4 = self.log_riskratio_confint(alpha, method)
data = [[fmt(x) for x in [self.oddsratio, "", lcb1, ucb1, self.oddsratio_pvalue()]],
[fmt(x) for x in [self.log_oddsratio, self.log_oddsratio_se, lcb2, ucb2,
self.oddsratio_pvalue()]],
[fmt(x) for x in [self.riskratio, "", lcb2, ucb2, self.riskratio_pvalue()]],
[fmt(x) for x in [self.log_riskratio, self.log_riskratio_se, lcb4, ucb4,
self.riskratio_pvalue()]]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
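# A minimal usage sketch of Table2x2 on made-up counts. Rows are the two
# exposure groups and column 0 holds the event counts, so the odds ratio is
# (25*90)/(75*10) = 3 and the risk ratio is 0.25/0.10 = 2.5.
def _example_table2x2_measures():
    counts = np.asarray([[25, 75],
                         [10, 90]])
    t22 = Table2x2(counts, shift_zeros=False)
    or_ci = t22.oddsratio_confint(alpha=0.05)
    rr_ci = t22.riskratio_confint(alpha=0.05)
    return t22.oddsratio, or_ci, t22.riskratio, rr_ci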
class StratifiedTable(object):
"""
Analyses for a collection of 2x2 contingency tables.
Such a collection may arise by stratifying a single 2x2 table with
respect to another factor. This class implements the
'Cochran-Mantel-Haenszel' and 'Breslow-Day' procedures for
analyzing collections of 2x2 contingency tables.
Parameters
----------
tables : list or ndarray
Either a list containing several 2x2 contingency tables, or
a 2x2xk ndarray in which each slice along the third axis is a
2x2 contingency table.
Attributes
----------
logodds_pooled : float
An estimate of the pooled log odds ratio. This is the
Mantel-Haenszel estimate of an odds ratio that is common to
all the tables.
logodds_pooled_se : float
The estimated standard error of the pooled log odds ratio,
following Robins, Breslow and Greenland (Biometrics
42:311-323).
oddsratio_pooled : float
An estimate of the pooled odds ratio. This is the
Mantel-Haenszel estimate of an odds ratio that is common to
all tables.
risk_pooled : float
An estimate of the pooled risk ratio. This is an estimate of
a risk ratio that is common to all the tables.
Notes
-----
These results are based on a sampling model in which the units are
independent both within and between strata.
"""
def __init__(self, tables, shift_zeros=False):
if isinstance(tables, np.ndarray):
sp = tables.shape
if (len(sp) != 3) or (sp[0] != 2) or (sp[1] != 2):
raise ValueError("If an ndarray, argument must be 2x2xn")
table = tables
else:
# Create a data cube
table = np.dstack(tables).astype(np.float64)
if shift_zeros:
zx = (table == 0).sum(0).sum(0)
ix = np.flatnonzero(zx > 0)
if len(ix) > 0:
table = table.copy()
table[:, :, ix] += 0.5
self.table = table
self._cache = resettable_cache()
# Quantities to precompute. Table entries are [[a, b], [c,
# d]], 'ad' is 'a * d', 'apb' is 'a + b', 'dma' is 'd - a',
# etc.
self._apb = table[0, 0, :] + table[0, 1, :]
self._apc = table[0, 0, :] + table[1, 0, :]
self._bpd = table[0, 1, :] + table[1, 1, :]
self._cpd = table[1, 0, :] + table[1, 1, :]
self._ad = table[0, 0, :] * table[1, 1, :]
self._bc = table[0, 1, :] * table[1, 0, :]
self._apd = table[0, 0, :] + table[1, 1, :]
self._dma = table[1, 1, :] - table[0, 0, :]
self._n = table.sum(0).sum(0)
@classmethod
def from_data(cls, var1, var2, strata, data):
"""
Construct a StratifiedTable object from data.
Parameters
----------
var1 : int or string
The column index or name of `data` containing the variable
defining the rows of the contingency table. The variable
must have only two distinct values.
var2 : int or string
The column index or name of `data` containing the variable
defining the columns of the contingency table. The variable
must have only two distinct values.
strata : int or string
The column index or name of `data` containing the variable
defining the strata.
data : array-like
The raw data. A cross-table for analysis is constructed
from the first two columns.
Returns
-------
A StratifiedTable instance.
"""
if not isinstance(data, pd.DataFrame):
data1 = pd.DataFrame(index=np.arange(data.shape[0]), columns=[var1, var2, strata])
data1.loc[:, var1] = data[:, var1]
data1.loc[:, var2] = data[:, var2]
data1.loc[:, strata] = data[:, strata]
else:
data1 = data[[var1, var2, strata]]
gb = data1.groupby(strata).groups
tables = []
for g in gb:
ii = gb[g]
tab = pd.crosstab(data1.loc[ii, var1], data1.loc[ii, var2])
tables.append(tab)
return cls(tables)
def test_null_odds(self, correction=False):
"""
Test that all tables have odds ratio equal to 1.
This is the 'Mantel-Haenszel' test.
Parameters
----------
correction : boolean
If True, use the continuity correction when calculating the
test statistic.
Returns
-------
A bunch containing the chi^2 test statistic and p-value.
"""
statistic = np.sum(self.table[0, 0, :] - self._apb * self._apc / self._n)
statistic = np.abs(statistic)
if correction:
statistic -= 0.5
statistic = statistic**2
denom = self._apb * self._apc * self._bpd * self._cpd
denom /= (self._n**2 * (self._n - 1))
denom = np.sum(denom)
statistic /= denom
# df is always 1
pvalue = 1 - stats.chi2.cdf(statistic, 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
@cache_readonly
def oddsratio_pooled(self):
# doc for cached attributes in init above
odds_ratio = np.sum(self._ad / self._n) / np.sum(self._bc / self._n)
return odds_ratio
@cache_readonly
def logodds_pooled(self):
# doc for cached attributes in init above
return np.log(self.oddsratio_pooled)
@cache_readonly
def risk_pooled(self):
# doc for cached attributes in init above
acd = self.table[0, 0, :] * self._cpd
cab = self.table[1, 0, :] * self._apb
rr = np.sum(acd / self._n) / np.sum(cab / self._n)
return rr
@cache_readonly
def logodds_pooled_se(self):
# doc for cached attributes in init above
adns = np.sum(self._ad / self._n)
bcns = np.sum(self._bc / self._n)
lor_va = np.sum(self._apd * self._ad / self._n**2) / adns**2
mid = self._apd * self._bc / self._n**2
mid += (1 - self._apd / self._n) * self._ad / self._n
mid = np.sum(mid)
mid /= (adns * bcns)
lor_va += mid
lor_va += np.sum((1 - self._apd / self._n) * self._bc / self._n) / bcns**2
lor_va /= 2
lor_se = np.sqrt(lor_va)
return lor_se
def logodds_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lor = np.log(self.oddsratio_pooled)
lor_se = self.logodds_pooled_se
f = -stats.norm.ppf(alpha / 2)
lcb = lor - f * lor_se
ucb = lor + f * lor_se
return lcb, ucb
def oddsratio_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lcb, ucb = self.logodds_pooled_confint(alpha, method=method)
lcb = np.exp(lcb)
ucb = np.exp(ucb)
return lcb, ucb
def test_equal_odds(self, adjust=False):
"""
Test that all odds ratios are identical.
This is the 'Breslow-Day' testing procedure.
Parameters
----------
adjust : boolean
Use the 'Tarone' adjustment to achieve the chi^2
asymptotic distribution.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
pvalue : float
The p-value for the test.
"""
table = self.table
r = self.oddsratio_pooled
a = 1 - r
b = r * (self._apb + self._apc) + self._dma
c = -r * self._apb * self._apc
# Expected value of first cell
e11 = (-b + np.sqrt(b**2 - 4*a*c)) / (2*a)
# Variance of the first cell
v11 = 1 / e11 + 1 / (self._apc - e11) + 1 / (self._apb - e11) + 1 / (self._dma + e11)
v11 = 1 / v11
statistic = np.sum((table[0, 0, :] - e11)**2 / v11)
if adjust:
adj = table[0, 0, :].sum() - e11.sum()
adj = adj**2
adj /= np.sum(v11)
statistic -= adj
pvalue = 1 - stats.chi2.cdf(statistic, table.shape[2] - 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
A summary of all the main results.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence intervals.
float_format : string
Used for formatting numeric values in the summary.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if type(x) is str:
return x
return float_format % x
co_lcb, co_ucb = self.oddsratio_pooled_confint(alpha=alpha, method=method)
clo_lcb, clo_ucb = self.logodds_pooled_confint(alpha=alpha, method=method)
headers = ["Estimate", "LCB", "UCB"]
stubs = ["Pooled odds", "Pooled log odds", "Pooled risk ratio", ""]
data = [[fmt(x) for x in [self.oddsratio_pooled, co_lcb, co_ucb]],
[fmt(x) for x in [self.logodds_pooled, clo_lcb, clo_ucb]],
[fmt(x) for x in [self.risk_pooled, "", ""]],
['', '', '']]
tab1 = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
headers = ["Statistic", "P-value", ""]
stubs = ["Test of OR=1", "Test constant OR"]
rslt1 = self.test_null_odds()
rslt2 = self.test_equal_odds()
data = [[fmt(x) for x in [rslt1.statistic, rslt1.pvalue, ""]],
[fmt(x) for x in [rslt2.statistic, rslt2.pvalue, ""]]]
tab2 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab2)
headers = ["", "", ""]
stubs = ["Number of tables", "Min n", "Max n", "Avg n", "Total n"]
ss = self.table.sum(0).sum(0)
data = [["%d" % self.table.shape[2], '', ''],
["%d" % min(ss), '', ''],
["%d" % max(ss), '', ''],
["%.0f" % np.mean(ss), '', ''],
["%d" % sum(ss), '', '', '']]
tab3 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab3)
return tab1
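# A minimal usage sketch of StratifiedTable on two made-up 2x2 strata: the
# pooled (Mantel-Haenszel) odds ratio, the test that all odds ratios equal 1,
# and the Breslow-Day test that the odds ratios are constant across strata.
def _example_stratified_table():
    t1 = np.asarray([[20, 10], [5, 15]])
    t2 = np.asarray([[30, 20], [10, 25]])
    strat = StratifiedTable([t1, t2])
    null_odds = strat.test_null_odds()
    equal_odds = strat.test_equal_odds()
    return strat.oddsratio_pooled, null_odds.pvalue, equal_odds.pvalue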
def mcnemar(table, exact=True, correction=True):
"""
McNemar test of homogeneity.
Parameters
----------
table : array-like
A square contingency table.
exact : bool
If exact is true, then the binomial distribution will be used.
If exact is false, then the chisquare distribution will be
used, which is the approximation to the distribution of the
test statistic for large sample sizes.
correction : bool
If true, then a continuity correction is used for the chisquare
distribution (if exact is false.)
Returns
-------
A bunch with attributes:
statistic : float or int, array
The test statistic is the chisquare statistic if exact is
false. If the exact binomial distribution is used, then this
contains the min(n1, n2), where n1, n2 are cases that are zero
in one sample but one in the other sample.
pvalue : float or array
p-value of the null hypothesis of equal marginal distributions.
Notes
-----
This is a special case of Cochran's Q test, and of the homogeneity
test. The results when the chisquare distribution is used are
identical, except for continuity correction.
"""
table = _make_df_square(table)
table = np.asarray(table, dtype=np.float64)
n1, n2 = table[0, 1], table[1, 0]
if exact:
statistic = np.minimum(n1, n2)
# binom is symmetric with p=0.5
pvalue = stats.binom.cdf(statistic, n1 + n2, 0.5) * 2
pvalue = np.minimum(pvalue, 1) # limit to 1 if n1==n2
else:
corr = int(correction) # convert bool to 0 or 1
statistic = (np.abs(n1 - n2) - corr)**2 / (1. * (n1 + n2))
df = 1
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
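# A minimal usage sketch of mcnemar on a made-up paired 2x2 table; only the
# two off-diagonal (discordant) counts influence the test.
def _example_mcnemar():
    counts = np.asarray([[40, 5],
                         [12, 43]])
    rslt = mcnemar(counts, exact=True)
    return rslt.statistic, rslt.pvalue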
def cochrans_q(x, return_object=True):
"""
Cochran's Q test for identical binomial proportions.
Parameters
----------
x : array_like, 2d (N, k)
data with N cases and k variables
return_object : boolean
Return values as bunch instead of as individual values.
Returns
-------
Returns a bunch containing the following attributes, or the
individual values according to the value of `return_object`.
statistic : float
test statistic
pvalue : float
pvalue from the chisquare distribution
Notes
-----
Cochran's Q is a k-sample extension of the McNemar test. If there
are only two groups, then Cochran's Q test and the McNemar test
are equivalent.
The procedure tests that the probability of success is the same
for every group. The alternative hypothesis is that at least two
groups have a different probability of success.
In Wikipedia terminology, rows are blocks and columns are
treatments. The number of rows N, should be large for the
chisquare distribution to be a good approximation.
The Null hypothesis of the test is that all treatments have the
same effect.
References
----------
http://en.wikipedia.org/wiki/Cochran_test
SAS Manual for NPAR TESTS
"""
x = np.asarray(x, dtype=np.float64)
gruni = np.unique(x)
N, k = x.shape
count_row_success = (x == gruni[-1]).sum(1, float)
count_col_success = (x == gruni[-1]).sum(0, float)
count_row_ss = count_row_success.sum()
count_col_ss = count_col_success.sum()
assert count_row_ss == count_col_ss #just a calculation check
# From the SAS manual
q_stat = (k-1) * (k * np.sum(count_col_success**2) - count_col_ss**2) \
/ (k * count_row_ss - np.sum(count_row_success**2))
# Note: the denominator looks just like k times the variance of
# the columns
# Wikipedia uses a different, but equivalent expression
#q_stat = (k-1) * (k * np.sum(count_row_success**2) - count_row_ss**2) \
# / (k * count_col_ss - np.sum(count_col_success**2))
df = k - 1
pvalue = stats.chi2.sf(q_stat, df)
if return_object:
b = _Bunch()
b.statistic = q_stat
b.df = df
b.pvalue = pvalue
return b
return q_stat, pvalue, df
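# A minimal usage sketch of cochrans_q on made-up binary outcomes for six
# subjects (rows/blocks) under three treatments (columns).
def _example_cochrans_q():
    x = np.asarray([[1, 1, 0],
                    [1, 0, 0],
                    [1, 1, 1],
                    [0, 0, 0],
                    [1, 1, 0],
                    [1, 0, 1]])
    rslt = cochrans_q(x, return_object=True)
    return rslt.statistic, rslt.df, rslt.pvalue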
|
bsd-3-clause
|
Gui13/CouchPotatoServer
|
couchpotato/core/media/_base/media/main.py
|
2
|
16591
|
import traceback
from string import ascii_lowercase
from CodernityDB.database import RecordNotFound
from couchpotato import tryInt, get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString, getImdb, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex
log = CPLog(__name__)
class MediaPlugin(MediaBase):
_database = {
'media': MediaIndex,
'media_search_title': TitleSearchIndex,
'media_status': MediaStatusIndex,
'media_by_type': MediaTypeIndex,
'media_title': TitleIndex,
'media_startswith': StartsWithIndex,
'media_children': MediaChildrenIndex,
}
def __init__(self):
addApiView('media.refresh', self.refresh, docs = {
'desc': 'Refresh any media type by ID',
'params': {
'id': {'desc': 'Movie, Show, Season or Episode ID(s) you want to refresh.', 'type': 'int (comma separated)'},
}
})
addApiView('media.list', self.listView, docs = {
'desc': 'List media',
'params': {
'type': {'type': 'string', 'desc': 'Media type to filter on.'},
'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
'search': {'desc': 'Search movie title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any movies returned or not,
'media': array, media found,
}"""}
})
addApiView('media.get', self.getView, docs = {
'desc': 'Get media by id',
'params': {
'id': {'desc': 'The id of the media'},
}
})
addApiView('media.delete', self.deleteView, docs = {
'desc': 'Delete a media from the wanted list',
'params': {
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete media from this page', 'type': 'string: all (default), wanted, manage'},
}
})
addApiView('media.available_chars', self.charView)
addEvent('app.load', self.addSingleRefreshView, priority = 100)
addEvent('app.load', self.addSingleListView, priority = 100)
addEvent('app.load', self.addSingleCharView, priority = 100)
addEvent('app.load', self.addSingleDeleteView, priority = 100)
addEvent('media.get', self.get)
addEvent('media.with_status', self.withStatus)
addEvent('media.with_identifiers', self.withIdentifiers)
addEvent('media.list', self.list)
addEvent('media.delete', self.delete)
addEvent('media.restatus', self.restatus)
def refresh(self, id = '', **kwargs):
handlers = []
ids = splitString(id)
for x in ids:
refresh_handler = self.createRefreshHandler(x)
if refresh_handler:
handlers.append(refresh_handler)
fireEvent('notify.frontend', type = 'media.busy', data = {'_id': ids})
fireEventAsync('schedule.queue', handlers = handlers)
return {
'success': True,
}
def createRefreshHandler(self, media_id):
try:
media = get_db().get('id', media_id)
event = '%s.update_info' % media.get('type')
def handler():
fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id))
return handler
except:
log.error('Refresh handler for non existing media: %s', traceback.format_exc())
def addSingleRefreshView(self):
for media_type in fireEvent('media.types', merge = True):
addApiView('%s.refresh' % media_type, self.refresh)
def get(self, media_id):
try:
db = get_db()
imdb_id = getImdb(str(media_id))
if imdb_id:
media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
else:
media = db.get('id', media_id)
if media:
# Attach category
try: media['category'] = db.get('id', media.get('category_id'))
except: pass
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
return media
except RecordNotFound:
log.error('Media with id "%s" not found', media_id)
except:
raise
def getView(self, id = None, **kwargs):
media = self.get(id) if id else None
return {
'success': media is not None,
'media': media,
}
def withStatus(self, status, with_doc = True):
db = get_db()
status = list(status if isinstance(status, (list, tuple)) else [status])
for s in status:
for ms in db.get_many('media_status', s, with_doc = with_doc):
yield ms['doc'] if with_doc else ms
def withIdentifiers(self, identifiers, with_doc = False):
db = get_db()
for x in identifiers:
try:
media = db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc)
return media
except:
pass
log.debug('No media found with identifiers: %s', identifiers)
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, starts_with = None, search = None):
db = get_db()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
# query media ids
if types:
all_media_ids = set()
for media_type in types:
all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
else:
all_media_ids = set([x['_id'] for x in db.all('media')])
media_ids = list(all_media_ids)
filter_by = {}
# Filter on movie status
if status and len(status) > 0:
filter_by['media_status'] = set()
for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
filter_by['media_status'].add(media_status.get('_id'))
# Filter on release status
if release_status and len(release_status) > 0:
filter_by['release_status'] = set()
for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
filter_by['release_status'].add(release_status.get('media_id'))
# Add search filters
if starts_with:
filter_by['starts_with'] = set()
starts_with = toUnicode(starts_with.lower())[0]
starts_with = starts_with if starts_with in ascii_lowercase else '#'
filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)]
# Filter with search query
if search:
filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)]
if status_or and 'media_status' in filter_by and 'release_status' in filter_by:
filter_by['status'] = list(filter_by['media_status']) + list(filter_by['release_status'])
del filter_by['media_status']
del filter_by['release_status']
# Filter by combining ids
for x in filter_by:
media_ids = [n for n in media_ids if n in filter_by[x]]
total_count = len(media_ids)
if total_count == 0:
return 0, []
offset = 0
limit = -1
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = tryInt(splt[0])
offset = tryInt(0 if len(splt) == 1 else splt[1])
# List movies based on title order
medias = []
for m in db.all('media_title'):
media_id = m['_id']
if media_id not in media_ids: continue
if offset > 0:
offset -= 1
continue
media = fireEvent('media.get', media_id, single = True)
# Merge releases with movie dict
medias.append(media)
# remove from media ids
media_ids.remove(media_id)
if len(media_ids) == 0 or len(medias) == limit: break
return total_count, medias
def listView(self, **kwargs):
total_movies, movies = self.list(
types = splitString(kwargs.get('type')),
status = splitString(kwargs.get('status')),
release_status = splitString(kwargs.get('release_status')),
status_or = kwargs.get('status_or') is not None,
limit_offset = kwargs.get('limit_offset'),
starts_with = kwargs.get('starts_with'),
search = kwargs.get('search')
)
return {
'success': True,
'empty': len(movies) == 0,
'total': total_movies,
'movies': movies,
}
def addSingleListView(self):
for media_type in fireEvent('media.types', merge = True):
def tempList(*args, **kwargs):
return self.listView(types = media_type, **kwargs)
addApiView('%s.list' % media_type, tempList)
def availableChars(self, types = None, status = None, release_status = None):
db = get_db()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
# query media ids
if types:
all_media_ids = set()
for media_type in types:
all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
else:
all_media_ids = set([x['_id'] for x in db.all('media')])
media_ids = all_media_ids
filter_by = {}
# Filter on movie status
if status and len(status) > 0:
filter_by['media_status'] = set()
for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
filter_by['media_status'].add(media_status.get('_id'))
# Filter on release status
if release_status and len(release_status) > 0:
filter_by['release_status'] = set()
for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
filter_by['release_status'].add(release_status.get('media_id'))
# Filter by combining ids
for x in filter_by:
media_ids = [n for n in media_ids if n in filter_by[x]]
chars = set()
for x in db.all('media_startswith'):
if x['_id'] in media_ids:
chars.add(x['key'])
if len(chars) == 27:  # 26 letters plus '#': every possible starting character seen
break
return list(chars)
def charView(self, **kwargs):
types = splitString(kwargs.get('type', 'movie'))
status = splitString(kwargs.get('status', None))
release_status = splitString(kwargs.get('release_status', None))
chars = self.availableChars(types, status, release_status)
return {
'success': True,
'empty': len(chars) == 0,
'chars': chars,
}
def addSingleCharView(self):
for media_type in fireEvent('media.types', merge = True):
def tempChar(*args, **kwargs):
    # Pin this endpoint to its media type; charView reads the 'type' kwarg.
    kwargs['type'] = media_type
    return self.charView(**kwargs)
addApiView('%s.available_chars' % media_type, tempChar)
def delete(self, media_id, delete_from = None):
try:
db = get_db()
media = db.get('id', media_id)
if media:
deleted = False
media_releases = fireEvent('release.for_media', media['_id'], single = True)
if delete_from == 'all':
# Delete connected releases
for release in media_releases:
db.delete(release)
db.delete(media)
deleted = True
else:
total_releases = len(media_releases)
total_deleted = 0
new_media_status = None
for release in media_releases:
if delete_from in ['wanted', 'snatched', 'late']:
if release.get('status') != 'done':
db.delete(release)
total_deleted += 1
new_media_status = 'done'
elif delete_from == 'manage':
if release.get('status') == 'done':
db.delete(release)
total_deleted += 1
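# Drop the media document itself when all of its releases were removed and it is no longer active, when deleting from 'wanted' while it is still active, or when a 'late' delete did not remove any release.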
if (total_releases == total_deleted and media['status'] != 'active') or (delete_from == 'wanted' and media['status'] == 'active') or (not new_media_status and delete_from == 'late'):
db.delete(media)
deleted = True
elif new_media_status:
media['status'] = new_media_status
db.update(media)
else:
fireEvent('media.restatus', media.get('_id'), single = True)
if deleted:
fireEvent('notify.frontend', type = 'media.deleted', data = media)
except:
log.error('Failed deleting media: %s', traceback.format_exc())
return True
def deleteView(self, id = '', **kwargs):
ids = splitString(id)
for media_id in ids:
self.delete(media_id, delete_from = kwargs.get('delete_from', 'all'))
return {
'success': True,
}
def addSingleDeleteView(self):
for media_type in fireEvent('media.types', merge = True):
def tempDelete(*args, **kwargs):
return self.deleteView(types = media_type, *args, **kwargs)
addApiView('%s.delete' % media_type, tempDelete)
def restatus(self, media_id):
try:
db = get_db()
m = db.get('id', media_id)
previous_status = m['status']
log.debug('Changing status for %s', getTitle(m))
if not m['profile_id']:
m['status'] = 'done'
else:
move_to_wanted = True
profile = db.get('id', m['profile_id'])
media_releases = fireEvent('release.for_media', m['_id'], single = True)
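# Mark the media 'done' when any quality flagged as 'finish' in its profile already has a completed release; otherwise move it back to 'active'.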
for q_identifier in profile['qualities']:
index = profile['qualities'].index(q_identifier)
for release in media_releases:
if q_identifier == release['quality'] and (release.get('status') == 'done' and profile['finish'][index]):
move_to_wanted = False
m['status'] = 'active' if move_to_wanted else 'done'
# Only update when status has changed
if previous_status != m['status']:
db.update(m)
return True
except:
log.error('Failed restatus: %s', traceback.format_exc())
|
gpl-3.0
|
gorcz/security_monkey
|
security_monkey/watchers/iam/iam_group.py
|
2
|
6319
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.iam.iam_group
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
"""
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey.exceptions import InvalidAWSJSON
from security_monkey.exceptions import BotoConnectionIssue
from security_monkey import app
import json
import urllib
def all_managed_policies(conn):
managed_policies = {}
for policy in conn.policies.all():
for attached_group in policy.attached_groups.all():
policy = {
"name": policy.policy_name,
"arn": policy.arn,
"version": policy.default_version_id
}
if attached_group.arn not in managed_policies:
managed_policies[attached_group.arn] = [policy]
else:
managed_policies[attached_group.arn].append(policy)
return managed_policies
class IAMGroup(Watcher):
index = 'iamgroup'
i_am_singular = 'IAM Group'
i_am_plural = 'IAM Groups'
def __init__(self, accounts=None, debug=False):
super(IAMGroup, self).__init__(accounts=accounts, debug=debug)
def get_all_groups(self, conn):
all_groups = []
marker = None
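# IAM listing calls are paginated; keep requesting pages until the response no longer carries a marker.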
while True:
groups_response = self.wrap_aws_rate_limited_call(
conn.get_all_groups,
marker=marker
)
all_groups.extend(groups_response.groups)
if hasattr(groups_response, 'marker'):
marker = groups_response.marker
else:
break
return all_groups
def get_all_group_policies(self, conn, group_name):
all_group_policies = []
marker = None
while True:
group_policies = self.wrap_aws_rate_limited_call(
conn.get_all_group_policies,
group_name,
marker=marker
)
all_group_policies.extend(group_policies.policy_names)
if hasattr(group_policies, 'marker'):
marker = group_policies.marker
else:
break
return all_group_policies
def get_all_group_users(self, conn, group_name):
all_group_users = []
marker = None
while True:
group_users_response = self.wrap_aws_rate_limited_call(
conn.get_group,
group_name,
marker=marker
)
all_group_users.extend(group_users_response.users)
if hasattr(group_users_response, 'marker'):
marker = group_users_response.marker
else:
break
return all_group_users
def slurp(self):
"""
:returns: item_list - list of IAM Groups.
:returns: exception_map - A dict where the keys are a tuple containing the
location of the exception and the value is the actual exception
"""
self.prep_for_slurp()
item_list = []
exception_map = {}
from security_monkey.common.sts_connect import connect
for account in self.accounts:
try:
iam_b3 = connect(account, 'iam_boto3')
managed_policies = all_managed_policies(iam_b3)
iam = connect(account, 'iam')
groups = self.get_all_groups(iam)
except Exception as e:
exc = BotoConnectionIssue(str(e), 'iamgroup', account, None)
self.slurp_exception((self.index, account, 'universal'), exc, exception_map)
continue
for group in groups:
app.logger.debug("Slurping %s (%s) from %s" % (self.i_am_singular, group.group_name, account))
if self.check_ignore_list(group.group_name):
continue
item_config = {
'group': dict(group),
'grouppolicies': {},
'users': {}
}
if group.arn in managed_policies:
item_config['managed_policies'] = managed_policies.get(group.arn)
### GROUP POLICIES ###
group_policies = self.get_all_group_policies(iam, group.group_name)
for policy_name in group_policies:
policy = self.wrap_aws_rate_limited_call(iam.get_group_policy, group.group_name, policy_name)
policy = policy.policy_document
policy = urllib.unquote(policy)
try:
policydict = json.loads(policy)
except:
exc = InvalidAWSJSON(policy)
self.slurp_exception((self.index, account, 'universal', group.group_name), exc, exception_map)
continue  # policydict is undefined when the policy JSON fails to parse
item_config['grouppolicies'][policy_name] = dict(policydict)
### GROUP USERS ###
group_users = self.get_all_group_users(iam, group['group_name'])
for user in group_users:
item_config['users'][user.arn] = user.user_name
item = IAMGroupItem(account=account, name=group.group_name, config=item_config)
item_list.append(item)
return item_list, exception_map
class IAMGroupItem(ChangeItem):
def __init__(self, account=None, name=None, config={}):
super(IAMGroupItem, self).__init__(
index=IAMGroup.index,
region='universal',
account=account,
name=name,
new_config=config)
|
apache-2.0
|
andmos/ansible
|
test/units/modules/network/netvisor/test_pn_stp.py
|
9
|
2167
|
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_stp
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule, load_fixture
class TestStpModule(TestNvosModule):
module = pn_stp
def setUp(self):
self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_stp.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['update'] == 'stp-modify':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
def test_stp_modify_t1(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_hello_time': '3',
'pn_stp_mode': 'rstp', 'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 stp-modify hello-time 3 root-guard-wait-time 20 mst-max-hops 20 max-age 20 '
expected_cmd += 'stp-mode rstp forwarding-delay 15 bridge-priority 32768'
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_stp_modify_t2(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_root_guard_wait_time': '50',
'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 stp-modify hello-time 2 root-guard-wait-time 50 mst-max-hops 20 '
expected_cmd += 'max-age 20 forwarding-delay 15 bridge-priority 32768'
self.assertEqual(result['cli_cmd'], expected_cmd)
|
gpl-3.0
|
timoschwarzer/blendworks
|
BlendWorks Server/python/Lib/shelve.py
|
83
|
8428
|
"""Manage shelves of pickled objects.
A "shelf" is a persistent, dictionary-like object. The difference
with dbm databases is that the values (not the keys!) in a shelf can
be essentially arbitrary Python objects -- anything that the "pickle"
module can handle. This includes most class instances, recursive data
types, and objects containing lots of shared sub-objects. The keys
are ordinary strings.
To summarize the interface (key is a string, data is an arbitrary
object):
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
Depending on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
Normally, d[key] returns a COPY of the entry. This needs care when
mutable entries are mutated: for example, if d[key] is a list,
d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever. To append an
item to d[key] in a way that will affect the persistent mapping, use:
data = d[key]
data.append(anitem)
d[key] = data
To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open. When you use:
d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close(). This ensures that
such usage as d[key].append(anitem) works as intended.
However, using keyword argument writeback=True may consume vast amounts
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access. You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
"""
from pickle import Pickler, Unpickler
from io import BytesIO
import collections
__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]
class _ClosedDict(collections.MutableMapping):
'Marker for a closed dict. Access attempts raise a ValueError.'
def closed(self, *args):
raise ValueError('invalid operation on closed shelf')
__iter__ = __len__ = __getitem__ = __setitem__ = __delitem__ = keys = closed
def __repr__(self):
return '<Closed Dictionary>'
class Shelf(collections.MutableMapping):
"""Base class for shelf implementations.
This is initialized with a dictionary-like object.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
self.dict = dict
if protocol is None:
protocol = 3
self._protocol = protocol
self.writeback = writeback
self.cache = {}
self.keyencoding = keyencoding
def __iter__(self):
for k in self.dict.keys():
yield k.decode(self.keyencoding)
def __len__(self):
return len(self.dict)
def __contains__(self, key):
return key.encode(self.keyencoding) in self.dict
def get(self, key, default=None):
if key.encode(self.keyencoding) in self.dict:
return self[key]
return default
def __getitem__(self, key):
try:
value = self.cache[key]
except KeyError:
f = BytesIO(self.dict[key.encode(self.keyencoding)])
value = Unpickler(f).load()
if self.writeback:
self.cache[key] = value
return value
def __setitem__(self, key, value):
if self.writeback:
self.cache[key] = value
f = BytesIO()
p = Pickler(f, self._protocol)
p.dump(value)
self.dict[key.encode(self.keyencoding)] = f.getvalue()
def __delitem__(self, key):
del self.dict[key.encode(self.keyencoding)]
try:
del self.cache[key]
except KeyError:
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.sync()
try:
self.dict.close()
except AttributeError:
pass
# Catch errors that may happen when close is called from __del__
# because CPython is in interpreter shutdown.
try:
self.dict = _ClosedDict()
except (NameError, TypeError):
self.dict = None
def __del__(self):
if not hasattr(self, 'writeback'):
# __init__ didn't succeed, so don't bother closing
# see http://bugs.python.org/issue1339007 for details
return
self.close()
def sync(self):
if self.writeback and self.cache:
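# Turn writeback off while flushing so __setitem__ below writes through to the underlying dict without re-populating the cache that is about to be cleared.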
self.writeback = False
for key, entry in self.cache.items():
self[key] = entry
self.writeback = True
self.cache = {}
if hasattr(self.dict, 'sync'):
self.dict.sync()
class BsdDbShelf(Shelf):
"""Shelf implementation using the "BSD" db interface.
This adds methods first(), next(), previous(), last() and
set_location() that have no counterpart in [g]dbm databases.
The actual database must be opened using one of the "bsddb"
modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
bsddb.rnopen) and passed to the constructor.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
Shelf.__init__(self, dict, protocol, writeback, keyencoding)
def set_location(self, key):
(key, value) = self.dict.set_location(key)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def next(self):
(key, value) = next(self.dict)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def previous(self):
(key, value) = self.dict.previous()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def first(self):
(key, value) = self.dict.first()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def last(self):
(key, value) = self.dict.last()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
class DbfilenameShelf(Shelf):
"""Shelf implementation using the "dbm" generic dbm interface.
This is initialized with the filename for the dbm database.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, filename, flag='c', protocol=None, writeback=False):
import dbm
Shelf.__init__(self, dbm.open(filename, flag), protocol, writeback)
def open(filename, flag='c', protocol=None, writeback=False):
"""Open a persistent dictionary for reading and writing.
The filename parameter is the base filename for the underlying
database. As a side-effect, an extension may be added to the
filename and more than one file may be created. The optional flag
parameter has the same interpretation as the flag parameter of
dbm.open(). The optional protocol parameter specifies the
version of the pickle protocol (0, 1, or 2).
See the module's __doc__ string for an overview of the interface.
"""
return DbfilenameShelf(filename, flag, protocol, writeback)
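# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the stdlib module): a minimal, runnable
# demonstration of the copy-on-read behaviour and the writeback option
# described in the module docstring above. The temporary shelf path and the
# "numbers" key are illustrative assumptions only.
if __name__ == "__main__":
    import os
    import tempfile

    _path = os.path.join(tempfile.mkdtemp(), "demo_shelf")

    # Without writeback, mutating a retrieved value only changes a copy.
    d = open(_path)                    # this module's open(), not builtins.open
    d["numbers"] = [1, 2, 3]
    d["numbers"].append(4)             # appends to a throw-away copy
    assert d["numbers"] == [1, 2, 3]
    d.close()

    # With writeback=True, cached entries are written back on sync()/close().
    d = open(_path, writeback=True)
    d["numbers"].append(4)
    d.close()

    d = open(_path)
    assert d["numbers"] == [1, 2, 3, 4]
    d.close()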
|
gpl-2.0
|
danakj/chromium
|
third_party/closure_linter/closure_linter/tokenutil_test.py
|
109
|
7678
|
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the scopeutil module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('[email protected] (Nathan Naze)')
import unittest as googletest
from closure_linter import ecmametadatapass
from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil
class FakeToken(object):
pass
class TokenUtilTest(googletest.TestCase):
def testGetTokenRange(self):
a = FakeToken()
b = FakeToken()
c = FakeToken()
d = FakeToken()
e = FakeToken()
a.next = b
b.next = c
c.next = d
self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d))
# This is an error as e does not come after a in the token chain.
self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e))
def testTokensToString(self):
a = FakeToken()
b = FakeToken()
c = FakeToken()
d = FakeToken()
e = FakeToken()
a.string = 'aaa'
b.string = 'bbb'
c.string = 'ccc'
d.string = 'ddd'
e.string = 'eee'
a.line_number = 5
b.line_number = 6
c.line_number = 6
d.line_number = 10
e.line_number = 11
self.assertEquals(
'aaa\nbbbccc\n\n\n\nddd\neee',
tokenutil.TokensToString([a, b, c, d, e]))
self.assertEquals(
'ddd\neee\naaa\nbbbccc',
tokenutil.TokensToString([d, e, a, b, c]),
'Neighboring tokens not in line_number order should have a newline '
'between them.')
def testGetPreviousCodeToken(self):
tokens = testutil.TokenizeSource("""
start1. // comment
/* another comment */
end1
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
None,
tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1')))
self.assertEquals(
'start1.',
tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string)
def testGetNextCodeToken(self):
tokens = testutil.TokenizeSource("""
start1. // comment
/* another comment */
end1
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'end1',
tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)
self.assertEquals(
None,
tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))
def testGetIdentifierStart(self):
tokens = testutil.TokenizeSource("""
start1 . // comment
prototype. /* another comment */
end1
['edge'][case].prototype.
end2 = function() {}
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'start1',
tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string)
self.assertEquals(
'start1',
tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string)
self.assertEquals(
None,
tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2')))
def testInsertTokenBefore(self):
self.AssertInsertTokenAfterBefore(False)
def testInsertTokenAfter(self):
self.AssertInsertTokenAfterBefore(True)
def AssertInsertTokenAfterBefore(self, after):
new_token = javascripttokens.JavaScriptToken(
'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1)
existing_token1 = javascripttokens.JavaScriptToken(
'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1)
existing_token1.start_index = 0
existing_token1.metadata = ecmametadatapass.EcmaMetaData()
existing_token2 = javascripttokens.JavaScriptToken(
' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1)
existing_token2.start_index = 3
existing_token2.metadata = ecmametadatapass.EcmaMetaData()
existing_token2.metadata.last_code = existing_token1
existing_token1.next = existing_token2
existing_token2.previous = existing_token1
if after:
tokenutil.InsertTokenAfter(new_token, existing_token1)
else:
tokenutil.InsertTokenBefore(new_token, existing_token2)
self.assertEquals(existing_token1, new_token.previous)
self.assertEquals(existing_token2, new_token.next)
self.assertEquals(new_token, existing_token1.next)
self.assertEquals(new_token, existing_token2.previous)
self.assertEquals(existing_token1, new_token.metadata.last_code)
self.assertEquals(new_token, existing_token2.metadata.last_code)
self.assertEquals(0, existing_token1.start_index)
self.assertEquals(3, new_token.start_index)
self.assertEquals(4, existing_token2.start_index)
def testGetIdentifierForToken(self):
tokens = testutil.TokenizeSource("""
start1.abc.def.prototype.
onContinuedLine
(start2.abc.def
.hij.klm
.nop)
start3.abc.def
.hij = function() {};
// An absurd multi-liner.
start4.abc.def.
hij.
klm = function() {};
start5 . aaa . bbb . ccc
shouldntBePartOfThePreviousSymbol
start6.abc.def ghi.shouldntBePartOfThePreviousSymbol
var start7 = 42;
function start8() {
}
start9.abc. // why is there a comment here?
def /* another comment */
shouldntBePart
start10.abc // why is there a comment here?
.def /* another comment */
shouldntBePart
start11.abc. middle1.shouldNotBeIdentifier
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'start1.abc.def.prototype.onContinuedLine',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1')))
self.assertEquals(
'start2.abc.def.hij.klm.nop',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2')))
self.assertEquals(
'start3.abc.def.hij',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3')))
self.assertEquals(
'start4.abc.def.hij.klm',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4')))
self.assertEquals(
'start5.aaa.bbb.ccc',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5')))
self.assertEquals(
'start6.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6')))
self.assertEquals(
'start7',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7')))
self.assertEquals(
'start8',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8')))
self.assertEquals(
'start9.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9')))
self.assertEquals(
'start10.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10')))
self.assertIsNone(
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1')))
if __name__ == '__main__':
googletest.main()
|
bsd-3-clause
|
Qalthos/ansible
|
lib/ansible/modules/storage/netapp/na_elementsw_network_interfaces.py
|
44
|
10836
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
'''
Element Software Node Network Interfaces - Bond 1G and 10G configuration
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_network_interfaces
short_description: NetApp Element Software Configure Node Network Interfaces
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Configure Element SW Node Network Interfaces for Bond 1G and 10G IP address.
options:
method:
description:
- Type of Method used to configure the interface.
- method depends on other settings such as the use of a static IP address, which will change the method to static.
- loopback - Used to define the IPv4 loopback interface.
- manual - Used to define interfaces for which no configuration is done by default.
- dhcp - May be used to obtain an IP address via DHCP.
- static - Used to define Ethernet interfaces with statically allocated IPv4 addresses.
choices: ['loopback', 'manual', 'dhcp', 'static']
required: true
ip_address_1g:
description:
- IP address for the 1G network.
required: true
ip_address_10g:
description:
- IP address for the 10G network.
required: true
subnet_1g:
description:
- 1GbE Subnet Mask.
required: true
subnet_10g:
description:
- 10GbE Subnet Mask.
required: true
gateway_address_1g:
description:
- Router network address to send packets out of the local network.
required: true
gateway_address_10g:
description:
- Router network address to send packets out of the local network.
required: true
mtu_1g:
description:
- Maximum Transmission Unit for 1GbE; the largest packet size that a network protocol can transmit.
- Must be greater than or equal to 1500 bytes.
default: '1500'
mtu_10g:
description:
- Maximum Transmission Unit for 10GbE; the largest packet size that a network protocol can transmit.
- Must be greater than or equal to 1500 bytes.
default: '1500'
dns_nameservers:
description:
- List of addresses for domain name servers.
dns_search_domains:
description:
- List of DNS search domains.
bond_mode_1g:
description:
- Bond mode for 1GbE configuration.
choices: ['ActivePassive', 'ALB', 'LACP']
default: 'ActivePassive'
bond_mode_10g:
description:
- Bond mode for 10GbE configuration.
choices: ['ActivePassive', 'ALB', 'LACP']
default: 'ActivePassive'
lacp_1g:
description:
- Link Aggregation Control Protocol useful only if LACP is selected as the Bond Mode.
- Slow - Packets are transmitted at 30 second intervals.
- Fast - Packets are transmitted in 1 second intervals.
choices: ['Fast', 'Slow']
default: 'Slow'
lacp_10g:
description:
- Link Aggregation Control Protocol useful only if LACP is selected as the Bond Mode.
- Slow - Packets are transmitted at 30 second intervals.
- Fast - Packets are transmitted in 1 second intervals.
choices: ['Fast', 'Slow']
default: 'Slow'
virtual_network_tag:
description:
- This is the primary network tag. All nodes in a cluster have the same VLAN tag.
'''
EXAMPLES = """
- name: Set Node network interfaces configuration for Bond 1G and 10G properties
tags:
- elementsw_network_interfaces
na_elementsw_network_interfaces:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
method: static
ip_address_1g: 10.226.109.68
ip_address_10g: 10.226.201.72
subnet_1g: 255.255.255.0
subnet_10g: 255.255.255.0
gateway_address_1g: 10.193.139.1
gateway_address_10g: 10.193.140.1
mtu_1g: 1500
mtu_10g: 9000
bond_mode_1g: ActivePassive
bond_mode_10g: LACP
lacp_10g: Fast
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
try:
from solidfire.models import Network, NetworkConfig
HAS_SF_SDK = True
except Exception:
HAS_SF_SDK = False
class ElementSWNetworkInterfaces(object):
"""
Element Software Network Interfaces - Bond 1G and 10G Network configuration
"""
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(
method=dict(type='str', required=True, choices=['loopback', 'manual', 'dhcp', 'static']),
ip_address_1g=dict(type='str', required=True),
ip_address_10g=dict(type='str', required=True),
subnet_1g=dict(type='str', required=True),
subnet_10g=dict(type='str', required=True),
gateway_address_1g=dict(type='str', required=True),
gateway_address_10g=dict(type='str', required=True),
mtu_1g=dict(type='str', default='1500'),
mtu_10g=dict(type='str', default='1500'),
dns_nameservers=dict(type='list'),
dns_search_domains=dict(type='list'),
bond_mode_1g=dict(type='str', default='ActivePassive', choices=['ActivePassive', 'ALB', 'LACP']),
bond_mode_10g=dict(type='str', default='ActivePassive', choices=['ActivePassive', 'ALB', 'LACP']),
lacp_1g=dict(type='str', default='Slow', choices=['Fast', 'Slow']),
lacp_10g=dict(type='str', default='Slow', choices=['Fast', 'Slow']),
virtual_network_tag=dict(type='str'),
)
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True,
)
input_params = self.module.params
self.method = input_params['method']
self.ip_address_1g = input_params['ip_address_1g']
self.ip_address_10g = input_params['ip_address_10g']
self.subnet_1g = input_params['subnet_1g']
self.subnet_10g = input_params['subnet_10g']
self.gateway_address_1g = input_params['gateway_address_1g']
self.gateway_address_10g = input_params['gateway_address_10g']
self.mtu_1g = input_params['mtu_1g']
self.mtu_10g = input_params['mtu_10g']
self.dns_nameservers = input_params['dns_nameservers']
self.dns_search_domains = input_params['dns_search_domains']
self.bond_mode_1g = input_params['bond_mode_1g']
self.bond_mode_10g = input_params['bond_mode_10g']
self.lacp_1g = input_params['lacp_1g']
self.lacp_10g = input_params['lacp_10g']
self.virtual_network_tag = input_params['virtual_network_tag']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module, port=442)
def set_network_config(self):
"""
set network configuration
"""
try:
self.sfe.set_network_config(network=self.network_object)
except Exception as exception_object:
self.module.fail_json(msg='Error setting network configuration for node: %s' % (to_native(exception_object)),
exception=traceback.format_exc())
def get_network_params_object(self):
"""
Get Element SW Network object
:description: get Network object
:return: NetworkConfig object
:rtype: object(NetworkConfig object)
"""
try:
bond_1g_network = NetworkConfig(method=self.method,
address=self.ip_address_1g,
netmask=self.subnet_1g,
gateway=self.gateway_address_1g,
mtu=self.mtu_1g,
dns_nameservers=self.dns_nameservers,
dns_search=self.dns_search_domains,
bond_mode=self.bond_mode_1g,
bond_lacp_rate=self.lacp_1g,
virtual_network_tag=self.virtual_network_tag)
bond_10g_network = NetworkConfig(method=self.method,
address=self.ip_address_10g,
netmask=self.subnet_10g,
gateway=self.gateway_address_10g,
mtu=self.mtu_10g,
dns_nameservers=self.dns_nameservers,
dns_search=self.dns_search_domains,
bond_mode=self.bond_mode_10g,
bond_lacp_rate=self.lacp_10g,
virtual_network_tag=self.virtual_network_tag)
network_object = Network(bond1_g=bond_1g_network,
bond10_g=bond_10g_network)
return network_object
except Exception as e:
self.module.fail_json(msg='Error with setting up network object for node 1G and 10G configuration : %s' % to_native(e),
exception=to_native(e))
def apply(self):
"""
Check connection and initialize node with cluster ownership
"""
changed = False
result_message = None
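# get_network_params_object() either returns a Network object or fails the module, so this branch always applies the configuration and reports changed=True.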
self.network_object = self.get_network_params_object()
if self.network_object is not None:
self.set_network_config()
changed = True
else:
result_message = "Skipping changes, No change requested"
self.module.exit_json(changed=changed, msg=result_message)
def main():
"""
Main function
"""
elementsw_network_interfaces = ElementSWNetworkInterfaces()
elementsw_network_interfaces.apply()
if __name__ == '__main__':
main()
|
gpl-3.0
|
csmart/jockey-yum
|
setup.py
|
1
|
1204
|
#!/usr/bin/env python
# (c) 2007 Canonical Ltd.
# Author: Martin Pitt <[email protected]>
# This script needs python-distutils-extra, an extension to the standard
# distutils which provides i18n, icon support, etc.
# https://launchpad.net/python-distutils-extra
from glob import glob
from distutils.version import StrictVersion
try:
import DistUtilsExtra.auto
except ImportError:
import sys
print >> sys.stderr, 'To build Jockey you need https://launchpad.net/python-distutils-extra'
sys.exit(1)
assert StrictVersion(DistUtilsExtra.auto.__version__) >= '2.4', 'needs DistUtilsExtra.auto >= 2.4'
DistUtilsExtra.auto.setup(
name='jockey',
version='0.9.3',
description='UI for managing third-party and non-free drivers',
url='https://launchpad.net/jockey',
license='GPL v2 or later',
author='Martin Pitt',
author_email='[email protected]',
data_files = [
('share/jockey', ['backend/jockey-backend']),
('share/jockey', ['gtk/jockey-gtk.ui']), # bug in DistUtilsExtra.auto 2.2
('share/jockey', glob('kde/*.ui')), # don't use pykdeuic4
],
scripts = ['gtk/jockey-gtk', 'kde/jockey-kde', 'text/jockey-text'],
)
|
gpl-2.0
|
MarsSnail/gyp_tools
|
pylib/gyp/MSVSToolFile.py
|
2736
|
1804
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
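# Hedged usage sketch (not part of the original file): shows how the Writer
# API documented above might be used to emit a tool file with one custom
# build rule. The file name, rule name and command below are illustrative
# assumptions, not values taken from gyp itself.
if __name__ == '__main__':
  writer = Writer('example.rules', 'ExampleTools')
  writer.AddCustomBuildRule(name='Protoc',
                            cmd='protoc --cpp_out=$(OutDir) $(InputPath)',
                            description='Compiling $(InputName).proto',
                            additional_dependencies=[],
                            outputs=['$(OutDir)/$(InputName).pb.cc'],
                            extensions=['proto'])
  writer.WriteIfChanged()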
|
bsd-3-clause
|
gersolar/stations
|
stations_configuration/settings.py
|
1
|
5198
|
# Only Celery settings for stations project.
#import djcelery
#djcelery.setup_loader()
#BROKER_TRANSPORT = 'amqplib'
#BROKER_URL = 'django://'
##CELERY_RESULT_BACKEND = 'database'
#CELERY_DEFAULT_QUEUE = "default"
#CELERY_QUEUES = {
# "default": {
# "binding_key": "task.#",
# },
# "mailer": {
# "binding_key": "task.#",
# },
#}
#CELERY_ROUTES = {'downloader.tasks.check_email_schedule': {'queue': 'mailer'}}
#CELERY_TIMEZONE = 'UTC'
#CELERY_CONCURRENCY = 7
# Django settings for stations project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'stations.sqlite3',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC' # 'America/Buenos_Aires'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'fax%_3d9oshwed$!3s)jdn876jpj#5u&50m$6naau#&=zpyn%0'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'sslify.middleware.SSLifyMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'stations_configuration.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'stations_configuration.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'polymorphic',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'stations',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEMPLATE_DIRS = ('templates',)  # must be a tuple/list; overrides the empty TEMPLATE_DIRS defined above
|
mit
|
ujenmr/ansible
|
lib/ansible/modules/remote_management/oneview/oneview_ethernet_network_facts.py
|
125
|
4863
|
#!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_ethernet_network_facts
short_description: Retrieve the facts about one or more of the OneView Ethernet Networks
description:
- Retrieve the facts about one or more of the Ethernet Networks from OneView.
version_added: "2.4"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
name:
description:
- Ethernet Network name.
options:
description:
- "List with options to gather additional facts about an Ethernet Network and related resources.
Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Ethernet Networks
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather paginated and filtered facts about Ethernet Networks
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
params:
start: 1
count: 3
sort: 'name:descending'
filter: 'purpose=General'
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather facts about an Ethernet Network by name
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
name: Ethernet network name
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather facts about an Ethernet Network by name with options
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
name: eth1
options:
- associatedProfiles
- associatedUplinkGroups
delegate_to: localhost
- debug: var=enet_associated_profiles
- debug: var=enet_associated_uplink_groups
'''
RETURN = '''
ethernet_networks:
description: Has all the OneView facts about the Ethernet Networks.
returned: Always, but can be null.
type: dict
enet_associated_profiles:
description: Has all the OneView facts about the profiles which are using the Ethernet network.
returned: When requested, but can be null.
type: dict
enet_associated_uplink_groups:
description: Has all the OneView facts about the uplink sets which are using the Ethernet network.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class EthernetNetworkFactsModule(OneViewModuleBase):
argument_spec = dict(
name=dict(type='str'),
options=dict(type='list'),
params=dict(type='dict')
)
def __init__(self):
super(EthernetNetworkFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
self.resource_client = self.oneview_client.ethernet_networks
def execute_module(self):
ansible_facts = {}
if self.module.params['name']:
ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
if self.module.params.get('options') and ethernet_networks:
ansible_facts = self.__gather_optional_facts(ethernet_networks[0])
else:
ethernet_networks = self.resource_client.get_all(**self.facts_params)
ansible_facts['ethernet_networks'] = ethernet_networks
return dict(changed=False, ansible_facts=ansible_facts)
def __gather_optional_facts(self, ethernet_network):
ansible_facts = {}
if self.options.get('associatedProfiles'):
ansible_facts['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
if self.options.get('associatedUplinkGroups'):
ansible_facts['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
return ansible_facts
def __get_associated_profiles(self, ethernet_network):
associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
def __get_associated_uplink_groups(self, ethernet_network):
uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
def main():
EthernetNetworkFactsModule().run()
if __name__ == '__main__':
main()
|
gpl-3.0
|
gram526/VTK
|
Filters/Hybrid/Testing/Python/WarpPolyData.py
|
20
|
6369
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this example tests the warping of PolyData using thin plate splines
# and with grid transforms using different interpolation modes
# create a rendering window
renWin = vtk.vtkRenderWindow()
renWin.SetSize(600,300)
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(20)
sphere.SetPhiResolution(20)
ap = vtk.vtkPolyDataNormals()
ap.SetInputConnection(sphere.GetOutputPort())
#---------------------------
# thin plate spline transform
spoints = vtk.vtkPoints()
spoints.SetNumberOfPoints(10)
spoints.SetPoint(0,0.000,0.000,0.500)
spoints.SetPoint(1,0.000,0.000,-0.500)
spoints.SetPoint(2,0.433,0.000,0.250)
spoints.SetPoint(3,0.433,0.000,-0.250)
spoints.SetPoint(4,-0.000,0.433,0.250)
spoints.SetPoint(5,-0.000,0.433,-0.250)
spoints.SetPoint(6,-0.433,-0.000,0.250)
spoints.SetPoint(7,-0.433,-0.000,-0.250)
spoints.SetPoint(8,0.000,-0.433,0.250)
spoints.SetPoint(9,0.000,-0.433,-0.250)
tpoints = vtk.vtkPoints()
tpoints.SetNumberOfPoints(10)
tpoints.SetPoint(0,0.000,0.000,0.800)
tpoints.SetPoint(1,0.000,0.000,-0.200)
tpoints.SetPoint(2,0.433,0.000,0.350)
tpoints.SetPoint(3,0.433,0.000,-0.150)
tpoints.SetPoint(4,-0.000,0.233,0.350)
tpoints.SetPoint(5,-0.000,0.433,-0.150)
tpoints.SetPoint(6,-0.433,-0.000,0.350)
tpoints.SetPoint(7,-0.433,-0.000,-0.150)
tpoints.SetPoint(8,0.000,-0.233,0.350)
tpoints.SetPoint(9,0.000,-0.433,-0.150)
thin = vtk.vtkThinPlateSplineTransform()
thin.SetSourceLandmarks(spoints)
thin.SetTargetLandmarks(tpoints)
thin.SetBasisToR2LogR()
# thin Inverse
t1 = vtk.vtkGeneralTransform()
t1.SetInput(thin)
f11 = vtk.vtkTransformPolyDataFilter()
f11.SetInputConnection(ap.GetOutputPort())
f11.SetTransform(t1)
m11 = vtk.vtkDataSetMapper()
m11.SetInputConnection(f11.GetOutputPort())
a11 = vtk.vtkActor()
a11.SetMapper(m11)
a11.RotateY(90)
a11.GetProperty().SetColor(1,0,0)
#[a11 GetProperty] SetRepresentationToWireframe
ren11 = vtk.vtkRenderer()
ren11.SetViewport(0.0,0.5,0.25,1.0)
ren11.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren11.AddActor(a11)
renWin.AddRenderer(ren11)
# inverse thin plate spline transform
f12 = vtk.vtkTransformPolyDataFilter()
f12.SetInputConnection(ap.GetOutputPort())
f12.SetTransform(t1.GetInverse())
m12 = vtk.vtkDataSetMapper()
m12.SetInputConnection(f12.GetOutputPort())
a12 = vtk.vtkActor()
a12.SetMapper(m12)
a12.RotateY(90)
a12.GetProperty().SetColor(0.9,0.9,0)
#[a12 GetProperty] SetRepresentationToWireframe
ren12 = vtk.vtkRenderer()
ren12.SetViewport(0.0,0.0,0.25,0.5)
ren12.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren12.AddActor(a12)
renWin.AddRenderer(ren12)
#--------------------------
# grid transform, cubic interpolation
gridTrans = vtk.vtkTransformToGrid()
gridTrans.SetInput(t1)
gridTrans.SetGridOrigin(-1.5,-1.5,-1.5)
gridTrans.SetGridExtent(0,60,0,60,0,60)
gridTrans.SetGridSpacing(0.05,0.05,0.05)
t2 = vtk.vtkGridTransform()
t2.SetDisplacementGridConnection(gridTrans.GetOutputPort())
t2.SetInterpolationModeToCubic()
f21 = vtk.vtkTransformPolyDataFilter()
f21.SetInputConnection(ap.GetOutputPort())
f21.SetTransform(t2)
m21 = vtk.vtkDataSetMapper()
m21.SetInputConnection(f21.GetOutputPort())
a21 = vtk.vtkActor()
a21.SetMapper(m21)
a21.RotateY(90)
a21.GetProperty().SetColor(1,0,0)
#[a21 GetProperty] SetRepresentationToWireframe
ren21 = vtk.vtkRenderer()
ren21.SetViewport(0.25,0.5,0.50,1.0)
ren21.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren21.AddActor(a21)
renWin.AddRenderer(ren21)
# inverse
f22 = vtk.vtkTransformPolyDataFilter()
f22.SetInputConnection(ap.GetOutputPort())
f22.SetTransform(t2.GetInverse())
m22 = vtk.vtkDataSetMapper()
m22.SetInputConnection(f22.GetOutputPort())
a22 = vtk.vtkActor()
a22.SetMapper(m22)
a22.RotateY(90)
a22.GetProperty().SetColor(0.9,0.9,0)
#[a22 GetProperty] SetRepresentationToWireframe
ren22 = vtk.vtkRenderer()
ren22.SetViewport(0.25,0.0,0.50,0.5)
ren22.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren22.AddActor(a22)
renWin.AddRenderer(ren22)
#--------------------------
# grid transform, linear
t3 = vtk.vtkGridTransform()
t3.SetDisplacementGridConnection(gridTrans.GetOutputPort())
t3.SetInterpolationModeToLinear()
f31 = vtk.vtkTransformPolyDataFilter()
f31.SetInputConnection(ap.GetOutputPort())
f31.SetTransform(t3)
m31 = vtk.vtkDataSetMapper()
m31.SetInputConnection(f31.GetOutputPort())
a31 = vtk.vtkActor()
a31.SetMapper(m31)
a31.RotateY(90)
a31.GetProperty().SetColor(1,0,0)
#[a31 GetProperty] SetRepresentationToWireframe
ren31 = vtk.vtkRenderer()
ren31.SetViewport(0.50,0.5,0.75,1.0)
ren31.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren31.AddActor(a31)
renWin.AddRenderer(ren31)
# inverse
f32 = vtk.vtkTransformPolyDataFilter()
f32.SetInputConnection(ap.GetOutputPort())
f32.SetTransform(t3.GetInverse())
m32 = vtk.vtkDataSetMapper()
m32.SetInputConnection(f32.GetOutputPort())
a32 = vtk.vtkActor()
a32.SetMapper(m32)
a32.RotateY(90)
a32.GetProperty().SetColor(0.9,0.9,0)
#[a32 GetProperty] SetRepresentationToWireframe
ren32 = vtk.vtkRenderer()
ren32.SetViewport(0.5,0.0,0.75,0.5)
ren32.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren32.AddActor(a32)
renWin.AddRenderer(ren32)
#--------------------------
# grid transform, nearest
t4 = vtk.vtkGridTransform()
t4.SetDisplacementGridConnection(gridTrans.GetOutputPort())
t4.SetInterpolationModeToNearestNeighbor()
t4.SetInverseTolerance(0.05)
f41 = vtk.vtkTransformPolyDataFilter()
f41.SetInputConnection(ap.GetOutputPort())
f41.SetTransform(t4)
m41 = vtk.vtkDataSetMapper()
m41.SetInputConnection(f41.GetOutputPort())
a41 = vtk.vtkActor()
a41.SetMapper(m41)
a41.RotateY(90)
a41.GetProperty().SetColor(1,0,0)
#[a41 GetProperty] SetRepresentationToWireframe
ren41 = vtk.vtkRenderer()
ren41.SetViewport(0.75,0.5,1.0,1.0)
ren41.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren41.AddActor(a41)
renWin.AddRenderer(ren41)
#inverse
f42 = vtk.vtkTransformPolyDataFilter()
f42.SetInputConnection(ap.GetOutputPort())
f42.SetTransform(t4.GetInverse())
m42 = vtk.vtkDataSetMapper()
m42.SetInputConnection(f42.GetOutputPort())
a42 = vtk.vtkActor()
a42.SetMapper(m42)
a42.RotateY(90)
a42.GetProperty().SetColor(0.9,0.9,0)
#[a42 GetProperty] SetRepresentationToWireframe
ren42 = vtk.vtkRenderer()
ren42.SetViewport(0.75,0.0,1.0,0.5)
ren42.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren42.AddActor(a42)
renWin.AddRenderer(ren42)
t1.RotateX(-100)
t1.PostMultiply()
t1.RotateX(+100)
renWin.Render()
# --- end of script --
|
bsd-3-clause
|
chrisseto/modular-odm
|
tests/test_foreign.py
|
4
|
1849
|
#!/usr/bin/env python
# encoding: utf-8
from nose.tools import *
from tests.base import ModularOdmTestCase, TestObject
from modularodm import fields
class TestForeignList(ModularOdmTestCase):
def define_objects(self):
class Foo(TestObject):
_id = fields.IntegerField()
bars = fields.ForeignField('bar', list=True)
class Bar(TestObject):
_id = fields.IntegerField()
return Foo, Bar
def set_up_objects(self):
self.foo = self.Foo(_id=1)
self.bars = []
for idx in range(5):
self.bars.append(self.Bar(_id=idx))
self.bars[idx].save()
self.foo.bars = self.bars
self.foo.save()
def test_get_item(self):
assert_equal(self.bars[2], self.foo.bars[2])
def test_get_slice(self):
assert_equal(self.bars[:3], list(self.foo.bars[:3]))
def test_get_slice_extended(self):
assert_equal(self.bars[::-1], list(self.foo.bars[::-1]))
class TestAbstractForeignList(ModularOdmTestCase):
def define_objects(self):
class Foo(TestObject):
_id = fields.IntegerField()
bars = fields.AbstractForeignField(list=True)
class Bar(TestObject):
_id = fields.IntegerField()
return Foo, Bar
def set_up_objects(self):
self.foo = self.Foo(_id=1)
self.bars = []
for idx in range(5):
self.bars.append(self.Bar(_id=idx))
self.bars[idx].save()
self.foo.bars = self.bars
self.foo.save()
def test_get_item(self):
assert_equal(self.bars[2], self.foo.bars[2])
def test_get_slice(self):
assert_equal(self.bars[:3], list(self.foo.bars[:3]))
def test_get_slice_extended(self):
assert_equal(self.bars[::-1], list(self.foo.bars[::-1]))
|
apache-2.0
|
ycl2045/nova-master
|
nova/api/openstack/compute/plugins/v3/keypairs.py
|
10
|
6309
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keypair management extension."""
import webob
import webob.exc
from nova.api.openstack.compute.schemas.v3 import keypairs
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common.gettextutils import _
ALIAS = 'keypairs'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
soft_authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
class KeypairController(object):
"""Keypair API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.KeypairAPI()
def _filter_keypair(self, keypair, **attrs):
clean = {
'name': keypair.name,
'public_key': keypair.public_key,
'fingerprint': keypair.fingerprint,
}
for attr in attrs:
clean[attr] = keypair[attr]
return clean
@extensions.expected_errors((400, 409, 413))
@wsgi.response(201)
@validation.schema(keypairs.create)
def create(self, req, body):
"""Create or import keypair.
Sending name will generate a key and return private_key
and fingerprint.
You can send a public_key to add an existing ssh key
params: keypair object with:
name (required) - string
public_key (optional) - string
"""
context = req.environ['nova.context']
authorize(context, action='create')
params = body['keypair']
name = params['name']
try:
if 'public_key' in params:
keypair = self.api.import_key_pair(context,
context.user_id, name,
params['public_key'])
keypair = self._filter_keypair(keypair, user_id=True)
else:
keypair, private_key = self.api.create_key_pair(
context, context.user_id, name)
keypair = self._filter_keypair(keypair, user_id=True)
keypair['private_key'] = private_key
return {'keypair': keypair}
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise webob.exc.HTTPRequestEntityTooLarge(
explanation=msg,
headers={'Retry-After': 0})
except exception.InvalidKeypair as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.KeyPairExists as exc:
raise webob.exc.HTTPConflict(explanation=exc.format_message())
@wsgi.response(204)
@extensions.expected_errors(404)
def delete(self, req, id):
"""Delete a keypair with a given name."""
context = req.environ['nova.context']
authorize(context, action='delete')
try:
self.api.delete_key_pair(context, context.user_id, id)
except exception.KeypairNotFound:
raise webob.exc.HTTPNotFound()
@extensions.expected_errors(404)
def show(self, req, id):
"""Return data for the given key name."""
context = req.environ['nova.context']
authorize(context, action='show')
try:
keypair = self.api.get_key_pair(context, context.user_id, id)
except exception.KeypairNotFound:
raise webob.exc.HTTPNotFound()
return {'keypair': self._filter_keypair(keypair)}
@extensions.expected_errors(())
def index(self, req):
"""List of keypairs for a user."""
context = req.environ['nova.context']
authorize(context, action='index')
key_pairs = self.api.get_key_pairs(context, context.user_id)
rval = []
for key_pair in key_pairs:
rval.append({'keypair': self._filter_keypair(key_pair)})
return {'keypairs': rval}
class Controller(wsgi.Controller):
def _add_key_name(self, req, servers):
for server in servers:
db_server = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show'/'detail' methods.
server['key_name'] = db_server['key_name']
def _show(self, req, resp_obj):
if 'server' in resp_obj.obj:
server = resp_obj.obj['server']
self._add_key_name(req, [server])
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if soft_authorize(context):
self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if 'servers' in resp_obj.obj and soft_authorize(context):
servers = resp_obj.obj['servers']
self._add_key_name(req, servers)
class Keypairs(extensions.V3APIExtensionBase):
"""Keypair Support."""
name = "Keypairs"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension('keypairs',
KeypairController())]
return resources
def get_controller_extensions(self):
controller = Controller()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
def server_create(self, server_dict, create_kwargs):
create_kwargs['key_name'] = server_dict.get('key_name')
|
apache-2.0
|
liamgh/liamgreenhughes-sl4a-tf101
|
python/src/Lib/test/test_pwd.py
|
58
|
3352
|
import unittest
from test import test_support
import pwd
class PwdTest(unittest.TestCase):
def test_values(self):
entries = pwd.getpwall()
entriesbyname = {}
entriesbyuid = {}
for e in entries:
self.assertEqual(len(e), 7)
self.assertEqual(e[0], e.pw_name)
self.assert_(isinstance(e.pw_name, basestring))
self.assertEqual(e[1], e.pw_passwd)
self.assert_(isinstance(e.pw_passwd, basestring))
self.assertEqual(e[2], e.pw_uid)
self.assert_(isinstance(e.pw_uid, int))
self.assertEqual(e[3], e.pw_gid)
self.assert_(isinstance(e.pw_gid, int))
self.assertEqual(e[4], e.pw_gecos)
self.assert_(isinstance(e.pw_gecos, basestring))
self.assertEqual(e[5], e.pw_dir)
self.assert_(isinstance(e.pw_dir, basestring))
self.assertEqual(e[6], e.pw_shell)
self.assert_(isinstance(e.pw_shell, basestring))
            # The following won't work because of duplicate entries
            # for one uid:
            #     self.assertEqual(pwd.getpwuid(e.pw_uid), e)
            # Instead, collect all entries for one uid here
            # and check them afterwards.
entriesbyname.setdefault(e.pw_name, []).append(e)
entriesbyuid.setdefault(e.pw_uid, []).append(e)
if len(entries) > 1000: # Huge passwd file (NIS?) -- skip the rest
return
# check whether the entry returned by getpwuid()
# for each uid is among those from getpwall() for this uid
for e in entries:
if not e[0] or e[0] == '+':
continue # skip NIS entries etc.
self.assert_(pwd.getpwnam(e.pw_name) in entriesbyname[e.pw_name])
self.assert_(pwd.getpwuid(e.pw_uid) in entriesbyuid[e.pw_uid])
def test_errors(self):
self.assertRaises(TypeError, pwd.getpwuid)
self.assertRaises(TypeError, pwd.getpwnam)
self.assertRaises(TypeError, pwd.getpwall, 42)
# try to get some errors
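        # Strategy: take an existing user name and bump one character at a
        # time (e.g. 'abc' -> 'bbc') until the result is no longer in the
        # passwd database, then check that getpwnam() raises KeyError for it.
        # A non-existent uid is found similarly by repeatedly mapping an
        # initial guess through (uid * 3) % 0x10000.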
bynames = {}
byuids = {}
for (n, p, u, g, gecos, d, s) in pwd.getpwall():
bynames[n] = u
byuids[u] = n
allnames = bynames.keys()
namei = 0
fakename = allnames[namei]
while fakename in bynames:
chars = list(fakename)
for i in xrange(len(chars)):
if chars[i] == 'z':
chars[i] = 'A'
break
elif chars[i] == 'Z':
continue
else:
chars[i] = chr(ord(chars[i]) + 1)
break
else:
namei = namei + 1
try:
fakename = allnames[namei]
except IndexError:
# should never happen... if so, just forget it
break
fakename = ''.join(chars)
self.assertRaises(KeyError, pwd.getpwnam, fakename)
# Choose a non-existent uid.
fakeuid = 4127
while fakeuid in byuids:
fakeuid = (fakeuid * 3) % 0x10000
self.assertRaises(KeyError, pwd.getpwuid, fakeuid)
def test_main():
test_support.run_unittest(PwdTest)
if __name__ == "__main__":
test_main()
|
apache-2.0
|
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8
|
lib/python2.7/encodings/mac_iceland.py
|
593
|
13754
|
""" Python Character Mapping Codec mac_iceland generated from 'MAPPINGS/VENDORS/APPLE/ICELAND.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-iceland',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
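# Rough usage sketch (the codec is normally reached through the encodings
# package rather than imported directly), assuming the standard
# 'mac_iceland' / 'mac-iceland' aliases are registered:
#   u'\xde'.encode('mac_iceland')   # -> '\xde' (LATIN CAPITAL LETTER THORN)
#   '\xde'.decode('mac_iceland')    # -> u'\xde'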
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xdd' # 0xA0 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\xd0' # 0xDC -> LATIN CAPITAL LETTER ETH
u'\xf0' # 0xDD -> LATIN SMALL LETTER ETH
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xfe' # 0xDF -> LATIN SMALL LETTER THORN
u'\xfd' # 0xE0 -> LATIN SMALL LETTER Y WITH ACUTE
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
gpl-2.0
|
alertby/mbed
|
workspace_tools/host_tests/rtc_auto.py
|
122
|
2052
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from time import time, strftime, gmtime
class RTCTest():
    PATTERN_RTC_VALUE = r"\[(\d+)\] \[(\d+-\d+-\d+ \d+:\d+:\d+ [AaPpMm]{2})\]"
re_detect_rtc_value = re.compile(PATTERN_RTC_VALUE)
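    # The pattern matches lines such as:
    #   [1256729742] [2009-10-28 11:35:42 AM]
    # where group(1) is the RTC value in seconds and group(2) is the formatted
    # date/time string printed by the target.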
def test(self, selftest):
test_result = True
start = time()
sec_prev = 0
for i in range(0, 5):
            # Timeout changed from default: we need to wait longer for some boards to start up
c = selftest.mbed.serial_readline(timeout=10)
if c is None:
return selftest.RESULT_IO_SERIAL
selftest.notify(c.strip())
delta = time() - start
m = self.re_detect_rtc_value.search(c)
if m and len(m.groups()):
sec = int(m.groups()[0])
time_str = m.groups()[1]
correct_time_str = strftime("%Y-%m-%d %H:%M:%S %p", gmtime(float(sec)))
single_result = time_str == correct_time_str and sec > 0 and sec > sec_prev
test_result = test_result and single_result
result_msg = "OK" if single_result else "FAIL"
selftest.notify("HOST: [%s] [%s] received time %+d sec after %.2f sec... %s"% (sec, time_str, sec - sec_prev, delta, result_msg))
sec_prev = sec
else:
test_result = False
break
start = time()
return selftest.RESULT_SUCCESS if test_result else selftest.RESULT_FAILURE
|
apache-2.0
|
m8ttyB/socorro
|
webapp-django/crashstats/supersearch/tests/test_utils.py
|
3
|
1142
|
import datetime
from nose.tools import eq_
from django.utils.timezone import utc
from crashstats.crashstats.tests.test_views import BaseTestViews
from crashstats.topcrashers.views import get_date_boundaries
class TestDateBoundaries(BaseTestViews):
def test_get_date_boundaries(self):
# Simple test.
start, end = get_date_boundaries({
'date': [
'>2010-03-01T12:12:12',
'<=2010-03-10T00:00:00',
]
})
eq_(
start,
datetime.datetime(2010, 3, 1, 12, 12, 12).replace(tzinfo=utc)
)
eq_(end, datetime.datetime(2010, 3, 10).replace(tzinfo=utc))
# Test with messy dates.
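        # With several bounds the helper should return the widest window:
        # the earliest of the lower ('>'/'>=') bounds and the latest of the
        # upper ('<'/'<=') bounds.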
start, end = get_date_boundaries({
'date': [
'>2010-03-01T12:12:12',
'>2009-01-01T12:12:12',
'<2010-03-11T00:00:00',
'<=2010-03-10T00:00:00',
]
})
eq_(
start,
datetime.datetime(2009, 1, 1, 12, 12, 12).replace(tzinfo=utc)
)
eq_(end, datetime.datetime(2010, 3, 11).replace(tzinfo=utc))
|
mpl-2.0
|
ojengwa/grr
|
client/client_actions/standard.py
|
2
|
20847
|
#!/usr/bin/env python
"""Standard actions that happen on the client."""
import cStringIO as StringIO
import ctypes
import gzip
import hashlib
import os
import platform
import socket
import sys
import time
import zlib
import psutil
import logging
from grr.client import actions
from grr.client import client_utils_common
from grr.client import vfs
from grr.client.client_actions import tempfiles
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import crypto
# We do not send larger buffers than this:
MAX_BUFFER_SIZE = 640 * 1024
class ReadBuffer(actions.ActionPlugin):
"""Reads a buffer from a file and returns it to a server callback."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
try:
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
fd.Seek(args.offset)
offset = fd.Tell()
data = fd.Read(args.length)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
# Now return the data to the server
self.SendReply(offset=offset, data=data,
length=len(data), pathspec=fd.pathspec)
HASH_CACHE = utils.FastStore(100)
class TransferBuffer(actions.ActionPlugin):
"""Reads a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
data = vfs.ReadVFS(args.pathspec, args.offset, args.length,
progress_callback=self.Progress)
result = rdfvalue.DataBlob(
data=zlib.compress(data),
compression=rdfvalue.DataBlob.CompressionType.ZCOMPRESSION)
digest = hashlib.sha256(data).digest()
# Ensure that the buffer is counted against this response. Check network
# send limit.
self.ChargeBytesToSession(len(data))
# Now return the data to the server into the special TransferStore well
# known flow.
self.grr_worker.SendReply(
result, session_id=rdfvalue.SessionID(flow_name="TransferStore"))
# Now report the hash of this blob to our flow as well as the offset and
# length.
self.SendReply(offset=args.offset, length=len(data),
data=digest)
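  # In short: the compressed file data is pushed to the server's TransferStore
  # well-known flow, while the calling flow only gets back the offset, length
  # and sha256 digest of the chunk.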
class HashBuffer(actions.ActionPlugin):
"""Hash a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
data = vfs.ReadVFS(args.pathspec, args.offset, args.length)
digest = hashlib.sha256(data).digest()
# Now report the hash of this blob to our flow as well as the offset and
# length.
self.SendReply(offset=args.offset, length=len(data),
data=digest)
class CopyPathToFile(actions.ActionPlugin):
"""Copy contents of a pathspec to a file on disk."""
in_rdfvalue = rdfvalue.CopyPathToFileRequest
out_rdfvalue = rdfvalue.CopyPathToFileRequest
BLOCK_SIZE = 10 * 1024 * 1024
def _Copy(self, dest_fd):
"""Copy from VFS to file until no more data or self.length is reached.
Args:
dest_fd: file object to write to
Returns:
self.written: bytes written
"""
while self.written < self.length:
to_read = min(self.length - self.written, self.BLOCK_SIZE)
data = self.src_fd.read(to_read)
if not data:
break
dest_fd.write(data)
self.written += len(data)
# Send heartbeats for long files.
self.Progress()
return self.written
def Run(self, args):
"""Read from a VFS file and write to a GRRTempFile on disk.
    If file writing doesn't complete, files won't be cleaned up.
Args:
args: see CopyPathToFile in jobs.proto
"""
self.src_fd = vfs.VFSOpen(args.src_path, progress_callback=self.Progress)
self.src_fd.Seek(args.offset)
offset = self.src_fd.Tell()
self.length = args.length or (1024 ** 4) # 1 TB
self.written = 0
suffix = ".gz" if args.gzip_output else ""
self.dest_fd = tempfiles.CreateGRRTempFile(directory=args.dest_dir,
lifetime=args.lifetime,
suffix=suffix)
self.dest_file = self.dest_fd.name
with self.dest_fd:
if args.gzip_output:
gzip_fd = gzip.GzipFile(self.dest_file, "wb", 9, self.dest_fd)
# Gzip filehandle needs its own close method called
with gzip_fd:
self._Copy(gzip_fd)
else:
self._Copy(self.dest_fd)
pathspec_out = rdfvalue.PathSpec(
path=self.dest_file, pathtype=rdfvalue.PathSpec.PathType.OS)
self.SendReply(offset=offset, length=self.written, src_path=args.src_path,
dest_dir=args.dest_dir, dest_path=pathspec_out,
gzip_output=args.gzip_output)
class ListDirectory(ReadBuffer):
"""Lists all the files in a directory."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Run(self, args):
"""Lists a directory."""
try:
directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
files = list(directory.ListFiles())
files.sort(key=lambda x: x.pathspec.path)
for response in files:
self.SendReply(response)
class IteratedListDirectory(actions.IteratedAction):
"""Lists a directory as an iterator."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Iterate(self, request, client_state):
"""Restores its way through the directory using an Iterator."""
try:
fd = vfs.VFSOpen(request.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
files = list(fd.ListFiles())
files.sort(key=lambda x: x.pathspec.path)
index = client_state.get("index", 0)
length = request.iterator.number
for response in files[index:index + length]:
self.SendReply(response)
# Update the state
client_state["index"] = index + length
class SuspendableListDirectory(actions.SuspendableAction):
"""Lists a directory as a suspendable client action."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Iterate(self):
try:
fd = vfs.VFSOpen(self.request.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
length = self.request.iterator.number
for group in utils.Grouper(fd.ListFiles(), length):
for response in group:
self.SendReply(response)
self.Suspend()
class StatFile(ListDirectory):
"""Sends a StatResponse for a single file."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Run(self, args):
"""Sends a StatResponse for a single file."""
try:
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
res = fd.Stat()
self.SendReply(res)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
class ExecuteCommand(actions.ActionPlugin):
"""Executes one of the predefined commands."""
in_rdfvalue = rdfvalue.ExecuteRequest
out_rdfvalue = rdfvalue.ExecuteResponse
def Run(self, command):
"""Run."""
cmd = command.cmd
args = command.args
time_limit = command.time_limit
res = client_utils_common.Execute(cmd, args, time_limit)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
result = rdfvalue.ExecuteResponse(
request=command,
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
time_used=int(1e6 * time_used))
self.SendReply(result)
class ExecuteBinaryCommand(actions.ActionPlugin):
"""Executes a command from a passed in binary.
  Obviously this is a dangerous function: it provides for arbitrary code
  execution by the server running as root/SYSTEM.
This is protected by the CONFIG[PrivateKeys.executable_signing_private_key],
which should be stored offline and well protected.
This method can be utilized as part of an autoupdate mechanism if necessary.
NOTE: If the binary is too large to fit inside a single request, the request
will have the more_data flag enabled, indicating more data is coming.
"""
in_rdfvalue = rdfvalue.ExecuteBinaryRequest
out_rdfvalue = rdfvalue.ExecuteBinaryResponse
suffix = ""
def WriteBlobToFile(self, request, suffix=""):
"""Writes the blob to a file and returns its path."""
lifetime = 0
# Only set the lifetime thread on the last chunk written.
if not request.more_data:
lifetime = request.time_limit
# Keep the file for at least 5 seconds after execution.
if lifetime > 0:
lifetime += 5
# First chunk truncates the file, later chunks append.
if request.offset == 0:
mode = "w+b"
else:
mode = "r+b"
temp_file = tempfiles.CreateGRRTempFile(filename=request.write_path,
suffix=suffix, mode=mode)
with temp_file:
path = temp_file.name
temp_file.seek(0, 2)
if temp_file.tell() != request.offset:
        raise IOError("Chunk received out of order.")
# Write the new chunk.
temp_file.write(request.executable.data)
return path
def CleanUp(self, path):
"""Removes the temp file."""
try:
if os.path.exists(path):
os.remove(path)
except (OSError, IOError), e:
logging.info("Failed to remove temporary file %s. Err: %s", path, e)
def Run(self, args):
"""Run."""
# Verify the executable blob.
args.executable.Verify(config_lib.CONFIG[
"Client.executable_signing_public_key"])
path = self.WriteBlobToFile(args, self.suffix)
# Only actually run the file on the last chunk.
if not args.more_data:
self.ProcessFile(path, args)
self.CleanUp(path)
def ProcessFile(self, path, args):
res = client_utils_common.Execute(path, args.args, args.time_limit,
bypass_whitelist=True)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
result = rdfvalue.ExecuteBinaryResponse(
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
time_used=int(1e6 * time_used))
self.SendReply(result)
class ExecutePython(actions.ActionPlugin):
"""Executes python code with exec.
  Obviously this is a dangerous function: it provides for arbitrary code
  execution by the server running as root/SYSTEM.
This is protected by CONFIG[PrivateKeys.executable_signing_private_key], which
should be stored offline and well protected.
"""
in_rdfvalue = rdfvalue.ExecutePythonRequest
out_rdfvalue = rdfvalue.ExecutePythonResponse
def Run(self, args):
"""Run."""
time_start = time.time()
class StdOutHook(object):
def __init__(self, buf):
self.buf = buf
def write(self, text):
self.buf.write(text)
args.python_code.Verify(config_lib.CONFIG[
"Client.executable_signing_public_key"])
# The execed code can assign to this variable if it wants to return data.
logging.debug("exec for python code %s", args.python_code.data[0:100])
context = globals().copy()
context["py_args"] = args.py_args.ToDict()
context["magic_return_str"] = ""
# Export the Progress function to allow python hacks to call it.
context["Progress"] = self.Progress
stdout = StringIO.StringIO()
with utils.Stubber(sys, "stdout", StdOutHook(stdout)):
exec(args.python_code.data, context) # pylint: disable=exec-used
stdout_output = stdout.getvalue()
magic_str_output = context.get("magic_return_str")
if stdout_output and magic_str_output:
output = "Stdout: %s\nMagic Str:%s\n" % (stdout_output, magic_str_output)
else:
output = stdout_output or magic_str_output
time_used = time.time() - time_start
# We have to return microseconds.
result = rdfvalue.ExecutePythonResponse(
time_used=int(1e6 * time_used),
return_val=utils.SmartStr(output))
self.SendReply(result)
class Segfault(actions.ActionPlugin):
"""This action is just for debugging. It induces a segfault."""
in_rdfvalue = None
out_rdfvalue = None
def Run(self, unused_args):
"""Does the segfaulting."""
if flags.FLAGS.debug:
logging.warning("Segfault action requested :(")
print ctypes.cast(1, ctypes.POINTER(ctypes.c_void_p)).contents
else:
logging.warning("Segfault requested but not running in debug mode.")
class ListProcesses(actions.ActionPlugin):
"""This action lists all the processes running on a machine."""
in_rdfvalue = None
out_rdfvalue = rdfvalue.Process
def Run(self, unused_arg):
# psutil will cause an active loop on Windows 2000
if platform.system() == "Windows" and platform.version().startswith("5.0"):
raise RuntimeError("ListProcesses not supported on Windows 2000")
for proc in psutil.process_iter():
response = rdfvalue.Process()
process_fields = ["pid", "ppid", "name", "exe", "username", "terminal"]
for field in process_fields:
try:
value = getattr(proc, field)
if value is None:
continue
if callable(value):
value = value()
if not isinstance(value, (int, long)):
value = utils.SmartUnicode(value)
setattr(response, field, value)
except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError):
pass
try:
for arg in proc.cmdline():
response.cmdline.append(utils.SmartUnicode(arg))
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.nice = proc.nice()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
# Not available on Windows.
if hasattr(proc, "uids"):
(response.real_uid, response.effective_uid,
response.saved_uid) = proc.uids()
(response.real_gid, response.effective_gid,
response.saved_gid) = proc.gids()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.ctime = long(proc.create_time() * 1e6)
response.status = str(proc.status())
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
# Not available on OSX.
if hasattr(proc, "cwd"):
response.cwd = utils.SmartUnicode(proc.cwd())
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.num_threads = proc.num_threads()
except (psutil.NoSuchProcess, psutil.AccessDenied, RuntimeError):
pass
try:
(response.user_cpu_time,
response.system_cpu_time) = proc.cpu_times()
# This is very time consuming so we do not collect cpu_percent here.
# response.cpu_percent = proc.get_cpu_percent()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.RSS_size, response.VMS_size = proc.memory_info()
response.memory_percent = proc.memory_percent()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
# Due to a bug in psutil, this function is disabled for now
# (https://github.com/giampaolo/psutil/issues/340)
# try:
# for f in proc.open_files():
# response.open_files.append(utils.SmartUnicode(f.path))
# except (psutil.NoSuchProcess, psutil.AccessDenied):
# pass
try:
for c in proc.connections():
conn = response.connections.Append(family=c.family,
type=c.type,
pid=proc.pid)
try:
conn.state = c.status
except ValueError:
logging.info("Encountered unknown connection status (%s).",
c.status)
try:
conn.local_address.ip, conn.local_address.port = c.laddr
# Could be in state LISTEN.
if c.raddr:
conn.remote_address.ip, conn.remote_address.port = c.raddr
except AttributeError:
conn.local_address.ip, conn.local_address.port = c.local_address
# Could be in state LISTEN.
if c.remote_address:
(conn.remote_address.ip,
conn.remote_address.port) = c.remote_address
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
self.SendReply(response)
# Reading information here is slow so we heartbeat between processes.
self.Progress()
class SendFile(actions.ActionPlugin):
"""This action encrypts and sends a file to a remote listener."""
in_rdfvalue = rdfvalue.SendFileRequest
out_rdfvalue = rdfvalue.StatEntry
BLOCK_SIZE = 1024 * 1024 * 10 # 10 MB
def Send(self, sock, msg):
totalsent = 0
n = len(msg)
while totalsent < n:
sent = sock.send(msg[totalsent:])
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent += sent
def Run(self, args):
"""Run."""
# Open the file.
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
if args.address_family == rdfvalue.NetworkAddress.Family.INET:
family = socket.AF_INET
elif args.address_family == rdfvalue.NetworkAddress.Family.INET6:
family = socket.AF_INET6
else:
raise RuntimeError("Socket address family not supported.")
s = socket.socket(family, socket.SOCK_STREAM)
try:
s.connect((args.host, args.port))
except socket.error as e:
raise RuntimeError(str(e))
cipher = crypto.AES128CBCCipher(args.key, args.iv,
crypto.Cipher.OP_ENCRYPT)
while True:
data = fd.read(self.BLOCK_SIZE)
if not data:
break
self.Send(s, cipher.Update(data))
# Send heartbeats for long files.
self.Progress()
self.Send(s, cipher.Final())
s.close()
self.SendReply(fd.Stat())
class StatFS(actions.ActionPlugin):
"""Call os.statvfs for a given list of paths. OS X and Linux only.
Note that a statvfs call for a network filesystem (e.g. NFS) that is
unavailable, e.g. due to no network, will result in the call blocking.
"""
in_rdfvalue = rdfvalue.StatFSRequest
out_rdfvalue = rdfvalue.Volume
def Run(self, args):
if platform.system() == "Windows":
raise RuntimeError("os.statvfs not available on Windows")
for path in args.path_list:
try:
fd = vfs.VFSOpen(rdfvalue.PathSpec(path=path, pathtype=args.pathtype),
progress_callback=self.Progress)
st = fd.StatFS()
mount_point = fd.GetMountPoint()
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
continue
unix = rdfvalue.UnixVolume(mount_point=mount_point)
      # On Linux, pre-2.6 kernels don't have frsize, so we fall back to bsize.
      # The actual_available_allocation_units attribute is set to the blocks
      # available to the unprivileged user; root may have some additional
      # reserved space.
result = rdfvalue.Volume(bytes_per_sector=(st.f_frsize or st.f_bsize),
sectors_per_allocation_unit=1,
total_allocation_units=st.f_blocks,
actual_available_allocation_units=st.f_bavail,
unix=unix)
self.SendReply(result)
|
apache-2.0
|
taiyuanfang/gyp
|
test/win/gyptest-cl-buffer-security-check.py
|
344
|
1612
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure buffer security check setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('buffer-security-check.gyp', chdir=CHDIR)
test.build('buffer-security-check.gyp', chdir=CHDIR)
def GetDisassemblyOfMain(exe):
# The standard library uses buffer security checks independent of our
# buffer security settings, so we extract just our code (i.e. main()) to
# check against.
full_path = test.built_file_path(exe, chdir=CHDIR)
output = test.run_dumpbin('/disasm', full_path)
result = []
in_main = False
for line in output.splitlines():
if line == '_main:':
in_main = True
elif in_main:
# Disassembly of next function starts.
if line.startswith('_'):
break
result.append(line)
return '\n'.join(result)
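  # Note: the buffer security check corresponds to MSVC's /GS flag; code built
  # with it references the __security_cookie symbol, which is why the presence
  # of 'security_cookie' in main()'s disassembly is used as the indicator.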
# Buffer security checks are on by default, make sure security_cookie
# appears in the disassembly of our code.
if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_unset.exe'):
test.fail_test()
# Explicitly on.
if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_on.exe'):
test.fail_test()
# Explicitly off, shouldn't be a reference to the security cookie.
if 'security_cookie' in GetDisassemblyOfMain('test_bsc_off.exe'):
test.fail_test()
test.pass_test()
|
bsd-3-clause
|
40223135/40223135-
|
static/Brython3.1.1-20150328-091302/Lib/multiprocessing/__init__.py
|
693
|
6866
|
#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__version__ = '0.70a1'
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'SimpleQueue', 'JoinableQueue', 'Pool',
'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
]
__author__ = 'R. Oudkerk ([email protected])'
#
# Imports
#
import os
import sys
from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
pass
class BufferTooShort(ProcessError):
pass
class TimeoutError(ProcessError):
pass
class AuthenticationError(ProcessError):
pass
import _multiprocessing
#
# Definitions not depending on native semaphores
#
def Manager():
'''
Returns a manager associated with a running server process
The managers methods such as `Lock()`, `Condition()` and `Queue()`
can be used to create shared objects.
'''
from multiprocessing.managers import SyncManager
m = SyncManager()
m.start()
return m
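# For example, a manager can hand out proxies to shared objects, roughly:
#   m = Manager()
#   d = m.dict()     # dict proxy shared between processes
#   q = m.Queue()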
#brython fix me
#def Pipe(duplex=True):
# '''
# Returns two connection object connected by a pipe
# '''
# from multiprocessing.connection import Pipe
# return Pipe(duplex)
def cpu_count():
'''
Returns the number of CPUs in the system
'''
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif 'bsd' in sys.platform or sys.platform == 'darwin':
comm = '/sbin/sysctl -n hw.ncpu'
if sys.platform == 'darwin':
comm = '/usr' + comm
try:
with os.popen(comm) as p:
num = int(p.read())
except ValueError:
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
raise NotImplementedError('cannot determine number of cpus')
def freeze_support():
'''
Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
from multiprocessing.forking import freeze_support
freeze_support()
def get_logger():
'''
Return package logger -- if it does not already exist then it is created
'''
from multiprocessing.util import get_logger
return get_logger()
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
from multiprocessing.util import log_to_stderr
return log_to_stderr(level)
#brython fix me
#def allow_connection_pickling():
# '''
# Install support for sending connections and sockets between processes
# '''
# # This is undocumented. In previous versions of multiprocessing
# # its only effect was to make socket objects inheritable on Windows.
# import multiprocessing.connection
#
# Definitions depending on native semaphores
#
def Lock():
'''
Returns a non-recursive lock object
'''
from multiprocessing.synchronize import Lock
return Lock()
def RLock():
'''
Returns a recursive lock object
'''
from multiprocessing.synchronize import RLock
return RLock()
def Condition(lock=None):
'''
Returns a condition object
'''
from multiprocessing.synchronize import Condition
return Condition(lock)
def Semaphore(value=1):
'''
Returns a semaphore object
'''
from multiprocessing.synchronize import Semaphore
return Semaphore(value)
def BoundedSemaphore(value=1):
'''
Returns a bounded semaphore object
'''
from multiprocessing.synchronize import BoundedSemaphore
return BoundedSemaphore(value)
def Event():
'''
Returns an event object
'''
from multiprocessing.synchronize import Event
return Event()
def Barrier(parties, action=None, timeout=None):
'''
Returns a barrier object
'''
from multiprocessing.synchronize import Barrier
return Barrier(parties, action, timeout)
def Queue(maxsize=0):
'''
Returns a queue object
'''
from multiprocessing.queues import Queue
return Queue(maxsize)
def JoinableQueue(maxsize=0):
'''
Returns a queue object
'''
from multiprocessing.queues import JoinableQueue
return JoinableQueue(maxsize)
def SimpleQueue():
'''
Returns a queue object
'''
from multiprocessing.queues import SimpleQueue
return SimpleQueue()
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
'''
Returns a process pool object
'''
from multiprocessing.pool import Pool
return Pool(processes, initializer, initargs, maxtasksperchild)
def RawValue(typecode_or_type, *args):
'''
Returns a shared object
'''
from multiprocessing.sharedctypes import RawValue
return RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a shared array
'''
from multiprocessing.sharedctypes import RawArray
return RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, lock=True):
'''
Returns a synchronized shared object
'''
from multiprocessing.sharedctypes import Value
return Value(typecode_or_type, *args, lock=lock)
def Array(typecode_or_type, size_or_initializer, *, lock=True):
'''
Returns a synchronized shared array
'''
from multiprocessing.sharedctypes import Array
return Array(typecode_or_type, size_or_initializer, lock=lock)
#
#
#
if sys.platform == 'win32':
def set_executable(executable):
'''
Sets the path to a python.exe or pythonw.exe binary used to run
child processes on Windows instead of sys.executable.
Useful for people embedding Python.
'''
from multiprocessing.forking import set_executable
set_executable(executable)
__all__ += ['set_executable']
|
gpl-3.0
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/network/enos/enos_config.py
|
42
|
11179
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Module to configure Lenovo Switches.
# Lenovo Networking
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: enos_config
version_added: "2.5"
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Manage Lenovo ENOS configuration sections
description:
- Lenovo ENOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ENOS configuration sections in
a deterministic way.
extends_documentation_fragment: enos
notes:
- Tested against ENOS 8.4.1
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is
mutually exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
default: line
choices: ['line', 'block', 'config']
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory. If the directory does not exist, it is created.
type: bool
default: 'no'
comment:
description:
- Allows a commit description to be specified to be included
when the configuration is committed. If the configuration is
not changed or committed, this argument is ignored.
default: 'configured by enos_config'
admin:
description:
- Enters into administration configuration mode for making config
changes to the device.
type: bool
default: 'no'
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
          - The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
"""
EXAMPLES = """
- name: configure top level configuration
enos_config:
"lines: hostname {{ inventory_hostname }}"
- name: configure interface settings
enos_config:
lines:
- enable
- ip ospf enable
parents: interface ip 13
- name: load a config from disk and replace the current config
enos_config:
src: config.cfg
backup: yes
- name: configurable backup path
enos_config:
src: config.cfg
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/enos01.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.enos.enos import load_config, get_config
from ansible.module_utils.network.enos.enos import enos_argument_spec
from ansible.module_utils.network.enos.enos import check_args
from ansible.module_utils.network.common.config import NetworkConfig, dumps
DEFAULT_COMMIT_COMMENT = 'configured by enos_config'
def get_running_config(module):
contents = module.params['config']
if not contents:
contents = get_config(module)
return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
replace = module.params['replace']
replace_config = replace == 'config'
path = module.params['parents']
comment = module.params['comment']
admin = module.params['admin']
check_mode = module.check_mode
candidate = get_candidate(module)
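    # Unless match is 'none' or replace is 'config', only the candidate lines
    # that differ from the running config (per the selected match strategy)
    # are pushed; otherwise the whole candidate is sent as-is.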
if match != 'none' and replace != 'config':
contents = get_running_config(module)
configobj = NetworkConfig(contents=contents, indent=1)
commands = candidate.difference(configobj, path=path, match=match,
replace=replace)
else:
commands = candidate.items
if commands:
commands = dumps(commands, 'commands').split('\n')
if any((module.params['lines'], module.params['src'])):
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
diff = load_config(module, commands)
if diff:
result['diff'] = dict(prepared=diff)
result['changed'] = True
def main():
"""main entry point for module execution
"""
backup_spec = dict(
filename=dict(),
dir_path=dict(type='path')
)
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block', 'config']),
config=dict(),
backup=dict(type='bool', default=False),
backup_options=dict(type='dict', options=backup_spec),
comment=dict(default=DEFAULT_COMMIT_COMMENT),
admin=dict(type='bool', default=False)
)
argument_spec.update(enos_argument_spec)
mutually_exclusive = [('lines', 'src'),
('parents', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('replace', 'config', ['src'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = get_config(module)
run(module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
lizardsystem/lizard-measure
|
lizard_measure/migrations/0010_auto__del_score__del_measuringrod__del_field_measurestatusmoment_is_pl.py
|
1
|
23606
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Score'
db.delete_table('lizard_measure_score')
# Deleting model 'MeasuringRod'
db.delete_table('lizard_measure_measuringrod')
# Deleting field 'MeasureStatusMoment.is_planning'
db.delete_column('lizard_measure_measurestatusmoment', 'is_planning')
# Deleting field 'MeasureStatusMoment.date'
db.delete_column('lizard_measure_measurestatusmoment', 'date')
# Adding field 'MeasureStatusMoment.planning_date'
db.add_column('lizard_measure_measurestatusmoment', 'planning_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Adding field 'MeasureStatusMoment.realisation_date'
db.add_column('lizard_measure_measurestatusmoment', 'realisation_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Deleting field 'Measure.total_costs'
db.delete_column('lizard_measure_measure', 'total_costs')
# Adding field 'Measure.valid'
db.add_column('lizard_measure_measure', 'valid', self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True), keep_default=False)
# Adding field 'Measure.geom'
db.add_column('lizard_measure_measure', 'geom', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Adding model 'Score'
db.create_table('lizard_measure_score', (
('gep', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('area_ident', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('ascending', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('mep', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('measuring_rod', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_measure.MeasuringRod'])),
('limit_bad_insufficient', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('area', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_area.Area'], null=True, blank=True)),
('target_2027', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('limit_insufficient_moderate', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('target_2015', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
))
db.send_create_signal('lizard_measure', ['Score'])
# Adding model 'MeasuringRod'
db.create_table('lizard_measure_measuringrod', (
('group', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('sign', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('sub_measuring_rod', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('measuring_rod', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('unit', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
))
db.send_create_signal('lizard_measure', ['MeasuringRod'])
# Adding field 'MeasureStatusMoment.is_planning'
db.add_column('lizard_measure_measurestatusmoment', 'is_planning', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Adding field 'MeasureStatusMoment.date'
db.add_column('lizard_measure_measurestatusmoment', 'date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Deleting field 'MeasureStatusMoment.planning_date'
db.delete_column('lizard_measure_measurestatusmoment', 'planning_date')
# Deleting field 'MeasureStatusMoment.realisation_date'
db.delete_column('lizard_measure_measurestatusmoment', 'realisation_date')
# Adding field 'Measure.total_costs'
db.add_column('lizard_measure_measure', 'total_costs', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Deleting field 'Measure.valid'
db.delete_column('lizard_measure_measure', 'valid')
# Deleting field 'Measure.geom'
db.delete_column('lizard_measure_measure', 'geom')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_area.area': {
'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
},
'lizard_area.communique': {
'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
'code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'lizard_area.dataadministrator': {
'Meta': {'object_name': 'DataAdministrator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_geo.geoobject': {
'Meta': {'object_name': 'GeoObject'},
'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'lizard_geo.geoobjectgroup': {
'Meta': {'object_name': 'GeoObjectGroup'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_measure.fundingorganization': {
'Meta': {'object_name': 'FundingOrganization'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']"}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Organization']"}),
'percentage': ('django.db.models.fields.FloatField', [], {})
},
'lizard_measure.krwstatus': {
'Meta': {'object_name': 'KRWStatus'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.krwwatertype': {
'Meta': {'object_name': 'KRWWatertype'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measure': {
'Meta': {'ordering': "('id',)", 'object_name': 'Measure'},
'aggregation_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'area_measure_set'", 'blank': 'True', 'to': "orm['lizard_area.Area']"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.MeasureCategory']", 'symmetrical': 'False', 'blank': 'True'}),
'datetime_in_source': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'executive': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'executive_measure_set'", 'null': 'True', 'to': "orm['lizard_measure.Organization']"}),
'exploitation_costs': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'funding_organizations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.Organization']", 'through': "orm['lizard_measure.FundingOrganization']", 'symmetrical': 'False'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True', 'blank': 'True'}),
'geometry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObject']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'import_raw': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'import_source': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'initiator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'initiator_measure_set'", 'null': 'True', 'to': "orm['lizard_measure.Organization']"}),
'investment_costs': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_KRW_measure': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_indicator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'measure_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasureType']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']", 'null': 'True', 'blank': 'True'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasurePeriod']", 'null': 'True', 'blank': 'True'}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'responsible_department': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'status_moments': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.MeasureStatus']", 'through': "orm['lizard_measure.MeasureStatusMoment']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Unit']"}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {}),
'waterbodies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.WaterBody']", 'symmetrical': 'False', 'blank': 'True'})
},
'lizard_measure.measurecategory': {
'Meta': {'object_name': 'MeasureCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measureperiod': {
'Meta': {'ordering': "('start_date', 'end_date')", 'object_name': 'MeasurePeriod'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measurestatus': {
'Meta': {'ordering': "('-value',)", 'object_name': 'MeasureStatus'},
'color': ('lizard_map.models.ColorField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'lizard_measure.measurestatusmoment': {
'Meta': {'ordering': "('measure__id', 'status__value')", 'object_name': 'MeasureStatusMoment'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'exploitation_expenditure': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investment_expenditure': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']"}),
'planning_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'realisation_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasureStatus']"})
},
'lizard_measure.measuretype': {
'Meta': {'ordering': "('code',)", 'object_name': 'MeasureType'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'combined_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'harmonisation': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'klass': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'subcategory': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'units': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.Unit']", 'symmetrical': 'False', 'blank': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.organization': {
'Meta': {'ordering': "('description',)", 'unique_together': "(('source', 'code'),)", 'object_name': 'Organization'},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.steeringparameter': {
'Meta': {'object_name': 'SteeringParameter'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']"}),
'fews_parameter': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'target_maximum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'target_minimum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_measure.unit': {
'Meta': {'object_name': 'Unit'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'conversion_factor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dimension': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.waterbody': {
'Meta': {'object_name': 'WaterBody'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'}),
'area_ident': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'krw_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.KRWStatus']", 'null': 'True', 'blank': 'True'}),
'krw_watertype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.KRWWatertype']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['lizard_measure']
|
gpl-3.0
|
konrad/kufpybio
|
kufpybiotools/generate_igr_gff.py
|
1
|
1881
|
#!/usr/bin/env python
__description__ = ""
__author__ = "Konrad Foerstner <[email protected]>"
__copyright__ = "2013 by Konrad Foerstner <[email protected]>"
__license__ = "ISC license"
__email__ = "[email protected]"
__version__ = ""
import argparse
import csv
import sys
sys.path.append(".")
from kufpybio.gff3 import Gff3Parser, Gff3Entry
from kufpybio.gene import Gene
from kufpybio.igrfinder import IGRFinder
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("gff_file", type=argparse.FileType("r"))
parser.add_argument("output_file", type=argparse.FileType("w"))
parser.add_argument("--margin", type=int, default=0)
parser.add_argument("--plus_only", default=False, action="store_true")
args = parser.parse_args()
# Build gene list
gene_list = []
gff_parser = Gff3Parser()
region_entry = None
for entry in gff_parser.entries(args.gff_file):
if entry.feature == "region":
region_entry = entry
continue
gene_list.append(Gene(
entry.seq_id, "", "", entry.start, entry.end,
entry.strand))
# Find IGRs and generate GFF file
igr_finder = IGRFinder()
args.output_file.write("##gff-version 3\n")
strands = ["+", "-"]
if args.plus_only is True:
strands = ["+"]
for start, end in igr_finder.find_igrs(gene_list, region_entry.end):
start = start + args.margin
end = end - args.margin
if end <= start:
continue
for strand in strands:
gff3_entry = Gff3Entry({
"seq_id" : region_entry.seq_id,
"source" : "IGR",
"feature" : "IGR",
"start" : start,
"end" : end,
"score" : ".",
"strand" : strand,
"phase" : ".",
"attributes" : "ID=IGR_%s_%s_to_%s" % (
region_entry.seq_id, start, end)})
args.output_file.write(str(gff3_entry) + "\n")
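# Example invocation (illustrative only; the file names below are placeholders,
# not part of the original script):
#
#     python generate_igr_gff.py annotation.gff igrs.gff --margin 10 --plus_only
#
# This writes one GFF3 "IGR" entry per intergenic region (one per strand unless
# --plus_only is given), with each region shrunk by --margin bases on both sides.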
|
isc
|
Panaetius/woipv
|
src/models/train_model.py
|
1
|
20953
|
import tensorflow as tf
from tensorflow.python.client import timeline
from tensorflow.python import debug as tf_debug
import os
import time
from datetime import datetime
import numpy as np
from model import WoipvModel, NetworkType
from mscoco_input import MSCOCOInputProducer
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/training/woipv_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
class Config(object):
path = "%s/../../data/processed/MSCOCO/" % os.path.dirname(
os.path.realpath(__file__))
chkpt_path = "%s/../../models/transfer_chkpt/" % os.path.dirname(
os.path.realpath(__file__))
num_examples_per_epoch = 72000
num_epochs_per_decay = 3
is_training = True
num_classes = 90
initial_learning_rate = 1e-4
learning_rate_decay_factor = 0.25
width = 600
height = 600
min_box_size = 10
rcnn_cls_loss_weight = 95.0 / (256)
rcnn_reg_loss_weight = 0.005
rpn_cls_loss_weight = 2.0
rpn_reg_loss_weight = 25.0
dropout_prob = 0.5
weight_decay = 0.0001
net = NetworkType.PRETRAINED
pretrained_checkpoint_path = "%s/../../models/pretrained/"% os.path.dirname(
os.path.realpath(__file__))
pretrained_checkpoint_meta = "ResNet-L50.meta"
restore_from_chkpt = False
resnet_34_variables_to_restore = ['first_layer/weights:0', 'first_layer/Variable:0', 'first_layer/Variable_1:0', 'first_layer/Variable_2:0', 'first_layer/Variable_3:0', 'reslayer_64_0/sub1/weights:0', 'reslayer_64_0/sub1/Variable:0', 'reslayer_64_0/sub1/Variable_1:0', 'reslayer_64_0/sub1/Variable_2:0', 'reslayer_64_0/sub1/Variable_3:0', 'reslayer_64_0/sub2/weights:0', 'reslayer_64_0/sub2/Variable:0', 'reslayer_64_0/sub2/Variable_1:0', 'reslayer_64_0/sub2/Variable_2:0', 'reslayer_64_0/sub2/Variable_3:0', 'reslayer_64_1/sub1/weights:0', 'reslayer_64_1/sub1/Variable:0', 'reslayer_64_1/sub1/Variable_1:0', 'reslayer_64_1/sub1/Variable_2:0', 'reslayer_64_1/sub1/Variable_3:0', 'reslayer_64_1/sub2/weights:0', 'reslayer_64_1/sub2/Variable:0', 'reslayer_64_1/sub2/Variable_1:0', 'reslayer_64_1/sub2/Variable_2:0', 'reslayer_64_1/sub2/Variable_3:0', 'reslayer_64_2/sub1/weights:0', 'reslayer_64_2/sub1/Variable:0', 'reslayer_64_2/sub1/Variable_1:0', 'reslayer_64_2/sub1/Variable_2:0', 'reslayer_64_2/sub1/Variable_3:0', 'reslayer_64_2/sub2/weights:0', 'reslayer_64_2/sub2/Variable:0', 'reslayer_64_2/sub2/Variable_1:0', 'reslayer_64_2/sub2/Variable_2:0', 'reslayer_64_2/sub2/Variable_3:0', 'reslayer_downsample_128/sub1/weights:0', 'reslayer_downsample_128/sub1/Variable:0', 'reslayer_downsample_128/sub1/Variable_1:0', 'reslayer_downsample_128/sub1/Variable_2:0', 'reslayer_downsample_128/sub1/Variable_3:0', 'reslayer_downsample_128/sub2/weights:0', 'reslayer_downsample_128/sub2/Variable:0', 'reslayer_downsample_128/sub2/Variable_1:0', 'reslayer_downsample_128/sub2/Variable_2:0', 'reslayer_downsample_128/sub2/Variable_3:0', 'reslayer_128_0/sub1/weights:0', 'reslayer_128_0/sub1/Variable:0', 'reslayer_128_0/sub1/Variable_1:0', 'reslayer_128_0/sub1/Variable_2:0', 'reslayer_128_0/sub1/Variable_3:0', 'reslayer_128_0/sub2/weights:0', 'reslayer_128_0/sub2/Variable:0', 'reslayer_128_0/sub2/Variable_1:0', 'reslayer_128_0/sub2/Variable_2:0', 'reslayer_128_0/sub2/Variable_3:0', 'reslayer_128_1/sub1/weights:0', 'reslayer_128_1/sub1/Variable:0', 'reslayer_128_1/sub1/Variable_1:0', 'reslayer_128_1/sub1/Variable_2:0', 'reslayer_128_1/sub1/Variable_3:0', 'reslayer_128_1/sub2/weights:0', 'reslayer_128_1/sub2/Variable:0', 'reslayer_128_1/sub2/Variable_1:0', 'reslayer_128_1/sub2/Variable_2:0', 'reslayer_128_1/sub2/Variable_3:0', 'reslayer_128_2/sub1/weights:0', 'reslayer_128_2/sub1/Variable:0', 'reslayer_128_2/sub1/Variable_1:0', 'reslayer_128_2/sub1/Variable_2:0', 'reslayer_128_2/sub1/Variable_3:0', 'reslayer_128_2/sub2/weights:0', 'reslayer_128_2/sub2/Variable:0', 'reslayer_128_2/sub2/Variable_1:0', 'reslayer_128_2/sub2/Variable_2:0', 'reslayer_128_2/sub2/Variable_3:0', 'reslayer_downsample_256/sub1/weights:0', 'reslayer_downsample_256/sub1/Variable:0', 'reslayer_downsample_256/sub1/Variable_1:0', 'reslayer_downsample_256/sub1/Variable_2:0', 'reslayer_downsample_256/sub1/Variable_3:0', 'reslayer_downsample_256/sub2/weights:0', 'reslayer_downsample_256/sub2/Variable:0', 'reslayer_downsample_256/sub2/Variable_1:0', 'reslayer_downsample_256/sub2/Variable_2:0', 'reslayer_downsample_256/sub2/Variable_3:0', 'reslayer_256_0/sub1/weights:0', 'reslayer_256_0/sub1/Variable:0', 'reslayer_256_0/sub1/Variable_1:0', 'reslayer_256_0/sub1/Variable_2:0', 'reslayer_256_0/sub1/Variable_3:0', 'reslayer_256_0/sub2/weights:0', 'reslayer_256_0/sub2/Variable:0', 'reslayer_256_0/sub2/Variable_1:0', 'reslayer_256_0/sub2/Variable_2:0', 'reslayer_256_0/sub2/Variable_3:0', 'reslayer_256_1/sub1/weights:0', 'reslayer_256_1/sub1/Variable:0', 
'reslayer_256_1/sub1/Variable_1:0', 'reslayer_256_1/sub1/Variable_2:0', 'reslayer_256_1/sub1/Variable_3:0', 'reslayer_256_1/sub2/weights:0', 'reslayer_256_1/sub2/Variable:0', 'reslayer_256_1/sub2/Variable_1:0', 'reslayer_256_1/sub2/Variable_2:0', 'reslayer_256_1/sub2/Variable_3:0', 'reslayer_256_2/sub1/weights:0', 'reslayer_256_2/sub1/Variable:0', 'reslayer_256_2/sub1/Variable_1:0', 'reslayer_256_2/sub1/Variable_2:0', 'reslayer_256_2/sub1/Variable_3:0', 'reslayer_256_2/sub2/weights:0', 'reslayer_256_2/sub2/Variable:0', 'reslayer_256_2/sub2/Variable_1:0', 'reslayer_256_2/sub2/Variable_2:0', 'reslayer_256_2/sub2/Variable_3:0', 'reslayer_256_3/sub1/weights:0', 'reslayer_256_3/sub1/Variable:0', 'reslayer_256_3/sub1/Variable_1:0', 'reslayer_256_3/sub1/Variable_2:0', 'reslayer_256_3/sub1/Variable_3:0', 'reslayer_256_3/sub2/weights:0', 'reslayer_256_3/sub2/Variable:0', 'reslayer_256_3/sub2/Variable_1:0', 'reslayer_256_3/sub2/Variable_2:0', 'reslayer_256_3/sub2/Variable_3:0', 'reslayer_256_4/sub1/weights:0', 'reslayer_256_4/sub1/Variable:0', 'reslayer_256_4/sub1/Variable_1:0', 'reslayer_256_4/sub1/Variable_2:0', 'reslayer_256_4/sub1/Variable_3:0', 'reslayer_256_4/sub2/weights:0', 'reslayer_256_4/sub2/Variable:0', 'reslayer_256_4/sub2/Variable_1:0', 'reslayer_256_4/sub2/Variable_2:0', 'reslayer_256_4/sub2/Variable_3:0', 'reslayer_downsample_512/sub1/weights:0', 'reslayer_downsample_512/sub1/Variable:0', 'reslayer_downsample_512/sub1/Variable_1:0', 'reslayer_downsample_512/sub1/Variable_2:0', 'reslayer_downsample_512/sub1/Variable_3:0', 'reslayer_downsample_512/sub2/weights:0', 'reslayer_downsample_512/sub2/Variable:0', 'reslayer_downsample_512/sub2/Variable_1:0', 'reslayer_downsample_512/sub2/Variable_2:0', 'reslayer_downsample_512/sub2/Variable_3:0', 'reslayer_512_0/sub1/weights:0', 'reslayer_512_0/sub1/Variable:0', 'reslayer_512_0/sub1/Variable_1:0', 'reslayer_512_0/sub1/Variable_2:0', 'reslayer_512_0/sub1/Variable_3:0', 'reslayer_512_0/sub2/weights:0', 'reslayer_512_0/sub2/Variable:0', 'reslayer_512_0/sub2/Variable_1:0', 'reslayer_512_0/sub2/Variable_2:0', 'reslayer_512_0/sub2/Variable_3:0', 'reslayer_512_1/sub1/weights:0', 'reslayer_512_1/sub1/Variable:0', 'reslayer_512_1/sub1/Variable_1:0', 'reslayer_512_1/sub1/Variable_2:0', 'reslayer_512_1/sub1/Variable_3:0', 'reslayer_512_1/sub2/weights:0', 'reslayer_512_1/sub2/Variable:0', 'reslayer_512_1/sub2/Variable_1:0', 'reslayer_512_1/sub2/Variable_2:0', 'reslayer_512_1/sub2/Variable_3:0']
resnet_50_variables_to_restore = ['first_layer/weights:0', 'first_layer/Variable:0', 'first_layer/Variable_1:0', 'first_layer/Variable_2:0', 'first_layer/Variable_3:0', 'reslayer_64_0/sub1/weights:0', 'reslayer_64_0/sub1/Variable:0', 'reslayer_64_0/sub1/Variable_1:0', 'reslayer_64_0/sub1/Variable_2:0', 'reslayer_64_0/sub1/Variable_3:0', 'reslayer_64_0/sub2/weights:0', 'reslayer_64_0/sub2/Variable:0', 'reslayer_64_0/sub2/Variable_1:0', 'reslayer_64_0/sub2/Variable_2:0', 'reslayer_64_0/sub2/Variable_3:0', 'reslayer_64_1/sub1/weights:0', 'reslayer_64_1/sub1/Variable:0', 'reslayer_64_1/sub1/Variable_1:0', 'reslayer_64_1/sub1/Variable_2:0', 'reslayer_64_1/sub1/Variable_3:0', 'reslayer_64_1/sub2/weights:0', 'reslayer_64_1/sub2/Variable:0', 'reslayer_64_1/sub2/Variable_1:0', 'reslayer_64_1/sub2/Variable_2:0', 'reslayer_64_1/sub2/Variable_3:0', 'reslayer_64_2/sub1/weights:0', 'reslayer_64_2/sub1/Variable:0', 'reslayer_64_2/sub1/Variable_1:0', 'reslayer_64_2/sub1/Variable_2:0', 'reslayer_64_2/sub1/Variable_3:0', 'reslayer_64_2/sub2/weights:0', 'reslayer_64_2/sub2/Variable:0', 'reslayer_64_2/sub2/Variable_1:0', 'reslayer_64_2/sub2/Variable_2:0', 'reslayer_64_2/sub2/Variable_3:0', 'reslayer_downsample_128/sub1/weights:0', 'reslayer_downsample_128/sub1/Variable:0', 'reslayer_downsample_128/sub1/Variable_1:0', 'reslayer_downsample_128/sub1/Variable_2:0', 'reslayer_downsample_128/sub1/Variable_3:0', 'reslayer_downsample_128/sub2/weights:0', 'reslayer_downsample_128/sub2/Variable:0', 'reslayer_downsample_128/sub2/Variable_1:0', 'reslayer_downsample_128/sub2/Variable_2:0', 'reslayer_downsample_128/sub2/Variable_3:0', 'reslayer_128_0/sub1/weights:0', 'reslayer_128_0/sub1/Variable:0', 'reslayer_128_0/sub1/Variable_1:0', 'reslayer_128_0/sub1/Variable_2:0', 'reslayer_128_0/sub1/Variable_3:0', 'reslayer_128_0/sub2/weights:0', 'reslayer_128_0/sub2/Variable:0', 'reslayer_128_0/sub2/Variable_1:0', 'reslayer_128_0/sub2/Variable_2:0', 'reslayer_128_0/sub2/Variable_3:0', 'reslayer_128_1/sub1/weights:0', 'reslayer_128_1/sub1/Variable:0', 'reslayer_128_1/sub1/Variable_1:0', 'reslayer_128_1/sub1/Variable_2:0', 'reslayer_128_1/sub1/Variable_3:0', 'reslayer_128_1/sub2/weights:0', 'reslayer_128_1/sub2/Variable:0', 'reslayer_128_1/sub2/Variable_1:0', 'reslayer_128_1/sub2/Variable_2:0', 'reslayer_128_1/sub2/Variable_3:0', 'reslayer_128_2/sub1/weights:0', 'reslayer_128_2/sub1/Variable:0', 'reslayer_128_2/sub1/Variable_1:0', 'reslayer_128_2/sub1/Variable_2:0', 'reslayer_128_2/sub1/Variable_3:0', 'reslayer_128_2/sub2/weights:0', 'reslayer_128_2/sub2/Variable:0', 'reslayer_128_2/sub2/Variable_1:0', 'reslayer_128_2/sub2/Variable_2:0', 'reslayer_128_2/sub2/Variable_3:0', 'reslayer_downsample_256/sub1/weights:0', 'reslayer_downsample_256/sub1/Variable:0', 'reslayer_downsample_256/sub1/Variable_1:0', 'reslayer_downsample_256/sub1/Variable_2:0', 'reslayer_downsample_256/sub1/Variable_3:0', 'reslayer_downsample_256/sub2/weights:0', 'reslayer_downsample_256/sub2/Variable:0', 'reslayer_downsample_256/sub2/Variable_1:0', 'reslayer_downsample_256/sub2/Variable_2:0', 'reslayer_downsample_256/sub2/Variable_3:0', 'reslayer_256_0/sub1/weights:0', 'reslayer_256_0/sub1/Variable:0', 'reslayer_256_0/sub1/Variable_1:0', 'reslayer_256_0/sub1/Variable_2:0', 'reslayer_256_0/sub1/Variable_3:0', 'reslayer_256_0/sub2/weights:0', 'reslayer_256_0/sub2/Variable:0', 'reslayer_256_0/sub2/Variable_1:0', 'reslayer_256_0/sub2/Variable_2:0', 'reslayer_256_0/sub2/Variable_3:0', 'reslayer_256_1/sub1/weights:0', 'reslayer_256_1/sub1/Variable:0', 
'reslayer_256_1/sub1/Variable_1:0', 'reslayer_256_1/sub1/Variable_2:0', 'reslayer_256_1/sub1/Variable_3:0', 'reslayer_256_1/sub2/weights:0', 'reslayer_256_1/sub2/Variable:0', 'reslayer_256_1/sub2/Variable_1:0', 'reslayer_256_1/sub2/Variable_2:0', 'reslayer_256_1/sub2/Variable_3:0', 'reslayer_256_2/sub1/weights:0', 'reslayer_256_2/sub1/Variable:0', 'reslayer_256_2/sub1/Variable_1:0', 'reslayer_256_2/sub1/Variable_2:0', 'reslayer_256_2/sub1/Variable_3:0', 'reslayer_256_2/sub2/weights:0', 'reslayer_256_2/sub2/Variable:0', 'reslayer_256_2/sub2/Variable_1:0', 'reslayer_256_2/sub2/Variable_2:0', 'reslayer_256_2/sub2/Variable_3:0', 'reslayer_256_3/sub1/weights:0', 'reslayer_256_3/sub1/Variable:0', 'reslayer_256_3/sub1/Variable_1:0', 'reslayer_256_3/sub1/Variable_2:0', 'reslayer_256_3/sub1/Variable_3:0', 'reslayer_256_3/sub2/weights:0', 'reslayer_256_3/sub2/Variable:0', 'reslayer_256_3/sub2/Variable_1:0', 'reslayer_256_3/sub2/Variable_2:0', 'reslayer_256_3/sub2/Variable_3:0', 'reslayer_256_4/sub1/weights:0', 'reslayer_256_4/sub1/Variable:0', 'reslayer_256_4/sub1/Variable_1:0', 'reslayer_256_4/sub1/Variable_2:0', 'reslayer_256_4/sub1/Variable_3:0', 'reslayer_256_4/sub2/weights:0', 'reslayer_256_4/sub2/Variable:0', 'reslayer_256_4/sub2/Variable_1:0', 'reslayer_256_4/sub2/Variable_2:0', 'reslayer_256_4/sub2/Variable_3:0', 'reslayer_downsample_512/sub1/weights:0', 'reslayer_downsample_512/sub1/Variable:0', 'reslayer_downsample_512/sub1/Variable_1:0', 'reslayer_downsample_512/sub1/Variable_2:0', 'reslayer_downsample_512/sub1/Variable_3:0', 'reslayer_downsample_512/sub2/weights:0', 'reslayer_downsample_512/sub2/Variable:0', 'reslayer_downsample_512/sub2/Variable_1:0', 'reslayer_downsample_512/sub2/Variable_2:0', 'reslayer_downsample_512/sub2/Variable_3:0', 'reslayer_512_0/sub1/weights:0', 'reslayer_512_0/sub1/Variable:0', 'reslayer_512_0/sub1/Variable_1:0', 'reslayer_512_0/sub1/Variable_2:0', 'reslayer_512_0/sub1/Variable_3:0', 'reslayer_512_0/sub2/weights:0', 'reslayer_512_0/sub2/Variable:0', 'reslayer_512_0/sub2/Variable_1:0', 'reslayer_512_0/sub2/Variable_2:0', 'reslayer_512_0/sub2/Variable_3:0', 'reslayer_512_1/sub1/weights:0', 'reslayer_512_1/sub1/Variable:0', 'reslayer_512_1/sub1/Variable_1:0', 'reslayer_512_1/sub1/Variable_2:0', 'reslayer_512_1/sub1/Variable_3:0', 'reslayer_512_1/sub2/weights:0', 'reslayer_512_1/sub2/Variable:0', 'reslayer_512_1/sub2/Variable_1:0', 'reslayer_512_1/sub2/Variable_2:0', 'reslayer_512_1/sub2/Variable_3:0']
graph = tf.Graph()
def train():
"""Train ip5wke for a number of steps."""
print("Building graph %.3f" % time.time())
cfg = Config()
with cfg.graph.as_default():
        # Get images and labels from the MS COCO input producer.
input_producer = MSCOCOInputProducer(cfg)
images, categories, bboxes = input_producer.inputs()
model = WoipvModel(cfg)
config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
conv_output = None
if cfg.net == NetworkType.PRETRAINED:
print("restoring pretrained model")
new_saver = tf.train.import_meta_graph(cfg.pretrained_checkpoint_path + cfg.pretrained_checkpoint_meta, input_map={'images': images})
new_saver.restore(sess, tf.train.latest_checkpoint(cfg.pretrained_checkpoint_path))
conv_output = cfg.graph.get_tensor_by_name('scale4/block6/Relu:0')
print(conv_output)
global_step = tf.Variable(0, trainable=False, name="global_step")
# Build a Graph that computes the logits predictions from the
# inference model.
class_scores, region_scores, rpn_class_scores, rpn_region_scores, \
proposed_boxes = \
model.inference(images, conv_output)
# Calculate loss.
loss, rcn_accuracy, rpn_accuracy = model.loss(class_scores,
region_scores,
rpn_class_scores,
rpn_region_scores, categories, bboxes, proposed_boxes, images)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = model.train(loss, global_step)
# Create a saver.
saver = tf.train.Saver(tf.global_variables(),
write_version=tf.train.SaverDef.V2)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph.
print("Running init %.3f" % time.time())
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
sess.run(init)
if cfg.restore_from_chkpt:
# restore variables (for transfer learning)
print("Restoring checkpoint for transfer learning %.3f" %
time.time())
ckpt = tf.train.get_checkpoint_state(cfg.chkpt_path)
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            if cfg.net == NetworkType.RESNET34:
                variables_to_restore = cfg.resnet_34_variables_to_restore
            elif cfg.net == NetworkType.RESNET50:
                variables_to_restore = cfg.resnet_50_variables_to_restore
            # keep only the graph variables whose names appear in the selected list
            variables_to_restore = [v for v in tf.global_variables() if v.name
                                    in variables_to_restore]
chkpt_saver = tf.train.Saver(variables_to_restore,
write_version=tf.train.SaverDef.V2)
chkpt_saver.restore(sess, ckpt.model_checkpoint_path)
print("checkpoint restored %.3f" % time.time())
# Start the queue runners.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
# run_metadata = tf.RunMetadata()
print("Started training %.3f" % time.time())
for step in range(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
# options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
# run_metadata=run_metadata)
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
#tf.contrib.tfprof.model_analyzer.print_model_analysis(
# tf.get_default_graph(),
# run_meta=run_metadata,
# tfprof_options={
# 'max_depth': 10000,
# 'min_bytes': 1, # Only >=1
# 'min_micros': 1, # Only >=1
# 'min_params': 0,
# 'min_float_ops': 0,
# 'device_regexes': ['.*'],
# 'order_by': 'name',
# 'account_type_regexes': ['.*'],
# 'start_name_regexes': ['.*'],
# 'trim_name_regexes': [],
# 'show_name_regexes': ['.*'],
# 'hide_name_regexes': [],
# 'account_displayed_op_only': True,
# 'select': ['micros'],
# 'viz': False,
# 'dump_to_file': ''
# })
#return
if step % 50 == 0:
examples_per_sec = 1.0 / duration
sec_per_batch = float(duration)
# correct_prediction = tf.equal(tf.argmax(logits, 1),
# tf.cast(labels, tf.int64))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction,
# tf.float32))
# train_acc = sess.run(accuracy)
# tf.summary.scalar('accuracy', accuracy)
# trace = timeline.Timeline(step_stats=run_metadata.step_stats)
# trace_file = open('timeline.ctf.json', 'w')
# trace_file.write(trace.generate_chrome_trace_format(show_memory=True))
# trace_file.close()
rcn_acc, rpn_acc = sess.run([rcn_accuracy, rpn_accuracy])
format_str = ('%s: step %d, loss = %.2f, rcn_accuracy = %.3f '
' rpn_acc = %.3f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value,
rcn_acc, rpn_acc,
examples_per_sec, sec_per_batch))
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
# noinspection PyUnusedLocal
def main(argv=None):
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
|
mit
|
ininex/geofire-python
|
resource/lib/python2.7/site-packages/gcloud/bigquery/client.py
|
3
|
10779
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google BigQuery API."""
from gcloud.client import JSONClient
from gcloud.bigquery.connection import Connection
from gcloud.bigquery.dataset import Dataset
from gcloud.bigquery.job import CopyJob
from gcloud.bigquery.job import ExtractTableToStorageJob
from gcloud.bigquery.job import LoadTableFromStorageJob
from gcloud.bigquery.job import QueryJob
from gcloud.bigquery.query import QueryResults
class Client(JSONClient):
"""Client to bundle configuration needed for API requests.
:type project: str
:param project: the project which the client acts on behalf of. Will be
passed when creating a dataset / job. If not passed,
falls back to the default inferred from the environment.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no ``http``
object is passed), falls back to the default inferred
from the environment.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
"""
_connection_class = Connection
def list_datasets(self, include_all=False, max_results=None,
page_token=None):
"""List datasets for the project associated with this client.
See:
https://cloud.google.com/bigquery/docs/reference/v2/datasets/list
:type include_all: boolean
:param include_all: True if results include hidden datasets.
:type max_results: int
        :param max_results: maximum number of datasets to return. If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of datasets. If
not passed, the API will return the first page of
datasets.
:rtype: tuple, (list, str)
:returns: list of :class:`gcloud.bigquery.dataset.Dataset`, plus a
"next page token" string: if the token is not None,
indicates that more datasets can be retrieved with another
call (pass that value as ``page_token``).
"""
params = {}
if include_all:
params['all'] = True
if max_results is not None:
params['maxResults'] = max_results
if page_token is not None:
params['pageToken'] = page_token
path = '/projects/%s/datasets' % (self.project,)
resp = self.connection.api_request(method='GET', path=path,
query_params=params)
datasets = [Dataset.from_api_repr(resource, self)
for resource in resp.get('datasets', ())]
return datasets, resp.get('nextPageToken')
def dataset(self, dataset_name):
"""Construct a dataset bound to this client.
:type dataset_name: str
:param dataset_name: Name of the dataset.
:rtype: :class:`gcloud.bigquery.dataset.Dataset`
:returns: a new ``Dataset`` instance
"""
return Dataset(dataset_name, client=self)
def job_from_resource(self, resource):
"""Detect correct job type from resource and instantiate.
:type resource: dict
:param resource: one job resource from API response
:rtype: One of:
:class:`gcloud.bigquery.job.LoadTableFromStorageJob`,
:class:`gcloud.bigquery.job.CopyJob`,
:class:`gcloud.bigquery.job.ExtractTableToStorageJob`,
:class:`gcloud.bigquery.job.QueryJob`,
:class:`gcloud.bigquery.job.RunSyncQueryJob`
:returns: the job instance, constructed via the resource
"""
config = resource['configuration']
if 'load' in config:
return LoadTableFromStorageJob.from_api_repr(resource, self)
elif 'copy' in config:
return CopyJob.from_api_repr(resource, self)
elif 'extract' in config:
return ExtractTableToStorageJob.from_api_repr(resource, self)
elif 'query' in config:
return QueryJob.from_api_repr(resource, self)
raise ValueError('Cannot parse job resource')
def list_jobs(self, max_results=None, page_token=None, all_users=None,
state_filter=None):
"""List jobs for the project associated with this client.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/list
:type max_results: int
        :param max_results: maximum number of jobs to return. If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of jobs. If
not passed, the API will return the first page of
jobs.
:type all_users: boolean
:param all_users: if true, include jobs owned by all users in the
project.
:type state_filter: str
:param state_filter: if passed, include only jobs matching the given
state. One of
* ``"done"``
* ``"pending"``
* ``"running"``
:rtype: tuple, (list, str)
:returns: list of job instances, plus a "next page token" string:
if the token is not ``None``, indicates that more jobs can be
                  retrieved with another call (passing that value as
                  ``page_token``).
"""
params = {'projection': 'full'}
if max_results is not None:
params['maxResults'] = max_results
if page_token is not None:
params['pageToken'] = page_token
if all_users is not None:
params['allUsers'] = all_users
if state_filter is not None:
params['stateFilter'] = state_filter
path = '/projects/%s/jobs' % (self.project,)
resp = self.connection.api_request(method='GET', path=path,
query_params=params)
jobs = [self.job_from_resource(resource) for resource in resp['jobs']]
return jobs, resp.get('nextPageToken')
def load_table_from_storage(self, job_name, destination, *source_uris):
"""Construct a job for loading data into a table from CloudStorage.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type job_name: str
:param job_name: Name of the job.
:type destination: :class:`gcloud.bigquery.table.Table`
:param destination: Table into which data is to be loaded.
:type source_uris: sequence of string
:param source_uris: URIs of data files to be loaded; in format
``gs://<bucket_name>/<object_name_or_glob>``.
:rtype: :class:`gcloud.bigquery.job.LoadTableFromStorageJob`
:returns: a new ``LoadTableFromStorageJob`` instance
"""
return LoadTableFromStorageJob(job_name, destination, source_uris,
client=self)
def copy_table(self, job_name, destination, *sources):
"""Construct a job for copying one or more tables into another table.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
:type job_name: str
:param job_name: Name of the job.
:type destination: :class:`gcloud.bigquery.table.Table`
:param destination: Table into which data is to be copied.
:type sources: sequence of :class:`gcloud.bigquery.table.Table`
:param sources: tables to be copied.
:rtype: :class:`gcloud.bigquery.job.CopyJob`
:returns: a new ``CopyJob`` instance
"""
return CopyJob(job_name, destination, sources, client=self)
def extract_table_to_storage(self, job_name, source, *destination_uris):
"""Construct a job for extracting a table into Cloud Storage files.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extract
:type job_name: str
:param job_name: Name of the job.
:type source: :class:`gcloud.bigquery.table.Table`
:param source: table to be extracted.
:type destination_uris: sequence of string
:param destination_uris: URIs of CloudStorage file(s) into which
table data is to be extracted; in format
``gs://<bucket_name>/<object_name_or_glob>``.
:rtype: :class:`gcloud.bigquery.job.ExtractTableToStorageJob`
:returns: a new ``ExtractTableToStorageJob`` instance
"""
return ExtractTableToStorageJob(job_name, source, destination_uris,
client=self)
def run_async_query(self, job_name, query):
"""Construct a job for running a SQL query asynchronously.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query
:type job_name: str
:param job_name: Name of the job.
:type query: str
:param query: SQL query to be executed
:rtype: :class:`gcloud.bigquery.job.QueryJob`
:returns: a new ``QueryJob`` instance
"""
return QueryJob(job_name, query, client=self)
def run_sync_query(self, query):
"""Run a SQL query synchronously.
:type query: str
:param query: SQL query to be executed
:rtype: :class:`gcloud.bigquery.query.QueryResults`
:returns: a new ``QueryResults`` instance
"""
return QueryResults(query, client=self)
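# Example usage (illustrative sketch only; the project id, dataset name and
# query below are placeholders, not part of this module):
#
#     from gcloud.bigquery.client import Client
#
#     client = Client(project='my-project')        # hypothetical project id
#     datasets, token = client.list_datasets()     # first page of datasets
#     dataset = client.dataset('my_dataset')       # bind a dataset to the client
#     results = client.run_sync_query('SELECT 1')  # synchronous query results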
|
mit
|
jakobmoss/tsa
|
utils/makeweights.py
|
1
|
2350
|
# -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Time Series Analysis -- Generate statistical weights from scatter
#
# Author: Jakob Rørsted Mosumgaard
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###############################################################################
# Modules
###############################################################################
from __future__ import print_function, with_statement, division
import numpy as np
import bottleneck as bn
###############################################################################
# Functions
###############################################################################
def genweight(datname, dpath, wpath):
"""
Combine time series with statistical weights calculated from scatter
Arguments:
- `datname`: Identifier of data file
- `dpath` : Path to data file (time series).
- `wpath` : Path to scatter file (with same time points!)
"""
# Pretty print
print('Generating weights for {0} !'.format(dpath))
# Load data and weights
t, d = np.loadtxt(dpath, unpack=True)
tt, sig = np.loadtxt(wpath, unpack=True)
# Check that times are indeed the same
tdif = t - tt
    if tdif.any():
print('Error! Not the same time points! Quitting!')
exit()
# Moving variance (Hans: M = 50 - 100)
M = 70
movstd = bn.move_std(sig, M, min_count=1)
movvar = np.square(movstd)
# Remove first point
x = 1
t = t[x:]
d = d[x:]
movvar = movvar[x:]
# Calculate weights from scatter (1 / variance)
w = np.divide(1.0, movvar)
# Save
    outfile = datname + '_with-weights.txt'
np.savetxt(outfile, np.transpose([t, d, w]), fmt='%.15e', delimiter='\t')
# Done!
print('Done!\n')
###############################################################################
# Script
###############################################################################
if __name__ == "__main__":
# Definitions
datdir = '../../data/'
ext = '.txt'
append = '-high'
# Run for star 1
star = 'star01'
genweight(star, datdir + star + ext, star + append + ext)
# Run for star 2
star = 'star02'
genweight(star, datdir + star + ext, star + append + ext)
|
mit
|
erstis-go-botting/sexy-bot
|
misc.py
|
1
|
1888
|
import os
# Checks whether settings.ini should be generated. If universe, username and password are not given, it generates a settings.ini with the default account.
# This settings_generator will only work for universe 82 if the flag argument is given as True (to make sure that universe 82 is intended).
def settings_generator(universe = 82, username = 'defaultName', password = 'defaultPassword', flag=False):
path = os.path.normcase('settings/settings.ini')
if (os.path.isfile('settings/settings.ini')):
print("settings file found, stopping now.")
return
if (universe == 82 and not(flag)) or (username == 'defaultName') or (password == 'defaultPassword'):
print("Not all fields specified, fallback on default configuration")
universe = 82
username = 'defaultName'
password = 'defaultPassword'
if not (os.path.isdir('settings')):
        os.makedirs('settings')
with open(path,'w') as foo:
foo.write('[credentials]\nuniverse = '+ str(universe) +'\npassword = '+password+'\nusername = '+username)
print("Settings.ini generated")
def force_settings_generator(universe = 82, username = 'defaultName', password = 'defaultPassword', flag=False):
path = os.path.normcase('settings/settings.ini')
if not (os.path.isfile('settings/settings.ini')):
settings_generator(universe, username, password, flag)
return
if (universe == 82 and not(flag)) or (username == 'defaultName') or (password == 'defaultPassword'):
print("Not all fields specified, fallback on default configuration")
universe = 82
username = 'defaultName'
password = 'defaultPassword'
with open(path,'w') as foo:
foo.write('[credentials]\nuniverse = '+ str(universe) +'\npassword = '+password+'\nusername = '+username)
print("Settings.ini generated")
#settings_generator()
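# Example usage (illustrative only; the credentials below are placeholders):
#
#     settings_generator(universe=82, username='myName', password='myPassword', flag=True)
#     force_settings_generator(universe=82, username='myName', password='myPassword', flag=True)
#
# Both calls write settings/settings.ini; force_settings_generator overwrites an
# existing file, while settings_generator leaves an existing file untouched.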
|
mit
|
cainmatt/django
|
tests/template_tests/syntax_tests/test_autoescape.py
|
337
|
5575
|
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import SafeClass, UnsafeClass, setup
class AutoescapeTagTests(SimpleTestCase):
@setup({'autoescape-tag01': '{% autoescape off %}hello{% endautoescape %}'})
def test_autoescape_tag01(self):
output = self.engine.render_to_string('autoescape-tag01')
self.assertEqual(output, 'hello')
@setup({'autoescape-tag02': '{% autoescape off %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag02(self):
output = self.engine.render_to_string('autoescape-tag02', {'first': '<b>hello</b>'})
self.assertEqual(output, '<b>hello</b>')
@setup({'autoescape-tag03': '{% autoescape on %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag03(self):
output = self.engine.render_to_string('autoescape-tag03', {'first': '<b>hello</b>'})
self.assertEqual(output, '<b>hello</b>')
# Autoescape disabling and enabling nest in a predictable way.
@setup({'autoescape-tag04': '{% autoescape off %}'
'{{ first }} {% autoescape on %}{{ first }}{% endautoescape %}{% endautoescape %}'})
def test_autoescape_tag04(self):
output = self.engine.render_to_string('autoescape-tag04', {'first': '<a>'})
self.assertEqual(output, '<a> <a>')
@setup({'autoescape-tag05': '{% autoescape on %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag05(self):
output = self.engine.render_to_string('autoescape-tag05', {'first': '<b>first</b>'})
self.assertEqual(output, '<b>first</b>')
# Strings (ASCII or unicode) already marked as "safe" are not
# auto-escaped
@setup({'autoescape-tag06': '{{ first }}'})
def test_autoescape_tag06(self):
output = self.engine.render_to_string('autoescape-tag06', {'first': mark_safe('<b>first</b>')})
self.assertEqual(output, '<b>first</b>')
@setup({'autoescape-tag07': '{% autoescape on %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag07(self):
output = self.engine.render_to_string('autoescape-tag07', {'first': mark_safe('<b>Apple</b>')})
self.assertEqual(output, '<b>Apple</b>')
@setup({'autoescape-tag08': r'{% autoescape on %}'
r'{{ var|default_if_none:" endquote\" hah" }}{% endautoescape %}'})
def test_autoescape_tag08(self):
"""
Literal string arguments to filters, if used in the result, are safe.
"""
output = self.engine.render_to_string('autoescape-tag08', {"var": None})
self.assertEqual(output, ' endquote" hah')
# Objects which return safe strings as their __str__ method
# won't get double-escaped.
@setup({'autoescape-tag09': r'{{ unsafe }}'})
def test_autoescape_tag09(self):
output = self.engine.render_to_string('autoescape-tag09', {'unsafe': UnsafeClass()})
self.assertEqual(output, 'you & me')
@setup({'autoescape-tag10': r'{{ safe }}'})
def test_autoescape_tag10(self):
output = self.engine.render_to_string('autoescape-tag10', {'safe': SafeClass()})
self.assertEqual(output, 'you > me')
@setup({'autoescape-filtertag01': '{{ first }}{% filter safe %}{{ first }} x<y{% endfilter %}'})
def test_autoescape_filtertag01(self):
"""
The "safe" and "escape" filters cannot work due to internal
implementation details (fortunately, the (no)autoescape block
tags can be used in those cases)
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('autoescape-filtertag01', {'first': '<a>'})
@setup({'autoescape-ifequal01': '{% ifequal var "this & that" %}yes{% endifequal %}'})
def test_autoescape_ifequal01(self):
"""
        ifequal compares unescaped values.
"""
output = self.engine.render_to_string('autoescape-ifequal01', {'var': 'this & that'})
self.assertEqual(output, 'yes')
# Arguments to filters are 'safe' and manipulate their input unescaped.
@setup({'autoescape-filters01': '{{ var|cut:"&" }}'})
def test_autoescape_filters01(self):
output = self.engine.render_to_string('autoescape-filters01', {'var': 'this & that'})
self.assertEqual(output, 'this that')
@setup({'autoescape-filters02': '{{ var|join:" & " }}'})
def test_autoescape_filters02(self):
output = self.engine.render_to_string('autoescape-filters02', {'var': ('Tom', 'Dick', 'Harry')})
self.assertEqual(output, 'Tom & Dick & Harry')
@setup({'autoescape-literals01': '{{ "this & that" }}'})
def test_autoescape_literals01(self):
"""
Literal strings are safe.
"""
output = self.engine.render_to_string('autoescape-literals01')
self.assertEqual(output, 'this & that')
@setup({'autoescape-stringiterations01': '{% for l in var %}{{ l }},{% endfor %}'})
def test_autoescape_stringiterations01(self):
"""
Iterating over strings outputs safe characters.
"""
output = self.engine.render_to_string('autoescape-stringiterations01', {'var': 'K&R'})
self.assertEqual(output, 'K,&,R,')
@setup({'autoescape-lookup01': '{{ var.key }}'})
def test_autoescape_lookup01(self):
"""
Escape requirement survives lookup.
"""
output = self.engine.render_to_string('autoescape-lookup01', {'var': {'key': 'this & that'}})
self.assertEqual(output, 'this & that')
|
bsd-3-clause
|
ActiveState/code
|
recipes/Python/275366_Email_address_leech/recipe-275366.py
|
1
|
1624
|
import re
def test():
text = \
''' You can contact us at [email protected]
or at yourname AT server DOT site DOT com.
Also at o u r n a m e @ s e r v e r dot s i t e dot c o m
and t.h.e.i.r.n.a.m.e at server dot s/i/t/e DOT COM.
'''
for email in emailLeech(text): print email
DOMAINS = ["com","edu","net","org","gov","us"] #.. and so on
FLAGS = re.IGNORECASE | re.VERBOSE
AT = r'(?: @ | \b A \s* T \b)'
ADDRESSPART = r'\b (?: \w+ | \w (?:(?:\s+|\W) \w)*) \b'
DOMAIN = r'(?:%s)' % '|'.join(["(?:\s*|\W)".join(domain) for domain in DOMAINS])
NONWORD = re.compile(r'\W+')
DOT_REGEX = re.compile(r'(?: \. | \b D \s* O \s* T \b)', FLAGS)
EMAIL_REGEX = re.compile(
(r'(?P<name>%s) \W* %s \W*' % (ADDRESSPART,AT)) +
r'(?P<site>(?: %s \W* %s \W*)+)' % (ADDRESSPART, DOT_REGEX.pattern) +
r'(?P<domain>%s)' % DOMAIN, FLAGS)
def emailLeech(text):
''' An iterator over recognized email addresses within text'''
while (True):
match = EMAIL_REGEX.search(text)
if not match: break
parts = [match.group("name")] + \
DOT_REGEX.split(match.group("site")) + \
[match.group("domain")]
# discard non word chars
parts = [NONWORD.sub('',part) for part in parts]
# discard all empty parts and make lowercase
parts = [part.lower() for part in parts if len(part)>0]
# join the parts
yield "%s@%s.%s" % (parts[0], '.'.join(parts[1:-1]), parts[-1])
text = text[match.end():]
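# --- Editor's addition (not part of the original recipe): a minimal usage
# --- sketch. The sample text below is invented purely for illustration.
def demo_email_leech():
    sample = 'write to j o h n AT e x a m p l e DOT c o m please'
    # emailLeech should reassemble the obfuscated address into canonical form,
    # yielding ['[email protected]'] for this input.
    return list(emailLeech(sample))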
if __name__ == '__main__': test()
|
mit
|
mosdef-hub/foyer
|
foyer/tests/test_forcefield_parameters.py
|
1
|
10029
|
import numpy as np
import pytest
from foyer import Forcefield, forcefields
from foyer.exceptions import MissingForceError, MissingParametersError
from foyer.forcefield import get_available_forcefield_loaders
from foyer.tests.base_test import BaseTest
from foyer.tests.utils import get_fn
@pytest.mark.skipif(
condition="load_GAFF"
not in map(lambda func: func.__name__, get_available_forcefield_loaders()),
reason="GAFF Plugin is not installed",
)
class TestForcefieldParameters(BaseTest):
@pytest.fixture(scope="session")
def gaff(self):
return forcefields.load_GAFF()
def test_gaff_missing_group(self, gaff):
with pytest.raises(ValueError):
gaff.get_parameters("missing", key=[])
def test_gaff_non_string_keys(self, gaff):
with pytest.raises(TypeError):
gaff.get_parameters("atoms", key=1)
def test_gaff_bond_parameters_gaff(self, gaff):
bond_params = gaff.get_parameters("harmonic_bonds", ["br", "ca"])
assert np.isclose(bond_params["length"], 0.19079)
assert np.isclose(bond_params["k"], 219827.36)
def test_gaff_bond_params_reversed(self, gaff):
assert gaff.get_parameters(
"harmonic_bonds", ["ca", "br"]
        ) == gaff.get_parameters("harmonic_bonds", ["br", "ca"])
def test_gaff_missing_bond_parameters(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("harmonic_bonds", ["str1", "str2"])
def test_gaff_angle_parameters(self, gaff):
angle_params = gaff.get_parameters("harmonic_angles", ["f", "c1", "f"])
assert np.allclose(
[angle_params["theta"], angle_params["k"]],
[3.141592653589793, 487.0176],
)
def test_gaff_angle_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"harmonic_angles", ["f", "c2", "ha"]
).values()
),
list(
gaff.get_parameters(
"harmonic_angles", ["ha", "c2", "f"]
).values()
),
)
def test_gaff_missing_angle_parameters(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("harmonic_angles", ["1", "2", "3"])
def test_gaff_periodic_proper_parameters(self, gaff):
periodic_proper_params = gaff.get_parameters(
"periodic_propers", ["c3", "c", "sh", "hs"]
)
assert np.allclose(periodic_proper_params["periodicity"], [2.0, 1.0])
assert np.allclose(
periodic_proper_params["k"], [9.414, 5.4392000000000005]
)
assert np.allclose(
periodic_proper_params["phase"],
[3.141592653589793, 3.141592653589793],
)
def test_gaff_periodic_proper_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"periodic_propers", ["c3", "c", "sh", "hs"]
).values()
),
list(
gaff.get_parameters(
"periodic_propers", ["hs", "sh", "c", "c3"]
).values()
),
)
def test_gaff_periodic_improper_parameters(self, gaff):
periodic_improper_params = gaff.get_parameters(
"periodic_impropers", ["c", "", "o", "o"]
)
assert np.allclose(periodic_improper_params["periodicity"], [2.0])
assert np.allclose(periodic_improper_params["k"], [4.6024])
assert np.allclose(
periodic_improper_params["phase"], [3.141592653589793]
)
def test_gaff_periodic_improper_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"periodic_impropers", ["c", "", "o", "o"]
).values()
),
list(
gaff.get_parameters(
"periodic_impropers", ["c", "o", "", "o"]
).values()
),
)
def test_gaff_proper_params_missing(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("periodic_impropers", ["a", "b", "c", "d"])
def test_gaff_scaling_factors(self, gaff):
assert gaff.lj14scale == 0.5
assert np.isclose(gaff.coulomb14scale, 0.833333333)
def test_opls_get_parameters_atoms(self, oplsaa):
atom_params = oplsaa.get_parameters("atoms", "opls_145")
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_atoms_list(self, oplsaa):
atom_params = oplsaa.get_parameters("atoms", ["opls_145"])
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_atom_class(self, oplsaa):
atom_params = oplsaa.get_parameters(
"atoms", "CA", keys_are_atom_classes=True
)
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_bonds(self, oplsaa):
bond_params = oplsaa.get_parameters(
"harmonic_bonds", ["opls_760", "opls_145"]
)
assert bond_params["length"] == 0.146
assert bond_params["k"] == 334720.0
def test_opls_get_parameters_bonds_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_bonds", ["opls_760", "opls_145"]
).values()
),
list(
oplsaa.get_parameters(
"harmonic_bonds", ["opls_145", "opls_760"]
).values()
),
)
def test_opls_get_parameters_bonds_atom_classes_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_bonds", ["C_2", "O_2"], True
).values()
),
list(
oplsaa.get_parameters(
"harmonic_bonds", ["O_2", "C_2"], True
).values()
),
)
def test_opls_get_parameters_angle(self, oplsaa):
angle_params = oplsaa.get_parameters(
"harmonic_angles", ["opls_166", "opls_772", "opls_167"]
)
assert np.allclose(
[angle_params["theta"], angle_params["k"]], [2.0943950239, 585.76]
)
def test_opls_get_parameters_angle_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_angles", ["opls_166", "opls_772", "opls_167"]
).values()
),
list(
oplsaa.get_parameters(
"harmonic_angles", ["opls_167", "opls_772", "opls_166"]
).values()
),
)
def test_opls_get_parameters_angle_atom_classes(self, oplsaa):
angle_params = oplsaa.get_parameters(
"harmonic_angles", ["CA", "C_2", "CA"], keys_are_atom_classes=True
)
assert np.allclose(
[angle_params["theta"], angle_params["k"]], [2.09439510239, 711.28]
)
def test_opls_get_parameters_angle_atom_classes_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_angles",
["CA", "C", "O"],
keys_are_atom_classes=True,
).values()
),
list(
oplsaa.get_parameters(
"harmonic_angles",
["O", "C", "CA"],
keys_are_atom_classes=True,
).values()
),
)
def test_opls_get_parameters_rb_proper(self, oplsaa):
proper_params = oplsaa.get_parameters(
"rb_propers", ["opls_215", "opls_215", "opls_235", "opls_269"]
)
assert np.allclose(
[
proper_params["c0"],
proper_params["c1"],
proper_params["c2"],
proper_params["c3"],
proper_params["c4"],
proper_params["c5"],
],
[2.28446, 0.0, -2.28446, 0.0, 0.0, 0.0],
)
def test_get_parameters_rb_proper_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"rb_propers",
["opls_215", "opls_215", "opls_235", "opls_269"],
).values()
),
list(
oplsaa.get_parameters(
"rb_propers",
["opls_269", "opls_235", "opls_215", "opls_215"],
).values()
),
)
def test_opls_get_parameters_wildcard(self, oplsaa):
proper_params = oplsaa.get_parameters(
"rb_propers", ["", "opls_235", "opls_544", ""]
)
assert np.allclose(
[
proper_params["c0"],
proper_params["c1"],
proper_params["c2"],
proper_params["c3"],
proper_params["c4"],
proper_params["c5"],
],
[30.334, 0.0, -30.334, 0.0, 0.0, 0.0],
)
def test_opls_missing_force(self, oplsaa):
with pytest.raises(MissingForceError):
oplsaa.get_parameters("periodic_propers", key=["a", "b", "c", "d"])
def test_opls_scaling_factors(self, oplsaa):
assert oplsaa.lj14scale == 0.5
assert oplsaa.coulomb14scale == 0.5
def test_missing_scaling_factors(self):
ff = Forcefield(forcefield_files=(get_fn("validate_customtypes.xml")))
with pytest.raises(AttributeError):
assert ff.lj14scale
with pytest.raises(AttributeError):
assert ff.coulomb14scale
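# --- Editor's addition (not part of the original test module): a hedged usage
# --- sketch of the get_parameters API exercised above. It assumes the GAFF
# --- plugin loader is installed, as required by the skipif guard on this class.
def demo_gaff_bond_lookup():
    gaff = forcefields.load_GAFF()
    params = gaff.get_parameters("harmonic_bonds", ["br", "ca"])
    # the assertions above expect length ~0.19079 and k ~219827.36
    return params["length"], params["k"]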
|
mit
|
kamilmowinski/nao_gesture
|
scripts/nao.py
|
2
|
1999
|
#!/usr/bin/env python
import rospy
import math
from naoqi import ALProxy
from my_kinnect.msg import NaoCoords
class NaoMonkey:
PART = {
'LShoulder': ['LShoulderPitch', 'LShoulderRoll'],
'RShoulder': ['RShoulderPitch', 'RShoulderRoll'],
'LElbow': ['LElbowYaw', 'LElbowRoll'],
'RElbow': ['RElbowYaw', 'RElbowRoll'],
'Head': ['HeadYaw', 'HeadPitch'],
}
LIMITS = {
'Head': [[-2.0, 2.0], [-0.67, 0.51]],
'LShoulder': [[-2.0, 2.0], [-0.31, 1.32]],
'RShoulder': [[-2.0, 2.0], [-1.32, 0.31]],
'LElbow': [[-2.0, 2.0], [-1.54, -0.03]],
'RElbow': [[-2.0, 2.0], [0.03, 1.54]],
}
def __init__(self):
rospy.init_node('nao_mykinect', anonymous=True)
self.listener = rospy.Subscriber('nao', NaoCoords, self.move)
ip = rospy.get_param('~ip', '10.104.16.141')
port = int(rospy.get_param('~port', '9559'))
self.al = ALProxy("ALAutonomousLife", ip, port)
self.postureProxy = ALProxy("ALRobotPosture", ip, port)
self.motionProxy = ALProxy("ALMotion", ip, port)
self.al.setState("disabled")
for part in ["Head", "LArm", "RArm"]:
self.motionProxy.setStiffnesses(part, 1.0)
rospy.loginfo(self.motionProxy.getSummary())
def move(self, coords):
part = coords.Part.data
angles1 = coords.Angles1
angles2 = coords.Angles2
angles = [float(angles1.data), float(angles2.data)]
speed = 1.0
if part not in NaoMonkey.PART:
error_msg = 'Wat? I Do not have ' + str(part)
rospy.loginfo(error_msg)
return
if len(NaoMonkey.PART[part]) != len(angles):
error_msg = 'Wat? What shall i do with rest joint?'
rospy.loginfo(error_msg)
return
angles = map(lambda x: float(x)*math.pi/180.0, angles)
for limit, angle in zip(NaoMonkey.LIMITS[part], angles):
if angle < limit[0] or angle > limit[1]:
error_msg = 'Wat? Limits man!'
                rospy.loginfo(error_msg)
                return
self.motionProxy.setAngles(NaoMonkey.PART[part], angles, speed);
if __name__ == '__main__':
try:
NaoMonkey()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
gpl-2.0
|
piosz/test-infra
|
gubernator/pb_glance_test.py
|
36
|
1815
|
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pb_glance
def tostr(data):
if isinstance(data, list):
return ''.join(c if isinstance(c, str) else chr(c) for c in data)
return data
class PBGlanceTest(unittest.TestCase):
def expect(self, data, expected, types=None):
result = pb_glance.parse_protobuf(tostr(data), types)
self.assertEqual(result, expected)
def test_basic(self):
self.expect(
[0, 1, # varint
0, 0x96, 1, # multi-byte varint
(1<<3)|1, 'abcdefgh', # 64-bit
(2<<3)|2, 5, 'value', # length-delimited (string)
(3<<3)|5, 'abcd', # 32-bit
],
{
0: [1, 150],
1: ['abcdefgh'],
2: ['value'],
3: ['abcd'],
})
def test_embedded(self):
self.expect([2, 2, 3<<3, 1], {0: [{3: [1]}]}, {0: {}})
def test_field_names(self):
self.expect([2, 2, 'hi'], {'greeting': ['hi']}, {0: 'greeting'})
def test_embedded_names(self):
self.expect(
[2, 4, (3<<3)|2, 2, 'hi'],
{'msg': [{'greeting': ['hi']}]},
{0: {'name': 'msg', 3: 'greeting'}})
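# --- Editor's addition (not part of the original tests): the tag bytes built in
# --- test_basic follow the protobuf wire format, where each key byte is
# --- (field_number << 3) | wire_type.
def demo_tag_byte(field_number=2, wire_type=2):
    # (2 << 3) | 2 == 18 == 0x12: field 2, wire type 2 (length-delimited),
    # matching the 'value' entry expected under key 2 in test_basic.
    return (field_number << 3) | wire_type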
|
apache-2.0
|
vismartltd/edx-platform
|
common/test/acceptance/pages/lms/open_response.py
|
165
|
4579
|
"""
Open-ended response in the courseware.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from .rubric import RubricPage
class OpenResponsePage(PageObject):
"""
Open-ended response in the courseware.
"""
url = None
def is_browser_on_page(self):
return self.q(css='div.xmodule_CombinedOpenEndedModule').present
@property
def assessment_type(self):
"""
Return the type of assessment currently active.
Options are "self", "ai", or "peer"
"""
labels = self.q(css='section#combined-open-ended-status>div.statusitem-current').text
if len(labels) < 1:
self.warning("Could not find assessment type label")
# Provide some tolerance to UI changes
label_compare = labels[0].lower().strip()
if 'self' in label_compare:
return 'self'
elif 'ai' in label_compare:
return 'ai'
elif 'peer' in label_compare:
return 'peer'
else:
raise ValueError("Unexpected assessment type: '{0}'".format(label_compare))
@property
def prompt(self):
"""
Return an HTML string representing the essay prompt.
"""
prompt_css = "section.open-ended-child>div.prompt"
prompts = self.q(css=prompt_css).map(lambda el: el.get_attribute('innerHTML').strip()).results
if len(prompts) == 0:
self.warning("Could not find essay prompt on page.")
return ""
elif len(prompts) > 1:
self.warning("Multiple essay prompts found on page; using the first one.")
return prompts[0]
@property
def rubric(self):
"""
Return a `RubricPage` for a self-assessment problem.
If no rubric is available, raises a `BrokenPromise` exception.
"""
rubric = RubricPage(self.browser)
rubric.wait_for_page()
return rubric
@property
def written_feedback(self):
"""
Return the written feedback from the grader (if any).
If no feedback available, returns None.
"""
feedback = self.q(css='div.written-feedback').text
if len(feedback) > 0:
return feedback[0]
else:
return None
@property
def alert_message(self):
"""
Alert message displayed to the user.
"""
alerts = self.q(css="div.open-ended-alert").text
if len(alerts) < 1:
return ""
else:
return alerts[0]
@property
def grader_status(self):
"""
Status message from the grader.
If not present, return an empty string.
"""
status_list = self.q(css='div.grader-status').text
if len(status_list) < 1:
self.warning("No grader status found")
return ""
elif len(status_list) > 1:
self.warning("Multiple grader statuses found; returning the first one")
return status_list[0]
def set_response(self, response_str):
"""
Input a response to the prompt.
"""
input_css = "textarea.short-form-response"
self.q(css=input_css).fill(response_str)
def save_response(self):
"""
Save the response for later submission.
"""
self.q(css='input.save-button').first.click()
EmptyPromise(
lambda: 'save' in self.alert_message.lower(),
"Status message saved"
).fulfill()
def submit_response(self):
"""
Submit a response for grading.
"""
self.q(css='input.submit-button').first.click()
# modal dialog confirmation
self.q(css='button.ok-button').first.click()
# Ensure that the submission completes
self._wait_for_submitted(self.assessment_type)
def _wait_for_submitted(self, assessment_type):
"""
Wait for the submission to complete.
`assessment_type` is either 'self', 'ai', or 'peer'
"""
if assessment_type == 'self':
RubricPage(self.browser).wait_for_page()
elif assessment_type == 'ai' or assessment_type == "peer":
EmptyPromise(
lambda: self.grader_status != 'Unanswered',
"Problem status is no longer 'unanswered'"
).fulfill()
else:
self.warning("Unrecognized assessment type '{0}'".format(assessment_type))
EmptyPromise(lambda: True, "Unrecognized assessment type").fulfill()
|
agpl-3.0
|
WeichenXu123/spark
|
examples/src/main/python/ml/min_hash_lsh_example.py
|
52
|
3222
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating MinHashLSH.
Run with:
bin/spark-submit examples/src/main/python/ml/min_hash_lsh_example.py
"""
from __future__ import print_function
# $example on$
from pyspark.ml.feature import MinHashLSH
from pyspark.ml.linalg import Vectors
from pyspark.sql.functions import col
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("MinHashLSHExample") \
.getOrCreate()
# $example on$
dataA = [(0, Vectors.sparse(6, [0, 1, 2], [1.0, 1.0, 1.0]),),
(1, Vectors.sparse(6, [2, 3, 4], [1.0, 1.0, 1.0]),),
(2, Vectors.sparse(6, [0, 2, 4], [1.0, 1.0, 1.0]),)]
dfA = spark.createDataFrame(dataA, ["id", "features"])
dataB = [(3, Vectors.sparse(6, [1, 3, 5], [1.0, 1.0, 1.0]),),
(4, Vectors.sparse(6, [2, 3, 5], [1.0, 1.0, 1.0]),),
(5, Vectors.sparse(6, [1, 2, 4], [1.0, 1.0, 1.0]),)]
dfB = spark.createDataFrame(dataB, ["id", "features"])
key = Vectors.sparse(6, [1, 3], [1.0, 1.0])
mh = MinHashLSH(inputCol="features", outputCol="hashes", numHashTables=5)
model = mh.fit(dfA)
# Feature Transformation
print("The hashed dataset where hashed values are stored in the column 'hashes':")
model.transform(dfA).show()
# Compute the locality sensitive hashes for the input rows, then perform approximate
# similarity join.
# We could avoid computing hashes by passing in the already-transformed dataset, e.g.
# `model.approxSimilarityJoin(transformedA, transformedB, 0.6)`
print("Approximately joining dfA and dfB on distance smaller than 0.6:")
model.approxSimilarityJoin(dfA, dfB, 0.6, distCol="JaccardDistance")\
.select(col("datasetA.id").alias("idA"),
col("datasetB.id").alias("idB"),
col("JaccardDistance")).show()
# Compute the locality sensitive hashes for the input rows, then perform approximate nearest
# neighbor search.
# We could avoid computing hashes by passing in the already-transformed dataset, e.g.
# `model.approxNearestNeighbors(transformedA, key, 2)`
# It may return less than 2 rows when not enough approximate near-neighbor candidates are
# found.
print("Approximately searching dfA for 2 nearest neighbors of the key:")
model.approxNearestNeighbors(dfA, key, 2).show()
# $example off$
spark.stop()
|
apache-2.0
|
a2ultimate/ultimate-league-app
|
src/ultimate/utils/google_api.py
|
2
|
7829
|
from datetime import datetime
import dateutil.parser
import httplib2
import logging
from django.conf import settings
from django.utils.timezone import make_aware
from googleapiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
logger = logging.getLogger('a2u.email_groups')
class GoogleAppsApi:
http = None
service = None
def __init__(self):
credentials_file = getattr(settings, 'GOOGLE_APPS_API_CREDENTIALS_FILE', False)
scopes = getattr(settings, 'GOOGLE_APPS_API_SCOPES', False)
account = getattr(settings, 'GOOGLE_APPS_API_ACCOUNT', False)
if credentials_file and scopes and account:
credentials = ServiceAccountCredentials.from_json_keyfile_name(
credentials_file, scopes=scopes)
credentials._kwargs['sub'] = account
self.http = httplib2.Http()
self.http = credentials.authorize(self.http)
def prepare_group_for_sync(self, group_name, group_id=None, group_email_address=None, force=False):
logger.debug('Preparing group "{}" for sync...'.format(group_name))
if force:
self.delete_group(group_id=group_id, group_email_address=group_email_address)
else:
self.remove_all_group_members(
group_id=group_id,
group_email_address=group_email_address,
group_name=group_name)
return self.get_or_create_group(
group_email_address=group_email_address, group_name=group_name)
# TODO need paging for when you have over 200 groups
def get_or_create_group(self, group_email_address, group_name=''):
logger.debug(' Getting or creating group {}...'.format(group_email_address))
service = build('admin', 'directory_v1', http=self.http, cache_discovery=False)
groups_response = None
target_group = None
try:
logger.debug(' Looking for existing group...')
groups_response = service.groups().list(customer='my_customer', domain='lists.annarborultimate.org', query='email={}'.format(group_email_address)).execute(http=self.http)
except Exception as e:
return None
if groups_response and groups_response.get('groups'):
for group in groups_response.get('groups'):
if group.get('email') == group_email_address:
logger.debug(' Group found!')
target_group = group
# couldn't find group, create it
if not target_group:
logger.debug(' Group not found...creating {}...'.format(group_email_address))
body = { 'email': group_email_address, }
if group_name:
body.update({ 'name': group_name, })
try:
target_group = service.groups().insert(body=body).execute(http=self.http)
logger.debug(' Success!')
except Exception as e:
logger.debug(' Failure!')
return None
group_id = target_group.get('id', None)
return group_id
def delete_group(self, group_id=None, group_email_address=None):
logger.debug(' Deleting existing group...')
service = build('admin', 'directory_v1', http=self.http, cache_discovery=False)
if group_email_address and not group_id:
try:
groups_response = service.groups().list(customer='my_customer', domain='lists.annarborultimate.org', query='email={}'.format(group_email_address)).execute(http=self.http)
if groups_response and groups_response.get('groups'):
for group in groups_response.get('groups'):
if group.get('email') == group_email_address:
group_id = group.get('id', None)
except Exception as e:
return False
if group_id:
try:
service.groups().delete(groupKey=group_id).execute(http=self.http)
logger.debug(' Success!')
except Exception as e:
logger.debug(' Failure!')
return False
return True
def remove_all_group_members(self, group_id=None, group_email_address=None, group_name=None):
logger.debug(' Removing all members from {}...'.format(group_email_address))
service = build('admin', 'directory_v1', http=self.http, cache_discovery=False)
if group_email_address and not group_id:
try:
groups_response = service.groups().list(customer='my_customer', domain='lists.annarborultimate.org', query='email={}'.format(group_email_address)).execute(http=self.http)
if groups_response and groups_response.get('groups'):
for group in groups_response.get('groups'):
if group.get('email') == group_email_address:
group_id = group.get('id', None)
except Exception as e:
logger.debug(' Group could not be found')
return False
if group_id:
try:
members_response = service.members().list(groupKey=group_id).execute(http=self.http)
if members_response and members_response.get('members'):
for member in members_response.get('members'):
member_id = member.get('id', None)
service.members().delete(groupKey=group_id, memberKey=member_id).execute(http=self.http)
except Exception as e:
logger.debug(' Group could not be found')
return False
logger.debug(' Done')
def add_group_member(self, email_address, group_id=None, group_email_address=None, group_name=None):
logger.debug('Adding {} to {}...'.format(email_address, group_email_address or 'group'))
service = build('admin', 'directory_v1', http=self.http, cache_discovery=False)
body = {
'email': email_address,
'role': 'MEMBER'
}
response = False
# look for group
if not group_id and group_email_address:
group_id = self.get_or_create_group(
group_email_address=group_email_address, group_name=group_name)
if group_id:
try:
response = service.members().insert(groupKey=group_id, body=body).execute(http=self.http)
logger.debug(' Success!')
except:
logger.debug(' Failure!')
return False
return response
def get_calendar_events(self, calendar_id, since, until):
service = build(serviceName='calendar', version='v3', http=self.http, cache_discovery=False)
since = (datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) - since).isoformat('T') + 'Z'
until = (datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) + until).isoformat('T') + 'Z'
try:
events_response = service.events().list(
calendarId=calendar_id,
orderBy='startTime',
singleEvents=True,
timeMin=since,
timeMax=until,
).execute(http=self.http)
except Exception as e:
return None
events = []
for event in events_response['items']:
events.append({
'summary': event.get('summary'),
'start': dateutil.parser.parse(event['start']['dateTime']),
'end': event['end']['dateTime'],
'location': event.get('location'),
'description': event.get('description'),
})
return events
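# --- Editor's addition (not part of the original module): a hedged sketch of the
# --- intended call sequence for syncing a mailing list. The group address and
# --- member email below are invented placeholders.
def demo_group_sync():
    api = GoogleAppsApi()  # relies on the GOOGLE_APPS_API_* settings read in __init__
    group_id = api.prepare_group_for_sync(
        group_name='Example Team',
        group_email_address='[email protected]')
    if group_id:
        api.add_group_member('[email protected]', group_id=group_id)
    return group_id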
|
bsd-3-clause
|
Rapportus/ansible-modules-extras
|
cloud/vmware/vmware_dns_config.py
|
75
|
3970
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_dns_config
short_description: Manage VMware ESXi DNS Configuration
description:
- Manage VMware ESXi DNS Configuration
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
change_hostname_to:
description:
- The hostname that an ESXi host should be changed to.
required: True
domainname:
description:
- The domain the ESXi host should be apart of.
required: True
dns_servers:
description:
- The DNS servers that the host should be configured to use.
required: True
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_dns_config command from Ansible Playbooks
- name: Configure ESXi hostname and DNS servers
local_action:
module: vmware_dns_config
hostname: esxi_hostname
username: root
password: your_password
change_hostname_to: esx01
domainname: foo.org
dns_servers:
- 8.8.8.8
- 8.8.4.4
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def configure_dns(host_system, hostname, domainname, dns_servers):
changed = False
host_config_manager = host_system.configManager
host_network_system = host_config_manager.networkSystem
config = host_network_system.dnsConfig
config.dhcp = False
if config.address != dns_servers:
config.address = dns_servers
changed = True
if config.domainName != domainname:
config.domainName = domainname
changed = True
if config.hostName != hostname:
config.hostName = hostname
changed = True
if changed:
host_network_system.UpdateDnsConfig(config)
return changed
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(change_hostname_to=dict(required=True, type='str'),
domainname=dict(required=True, type='str'),
dns_servers=dict(required=True, type='list')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
change_hostname_to = module.params['change_hostname_to']
domainname = module.params['domainname']
dns_servers = module.params['dns_servers']
try:
content = connect_to_api(module)
host = get_all_objs(content, [vim.HostSystem])
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
host_system = host.keys()[0]
changed = configure_dns(host_system, change_hostname_to, domainname, dns_servers)
module.exit_json(changed=changed)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
ecell/ecell3
|
ecell/pyecell/ecell/analysis/PathwayProxy.py
|
1
|
13263
|
#!/usr/bin/env python
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2016 Keio University
# Copyright (C) 2008-2016 RIKEN
# Copyright (C) 2005-2009 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
"""
A program for handling and defining a pathway.
This program is the extension package for E-Cell System Version 3.
"""
__program__ = 'PathwayProxy'
__version__ = '1.0'
__author__ = 'Kazunari Kaizu <[email protected]>'
__copyright__ = ''
__license__ = ''
import ecell.eml
from ecell.ecssupport import *
from ecell.analysis.util import createVariableReferenceFullID
import copy
import numpy
class PathwayProxy:
def __init__( self, anEmlSupport, processList=None ):
'''
anEmlSupport: Eml support object
        processList: (list) a list of process full IDs
'''
self.theEmlSupport = anEmlSupport
if processList:
self.setProcessList( processList )
else:
self.setProcessList( [] )
# end of __init__
def setProcessList( self, processList ):
'''
set and detect a pathway
processList: (list) a list of process full ID
'''
# check the existence of processes,
# and create relatedVariableList
self.__processList = []
self.__variableList = []
for processFullID in processList:
# if not self.theEmlSupport.isEntityExist( processFullID ):
# continue
self.__processList.append( processFullID )
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if self.__variableList.count( fullIDString ) == 0:
self.__variableList.append( fullIDString )
self.__processList.sort()
self.__variableList.sort()
# end of setProcessList
def getProcessList( self ):
'''
return processList
'''
return copy.copy( self.__processList )
# end of getProcessList
def addProcess( self, processFullID ):
'''
add a process to the pathway
processFullID: (str) a process full ID
'''
if not self.__processList.count( processFullID ) == 0:
return
# elif not ecell.eml.Eml.isEntityExist( processFullID ):
# return
# add process
self.__processList.append( processFullID )
self.__processList.sort()
# update the related variable list
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
return
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if self.__variableList.count( fullIDString ) == 0:
self.__variableList.append( fullIDString )
self.__variableList.sort()
# end of addProcess
def removeProcess( self, processIndexList ):
'''
remove processes from the pathway
processIndexList: (list) a list of indices of processes
'''
indexList = copy.copy( processIndexList )
indexList.sort()
indexList.reverse()
removedProcessList = []
for i in indexList:
if len( self.__processList ) > i:
removedProcessList.append( self.__processList.pop( i ) )
removedVariableList = []
for processFullID in removedProcessList:
# if not ecell.eml.Eml.isEntityExist( self.theEmlSupport, processFullID ):
# continue
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if removedVariableList.count( fullIDString ) == 0:
removedVariableList.append( fullIDString )
for processFullID in self.__processList:
# if not self.theEmlSupport.isEntityExist( processFullID ):
# continue
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if not removedVariableList.count( fullIDString ) == 0:
removedVariableList.remove( fullIDString )
for variableFullID in removedVariableList:
self.__variableList.remove( variableFullID )
# end of removeProcess
def take( self, processIndexList ):
'''
create and return a sub-pathway
processIndexList: (list) a list of indices of processes
return PathwayProxy
'''
processList = []
for i in processIndexList:
if len( self.__processList ) > i:
processList.append( self.__processList[ i ] )
subPathway = PathwayProxy( self.theEmlSupport, processList )
return subPathway
# end of removeProcess
def getVariableList( self ):
'''
return relatedVariableList
'''
return copy.copy( self.__variableList )
# end of getVariableList
def removeVariable( self, variableIndexList ):
'''
remove variables from the pathway
variableIndexList: (list) a list of indices of variables
'''
indexList = copy.copy( variableIndexList )
indexList.sort()
indexList.reverse()
for i in indexList:
if len( self.__variableList ) > i:
self.__variableList.pop( i )
# end of removeVariable
def addVariable( self, variableFullID ):
'''
recover a removed variable to the pathway
variableFullID: (str) a variable full ID
'''
if not self.__variableList.count( variableFullID ) == 0:
return 1
# elif not ecell.eml.Eml.isEntityExist( variableFullID ):
# return 0
for processFullID in self.__processList:
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = fullID[ 1 ] + ':' + fullID[ 2 ]
if fullIDString == variableFullID:
self.__variableList.append( variableFullID )
self.__variableList.sort()
return 1
return 0
# end of addProcess
def getIncidentMatrix( self, mode=0 ):
'''
create the incident matrix (array)
mode: (0 or 1) 0 means that only the \'write\' variables are checked. 0 is set as default.
return incidentMatrix
'''
incidentMatrix = numpy.zeros( ( len( self.__variableList ), len( self.__processList ) ) )
for j in range( len( self.__processList ) ):
processFullID = self.__processList[ j ]
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
try:
i = self.__variableList.index( fullIDString )
except ValueError:
                    # should some warning message be shown?
continue
if mode:
if len( aVariableReference ) > 2:
coeff = int( aVariableReference[ 2 ] )
if coeff != 0:
incidentMatrix[ i ][ j ] = 1
else:
incidentMatrix[ i ][ j ] = 1
return incidentMatrix
# end of getIncidentMatrix
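    # --- Editor's note (illustration, not original code): for a pathway with
    # --- variables [S, P] and one process S -> P, getIncidentMatrix() returns
    # --- [[1], [1]] because both variables are referenced, while
    # --- getStoichiometryMatrix() below would return [[-1], [1]], assuming the
    # --- VariableReferenceList coefficients are -1 for S and +1 for P.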
def getStoichiometryMatrix( self ):
'''
create the stoichiometry matrix (array)
return stoichiometryMatrix
'''
stoichiometryMatrix = numpy.zeros( ( len( self.__variableList ), len( self.__processList ) ), float )
for j in range( len( self.__processList ) ):
processFullID = self.__processList[ j ]
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
try:
i = self.__variableList.index( fullIDString )
except ValueError:
                    # should some warning message be shown?
continue
if len( aVariableReference ) > 2:
coeff = int( aVariableReference[ 2 ] )
if coeff != 0:
stoichiometryMatrix[ i ][ j ] += coeff
return stoichiometryMatrix
# end of getStoichiometryMatrix
def getReversibilityList( self ):
'''
check and return the reversibilities (isReversible) for processes
default value is 0, irreversible
return reversibilityList
'''
reversibilityList = []
for processFullID in self.__processList:
propertyList = self.theEmlSupport.getEntityPropertyList( processFullID )
if propertyList.count( 'isReversible' ) != 0:
# isReversible is handled as float
isReversible = float( self.theEmlSupport.getEntityProperty( processFullID + ':isReversible' )[ 0 ] )
reversibilityList.append( int( isReversible ) )
else:
# default value, irreversible
reversibilityList.append( 0 )
return reversibilityList
# end of getReversibilityList
# end of PathwayProxy
if __name__ == '__main__':
from emlsupport import EmlSupport
import sys
import os
def main( filename ):
anEmlSupport = EmlSupport( filename )
pathwayProxy = anEmlSupport.createPathwayProxy()
print 'process list ='
print pathwayProxy.getProcessList()
print 'related variable list ='
print pathwayProxy.getVariableList()
print 'incident matrix ='
print pathwayProxy.getIncidentMatrix()
print 'stoichiometry matrix ='
print pathwayProxy.getStoichiometryMatrix()
print 'reversibility list ='
print pathwayProxy.getReversibilityList()
# end of main
if len( sys.argv ) > 1:
main( sys.argv[ 1 ] )
else:
filename = '../../../../doc/samples/Heinrich/Heinrich.eml'
main( os.path.abspath( filename ) )
|
lgpl-3.0
|
pymedusa/Medusa
|
ext/boto/pyami/scriptbase.py
|
153
|
1427
|
import os
import sys
from boto.utils import ShellCommand, get_ts
import boto
import boto.utils
class ScriptBase(object):
def __init__(self, config_file=None):
self.instance_id = boto.config.get('Instance', 'instance-id', 'default')
self.name = self.__class__.__name__
self.ts = get_ts()
if config_file:
boto.config.read(config_file)
def notify(self, subject, body=''):
boto.utils.notify(subject, body)
def mkdir(self, path):
if not os.path.isdir(path):
try:
os.mkdir(path)
except:
boto.log.error('Error creating directory: %s' % path)
def umount(self, path):
if os.path.ismount(path):
self.run('umount %s' % path)
def run(self, command, notify=True, exit_on_error=False, cwd=None):
self.last_command = ShellCommand(command, cwd=cwd)
if self.last_command.status != 0:
boto.log.error('Error running command: "%s". Output: "%s"' % (command, self.last_command.output))
if notify:
self.notify('Error encountered',
'Error running the following command:\n\t%s\n\nCommand output:\n\t%s' % \
(command, self.last_command.output))
if exit_on_error:
sys.exit(-1)
return self.last_command.status
def main(self):
pass
|
gpl-3.0
|
Ichag/odoo
|
addons/hr_expense/report/__init__.py
|
380
|
1071
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_expense_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
AydinSakar/node-gyp
|
gyp/pylib/gyp/xcode_emulation.py
|
65
|
42931
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import gyp.common
import os.path
import re
import shlex
import subprocess
import sys
from gyp.common import GypError
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
def __init__(self, spec):
self.spec = spec
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.app'
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library'):
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def _GetSdkVersionInfoItem(self, sdk, infoitem):
job = subprocess.Popen(['xcodebuild', '-version', '-sdk', sdk, infoitem],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running xcodebuild' % job.returncode)
return out.rstrip('\n')
def _SdkPath(self):
sdk_root = self.GetPerTargetSetting('SDKROOT', default='macosx')
if sdk_root not in XcodeSettings._sdk_path_cache:
XcodeSettings._sdk_path_cache[sdk_root] = self._GetSdkVersionInfoItem(
sdk_root, 'Path')
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
# This functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings():
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
# Note: Don't make c++0x to c++11 so that c++0x can be used with older
# clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = '(\S+)'
WORD = '\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
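  # Illustrative sketch, not part of the original gyp source: the remappings
  # the patterns above are intended to produce, assuming a gyp_to_build_path
  # that prefixes gyp-relative paths with '../'.
  _MAP_LINKER_FLAG_EXAMPLES = [
      ('-Wl,-exported_symbols_list,foo.symbols',
       '-Wl,-exported_symbols_list,../foo.symbols'),
      ('-Lthird_party/lib', '-L../third_party/lib'),
  ]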
def GetLdflags(self, configname, product_dir, gyp_to_build_path):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
      product_dir: The directory where products such as static and dynamic
          libraries are placed. This is added to the library search path.
      gyp_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name:
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', self._SdkPath()))
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = self.xcode_settings[configname].get(setting, None)
first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def GetTargetPostbuilds(self, configname, output, output_binary, quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _AdjustLibrary(self, library):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
return l.replace('$(SDKROOT)', self._SdkPath())
def AdjustLibraries(self, libraries):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [ self._AdjustLibrary(library) for library in libraries]
return libraries
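# Illustrative sketch, not part of the original gyp source: the mappings
# XcodeSettings.AdjustLibraries is documented to perform, assuming SDKROOT
# expands to '/Developer/SDKs/MacOSX10.6.sdk'.
_ADJUST_LIBRARIES_EXAMPLES = [
    ('Cocoa.framework', '-framework Cocoa'),
    ('libcrypto.dylib', '-lcrypto'),
    ('$(SDKROOT)/usr/lib/libz.dylib',
     '/Developer/SDKs/MacOSX10.6.sdk/usr/lib/libz.dylib'),
]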
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def GetInclude(self, lang):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self.compiled_headers[lang]
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self.compiled_headers[lang] + '.gch'
def GetObjDependencies(self, sources, objs):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
    relative path to the gch file each object file depends on. |sources[i]|
    has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang)))
return result
def GetPchBuildCommands(self):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c'), '-x c-header', 'c', self.header),
(self._Gch('cc'), '-x c++-header', 'cc', self.header),
(self._Gch('m'), '-x objective-c-header', 'm', self.header),
(self._Gch('mm'), '-x objective-c++-header', 'mm', self.header),
]
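# Minimal usage sketch, not part of the original gyp source: how a generator
# might wire up MacPrefixHeader. The two path-mapping lambdas are placeholder
# assumptions; real generators pass their own path translation functions.
def _ExamplePrefixHeaderUsage(xcode_settings):
  pch = MacPrefixHeader(
      xcode_settings,
      gyp_path_to_build_path=lambda path: path,
      gyp_path_to_build_output=lambda path, lang: 'pch/%s/%s' % (lang, path))
  include_flag = pch.GetInclude('cc')       # '' when no GCC_PREFIX_HEADER.
  pch_commands = pch.GetPchBuildCommands()  # [] unless the header is compiled.
  return include_flag, pch_commands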
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
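# Illustrative sketch, not part of the original gyp source: after merging,
# local per-configuration keys override the global ones.
def _ExampleMergeGlobalXcodeSettings():
  global_dict = {'xcode_settings': {'SDKROOT': 'macosx10.6',
                                    'ARCHS': ['i386']}}
  spec = {'configurations':
              {'Debug': {'xcode_settings': {'ARCHS': ['x86_64']}}}}
  MergeGlobalXcodeSettingsToSpec(global_dict, spec)
  # -> {'SDKROOT': 'macosx10.6', 'ARCHS': ['x86_64']}
  return spec['configurations']['Debug']['xcode_settings']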
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
just a single file. Bundle rules do not produce a binary but also package
resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
    # The make generator doesn't support spaces in filenames, so forbid them
    # everywhere to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = output[0:-3] + 'nib'
yield output, res
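# Illustrative sketch, not part of the original gyp source: the (output,
# resource) pair the generator above yields for a localized XIB, assuming the
# bundle's resource folder is 'My.app/Contents/Resources'.
_BUNDLE_RESOURCE_EXAMPLE = (
    'My.app/Contents/Resources/en.lproj/MainMenu.nib',  # output
    'res/en.lproj/MainMenu.xib',                        # resource
)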
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
    * |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
    * |defines| is a list of preprocessor defines (empty if the plist
      shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
    gyp_path_to_build_path: A function that converts paths relative to the
      current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
  # The make generator doesn't support spaces in filenames, so forbid them
  # everywhere to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
    # Collect any preprocessor definitions to pass to the C preprocessor.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
  # These are filled in on an as-needed basis.
env = {
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if xcode_settings.GetPerTargetSetting('SDKROOT'):
env['SDKROOT'] = xcode_settings._SdkPath()
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
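# Minimal sketch, not part of the original gyp source: every supported
# reference style is rewritten to the ${FOO} form.
def _ExampleNormalizeEnvVarReferences():
  normalized = _NormalizeEnvVarReferences('$FOO/$(BAR)/${BAZ}')
  assert normalized == '${FOO}/${BAR}/${BAZ}'
  return normalized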
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
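# Minimal sketch, not part of the original gyp source: because the expansions
# list is walked in reverse, later entries may reference earlier ones (as the
# topologically sorted output of GetSortedXcodeEnv guarantees).
def _ExampleExpandEnvVars():
  expansions = [('SRCROOT', '/src'), ('SOURCE_ROOT', '${SRCROOT}')]
  expanded = ExpandEnvVars('cp Info.plist ${SOURCE_ROOT}/out', expansions)
  assert expanded == 'cp Info.plist /src/out'
  return expanded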
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
  Raises a GypError in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
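# Minimal sketch, not part of the original gyp source: a key is always
# preceded by any key its value refers to.
def _ExampleTopologicalEnvOrder():
  env = {'CONTENTS_FOLDER_PATH': '${WRAPPER_NAME}/Contents',
         'WRAPPER_NAME': 'My.app'}
  order = _TopologicallySortedEnvVarKeys(env)
  assert order.index('WRAPPER_NAME') < order.index('CONTENTS_FOLDER_PATH')
  return order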
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
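# Illustrative sketch, not part of the original gyp source: the commands
# emitted for a spec with one explicit postbuild. The target and action are
# placeholder values.
def _ExampleSpecPostbuildCommands():
  spec = {
      'target_name': 'MyApp',
      'postbuilds': [{'postbuild_name': 'Touch output',
                      'action': ['touch', 'out.txt']}],
  }
  commands = GetSpecPostbuildCommands(spec)
  # commands[0] echoes 'POSTBUILD(MyApp) Touch output';
  # commands[1] is the shell-quoted action.
  return commands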
|
mit
|
BayanGroup/ansible
|
lib/ansible/utils/module_docs_fragments/mysql.py
|
18
|
2735
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jonathan Mainguy <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard mysql documentation fragment
DOCUMENTATION = '''
options:
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
login_port:
description:
      - Port of the MySQL server. Requires login_host to be defined as something other than localhost if login_port is used
required: false
default: 3306
login_unix_socket:
description:
- The path to a Unix domain socket for local connections
required: false
default: null
config_file:
description:
- Specify a config file from which user and password are to be read
required: false
default: '~/.my.cnf'
version_added: "2.0"
ssl_ca:
required: false
default: null
version_added: "2.0"
description:
- The path to a Certificate Authority (CA) certificate. This option, if used, must specify the same certificate as used by the server.
ssl_cert:
required: false
default: null
version_added: "2.0"
description:
- The path to a client public key certificate.
ssl_key:
required: false
default: null
version_added: "2.0"
description:
- The path to the client private key.
requirements:
- MySQLdb
notes:
- Requires the MySQLdb Python package on the remote host. For Ubuntu, this
is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this
is as easy as yum install MySQL-python. (See M(yum).)
- Both C(login_password) and C(login_user) are required when you are
passing credentials. If none are present, the module will attempt to read
the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
default login of 'root' with no password.
'''
|
gpl-3.0
|
mmilaprat/policycompass-services
|
apps/metricsmanager/api.py
|
2
|
5677
|
import json
from django.core.exceptions import ValidationError
from django import shortcuts
from rest_framework.views import APIView
from rest_framework.reverse import reverse
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework import generics, status
from policycompass_services import permissions
from .serializers import *
from .normalization import get_normalizers
from . import formula, services
class MetricsBase(APIView):
def get(self, request, format=None):
"""
:type request: Request
:param request:
:return:
"""
result = {
"Metrics": reverse('metrics-create-list', request=request),
"Normalizer": reverse('normalizers-list', request=request),
"Calculator": reverse('calculate-dataset', request=request)
}
return Response(result)
class FormulasValidate(APIView):
def get(self, request):
if "formula" not in request.QUERY_PARAMS:
return Response({"formula": "Can not be empty"},
status=status.HTTP_400_BAD_REQUEST)
if "variables" not in request.QUERY_PARAMS:
return Response({"variables": "Can not be empty"},
status=status.HTTP_400_BAD_REQUEST)
formula_str = request.QUERY_PARAMS["formula"]
try:
variables = json.loads(request.QUERY_PARAMS["variables"])
except ValueError as e:
return Response(
{"variables": "Unable to parse json: {}".format(e)},
status=status.HTTP_400_BAD_REQUEST)
try:
variables = formula.validate_variables(variables)
formula.validate_formula(formula_str, variables)
except ValidationError as e:
return Response(e.error_dict, status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_204_NO_CONTENT)
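# Illustrative sketch, not part of the original module: the two query
# parameters FormulasValidate.get expects. 'variables' must be a JSON string
# accepted by formula.validate_variables; the values below are placeholder
# assumptions about that schema.
EXAMPLE_VALIDATE_QUERY = {
    "formula": "0.5 * norm(__1__, 0, 100) + 0.5 * norm(__2__, 0, 200)",
    "variables": '[{"variable": "__1__", "dataset": 1}]',
}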
class NormalizersList(APIView):
def get(self, request):
normalizers = get_normalizers().values()
serializer = NormalizerSerializer(normalizers, many=True)
return Response(serializer.data)
class MetricsCreate(generics.ListCreateAPIView):
model = Metric
serializer_class = MetricSerializer
paginate_by = 10
paginate_by_param = 'page_size'
permission_classes = IsAuthenticatedOrReadOnly,
def pre_save(self, obj):
obj.creator_path = self.request.user.resource_path
class MetricsDetail(generics.RetrieveUpdateDestroyAPIView):
model = Metric
serializer_class = MetricSerializer
permission_classes = permissions.IsCreatorOrReadOnly,
class DatasetCalculateView(APIView):
permission_classes = IsAuthenticatedOrReadOnly,
def post(self, request):
"""
Compute a new dataset from a given formula and mappings.
Example data:
{
"title": "Some test",
"formula": "0.5 * norm(__1__, 0, 100) + 0.5 * norm(__2__, 0, 200)",
"datasets": [
{
"variable": "__1__",
"dataset": 1,
},
{
"variable": "__1__",
"dataset": 1,
}
],
"indicator_id": 0,
"unit_id": 0,
}
"""
        # check request data
serializer = CalculateSerializer(data=request.DATA,
files=request.FILES)
if not serializer.is_valid():
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
data = serializer.object
try:
formula.validate_formula(data["formula"], data["datasets"])
data = services.validate_operationalize(data)
except ValidationError as e:
return Response(e.error_dict, status=status.HTTP_400_BAD_REQUEST)
creator_path = self.request.user.resource_path
dataset_id = services.compute_dataset(
creator_path=creator_path,
**data)
return Response({
"dataset": {
"id": dataset_id
}
})
class MetriscOperationalize(APIView):
permission_classes = IsAuthenticatedOrReadOnly,
def post(self, request, metrics_id: int):
"""
Compute a new dataset from a given metric and mappings for variables.
Example data:
{
"title" : "Some test",
"datasets": [
{
"variable": "__1__",
"dataset": 1,
}
],
"unit_id": 0,
}
"""
# check if metric exists
metric = shortcuts.get_object_or_404(Metric, pk=metrics_id)
        # check request data
serializer = OperationalizeSerializer(data=request.DATA,
files=request.FILES)
if not serializer.is_valid():
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
data = serializer.object
try:
data = services.validate_operationalize(data)
except ValidationError as e:
return Response(e.error_dict, status=status.HTTP_400_BAD_REQUEST)
creator_path = self.request.user.resource_path
dataset_id = services.compute_dataset(
creator_path=creator_path,
formula=metric.formula,
indicator_id=metric.indicator_id,
metric_id=metric.pk,
**data)
return Response({
"dataset": {
"id": dataset_id
}
})
|
agpl-3.0
|
yosukesuzuki/let-me-notify
|
project/kay/management/gae_bulkloader.py
|
10
|
125396
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Imports data over HTTP.
Usage:
%(arg0)s [flags]
--debug Show debugging information. (Optional)
--app_id=<string> Application ID of endpoint (Optional for
*.appspot.com)
--auth_domain=<domain> The auth domain to use for logging in and for
UserProperties. (Default: gmail.com)
--bandwidth_limit=<int> The maximum number of bytes per second for the
aggregate transfer of data to the server. Bursts
may exceed this, but overall transfer rate is
restricted to this rate. (Default 250000)
--batch_size=<int> Number of Entity objects to include in each post to
the URL endpoint. The more data per row/Entity, the
smaller the batch size should be. (Default 10)
--config_file=<path> File containing Model and Loader definitions.
(Required unless --dump or --restore are used)
--db_filename=<path> Specific progress database to write to, or to
resume from. If not supplied, then a new database
will be started, named:
bulkloader-progress-TIMESTAMP.
The special filename "skip" may be used to simply
skip reading/writing any progress information.
--download Export entities to a file.
--dry_run Do not execute any remote_api calls.
--dump Use zero-configuration dump format.
--email=<string> The username to use. Will prompt if omitted.
--exporter_opts=<string>
A string to pass to the Exporter.initialize method.
--filename=<path> Path to the file to import. (Required)
--has_header Skip the first row of the input.
  --http_limit=<int>      The maximum number of HTTP requests per second to
send to the server. (Default: 8)
--kind=<string> Name of the Entity object kind to put in the
datastore. (Required)
--loader_opts=<string> A string to pass to the Loader.initialize method.
--log_file=<path> File to write bulkloader logs. If not supplied
then a new log file will be created, named:
bulkloader-log-TIMESTAMP.
--map Map an action across datastore entities.
--mapper_opts=<string> A string to pass to the Mapper.Initialize method.
--num_threads=<int> Number of threads to use for uploading entities
(Default 10)
--passin Read the login password from stdin.
--restore Restore from zero-configuration dump format.
--result_db_filename=<path>
Result database to write to for downloads.
--rps_limit=<int> The maximum number of records per second to
transfer to the server. (Default: 20)
--url=<string> URL endpoint to post to for importing data.
(Required)
The exit status will be 0 on success, non-zero on import failure.
Works with the remote_api mix-in library for google.appengine.ext.remote_api.
Please look there for documentation about how to setup the server side.
Example:
%(arg0)s --url=http://app.appspot.com/remote_api --kind=Model \
--filename=data.csv --config_file=loader_config.py
"""
import csv
import errno
import getopt
import getpass
import imp
import logging
import os
import Queue
import re
import shutil
import signal
import StringIO
import sys
import threading
import time
import traceback
import urllib2
import urlparse
from google.appengine.datastore import entity_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.datastore import datastore_pb
from google.appengine.ext import db
from google.appengine.ext import key_range as key_range_module
from google.appengine.ext.db import polymodel
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.ext.remote_api import throttle as remote_api_throttle
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools import adaptive_thread_pool
from google.appengine.tools import appengine_rpc
from google.appengine.tools.requeue import ReQueue
try:
import sqlite3
except ImportError:
pass
logger = logging.getLogger('google.appengine.tools.bulkloader')
KeyRange = key_range_module.KeyRange
DEFAULT_THREAD_COUNT = 10
DEFAULT_BATCH_SIZE = 10
DEFAULT_DOWNLOAD_BATCH_SIZE = 100
DEFAULT_QUEUE_SIZE = DEFAULT_THREAD_COUNT * 10
_THREAD_SHOULD_EXIT = '_THREAD_SHOULD_EXIT'
STATE_READ = 0
STATE_SENDING = 1
STATE_SENT = 2
STATE_NOT_SENT = 3
STATE_GETTING = 1
STATE_GOT = 2
STATE_ERROR = 3
DATA_CONSUMED_TO_HERE = 'DATA_CONSUMED_TO_HERE'
INITIAL_BACKOFF = 1.0
BACKOFF_FACTOR = 2.0
DEFAULT_BANDWIDTH_LIMIT = 250000
DEFAULT_RPS_LIMIT = 20
DEFAULT_REQUEST_LIMIT = 8
MAXIMUM_INCREASE_DURATION = 5.0
MAXIMUM_HOLD_DURATION = 12.0
def ImportStateMessage(state):
"""Converts a numeric state identifier to a status message."""
return ({
STATE_READ: 'Batch read from file.',
STATE_SENDING: 'Sending batch to server.',
STATE_SENT: 'Batch successfully sent.',
STATE_NOT_SENT: 'Error while sending batch.'
}[state])
def ExportStateMessage(state):
"""Converts a numeric state identifier to a status message."""
return ({
STATE_READ: 'Batch read from file.',
STATE_GETTING: 'Fetching batch from server',
STATE_GOT: 'Batch successfully fetched.',
STATE_ERROR: 'Error while fetching batch'
}[state])
def MapStateMessage(state):
"""Converts a numeric state identifier to a status message."""
return ({
STATE_READ: 'Batch read from file.',
STATE_GETTING: 'Querying for batch from server',
STATE_GOT: 'Batch successfully fetched.',
STATE_ERROR: 'Error while fetching or mapping.'
}[state])
def ExportStateName(state):
"""Converts a numeric state identifier to a string."""
return ({
STATE_READ: 'READ',
STATE_GETTING: 'GETTING',
STATE_GOT: 'GOT',
STATE_ERROR: 'NOT_GOT'
}[state])
def ImportStateName(state):
"""Converts a numeric state identifier to a string."""
return ({
STATE_READ: 'READ',
STATE_GETTING: 'SENDING',
STATE_GOT: 'SENT',
STATE_NOT_SENT: 'NOT_SENT'
}[state])
class Error(Exception):
"""Base-class for exceptions in this module."""
class MissingPropertyError(Error):
"""An expected field is missing from an entity, and no default was given."""
class FatalServerError(Error):
"""An unrecoverable error occurred while posting data to the server."""
class ResumeError(Error):
"""Error while trying to resume a partial upload."""
class ConfigurationError(Error):
"""Error in configuration options."""
class AuthenticationError(Error):
"""Error while trying to authenticate with the server."""
class FileNotFoundError(Error):
"""A filename passed in by the user refers to a non-existent input file."""
class FileNotReadableError(Error):
"""A filename passed in by the user refers to a non-readable input file."""
class FileExistsError(Error):
"""A filename passed in by the user refers to an existing output file."""
class FileNotWritableError(Error):
"""A filename passed in by the user refers to a non-writable output file."""
class BadStateError(Error):
"""A work item in an unexpected state was encountered."""
class KeyRangeError(Error):
"""An error during construction of a KeyRangeItem."""
class FieldSizeLimitError(Error):
"""The csv module tried to read a field larger than the size limit."""
def __init__(self, limit):
self.message = """
A field in your CSV input file has exceeded the current limit of %d.
You can raise this limit by adding the following lines to your config file:
import csv
csv.field_size_limit(new_limit)
where new_limit is a number larger than the size in bytes of the largest
field in your CSV.
""" % limit
Error.__init__(self, self.message)
class NameClashError(Error):
"""A name clash occurred while trying to alias old method names."""
def __init__(self, old_name, new_name, klass):
Error.__init__(self, old_name, new_name, klass)
self.old_name = old_name
self.new_name = new_name
self.klass = klass
def GetCSVGeneratorFactory(kind, csv_filename, batch_size, csv_has_header,
openfile=open, create_csv_reader=csv.reader):
"""Return a factory that creates a CSV-based UploadWorkItem generator.
Args:
kind: The kind of the entities being uploaded.
csv_filename: File on disk containing CSV data.
batch_size: Maximum number of CSV rows to stash into an UploadWorkItem.
csv_has_header: Whether to skip the first row of the CSV.
openfile: Used for dependency injection.
create_csv_reader: Used for dependency injection.
Returns:
A callable (accepting the Progress Queue and Progress Generators
as input) which creates the UploadWorkItem generator.
"""
loader = Loader.RegisteredLoader(kind)
loader._Loader__openfile = openfile
loader._Loader__create_csv_reader = create_csv_reader
record_generator = loader.generate_records(csv_filename)
def CreateGenerator(request_manager, progress_queue, progress_generator):
"""Initialize a UploadWorkItem generator.
Args:
request_manager: A RequestManager instance.
progress_queue: A ProgressQueue instance to send progress information.
progress_generator: A generator of progress information or None.
Returns:
An UploadWorkItemGenerator instance.
"""
return UploadWorkItemGenerator(request_manager,
progress_queue,
progress_generator,
record_generator,
csv_has_header,
batch_size)
return CreateGenerator
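# Minimal usage sketch, not part of the original bulkloader: how the returned
# factory is meant to be invoked, assuming a Loader for kind 'Album' has been
# registered via a --config_file and that a RequestManager and progress queue
# already exist.
def _ExampleCSVGeneratorFactory(request_manager, progress_queue):
  factory = GetCSVGeneratorFactory('Album', 'data.csv', batch_size=10,
                                   csv_has_header=True)
  return factory(request_manager, progress_queue, progress_generator=None)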
class UploadWorkItemGenerator(object):
"""Reads rows from a row generator and generates UploadWorkItems."""
def __init__(self,
request_manager,
progress_queue,
progress_generator,
record_generator,
skip_first,
batch_size):
"""Initialize a WorkItemGenerator.
Args:
request_manager: A RequestManager instance with which to associate
WorkItems.
progress_queue: A progress queue with which to associate WorkItems.
progress_generator: A generator of progress information.
record_generator: A generator of data records.
skip_first: Whether to skip the first data record.
batch_size: The number of data records per WorkItem.
"""
self.request_manager = request_manager
self.progress_queue = progress_queue
self.progress_generator = progress_generator
self.reader = record_generator
self.skip_first = skip_first
self.batch_size = batch_size
self.line_number = 1
self.column_count = None
self.read_rows = []
self.row_count = 0
self.xfer_count = 0
def _AdvanceTo(self, line):
"""Advance the reader to the given line.
Args:
line: A line number to advance to.
"""
while self.line_number < line:
self.reader.next()
self.line_number += 1
self.row_count += 1
self.xfer_count += 1
def _ReadRows(self, key_start, key_end):
"""Attempts to read and encode rows [key_start, key_end].
The encoded rows are stored in self.read_rows.
Args:
key_start: The starting line number.
key_end: The ending line number.
Raises:
StopIteration: if the reader runs out of rows
      ResumeError: if there is an inconsistent number of columns.
"""
assert self.line_number == key_start
self.read_rows = []
while self.line_number <= key_end:
row = self.reader.next()
self.row_count += 1
if self.column_count is None:
self.column_count = len(row)
else:
if self.column_count != len(row):
raise ResumeError('Column count mismatch, %d: %s' %
(self.column_count, str(row)))
self.read_rows.append((self.line_number, row))
self.line_number += 1
def _MakeItem(self, key_start, key_end, rows, progress_key=None):
"""Makes a UploadWorkItem containing the given rows, with the given keys.
Args:
key_start: The start key for the UploadWorkItem.
key_end: The end key for the UploadWorkItem.
rows: A list of the rows for the UploadWorkItem.
progress_key: The progress key for the UploadWorkItem
Returns:
An UploadWorkItem instance for the given batch.
"""
assert rows
item = UploadWorkItem(self.request_manager, self.progress_queue, rows,
key_start, key_end, progress_key=progress_key)
return item
def Batches(self):
"""Reads from the record_generator and generates UploadWorkItems.
Yields:
Instances of class UploadWorkItem
Raises:
ResumeError: If the progress database and data file indicate a different
number of rows.
"""
if self.skip_first:
logger.info('Skipping header line.')
try:
self.reader.next()
except StopIteration:
return
exhausted = False
self.line_number = 1
self.column_count = None
logger.info('Starting import; maximum %d entities per post',
self.batch_size)
state = None
if self.progress_generator:
for progress_key, state, key_start, key_end in self.progress_generator:
if key_start:
try:
self._AdvanceTo(key_start)
self._ReadRows(key_start, key_end)
yield self._MakeItem(key_start,
key_end,
self.read_rows,
progress_key=progress_key)
except StopIteration:
logger.error('Mismatch between data file and progress database')
raise ResumeError(
'Mismatch between data file and progress database')
elif state == DATA_CONSUMED_TO_HERE:
try:
self._AdvanceTo(key_end + 1)
except StopIteration:
state = None
if self.progress_generator is None or state == DATA_CONSUMED_TO_HERE:
while not exhausted:
key_start = self.line_number
key_end = self.line_number + self.batch_size - 1
try:
self._ReadRows(key_start, key_end)
except StopIteration:
exhausted = True
key_end = self.line_number - 1
if key_start <= key_end:
yield self._MakeItem(key_start, key_end, self.read_rows)
class CSVGenerator(object):
"""Reads a CSV file and generates data records."""
def __init__(self,
csv_filename,
openfile=open,
create_csv_reader=csv.reader):
"""Initializes a CSV generator.
Args:
csv_filename: File on disk containing CSV data.
openfile: Used for dependency injection of 'open'.
create_csv_reader: Used for dependency injection of 'csv.reader'.
"""
self.csv_filename = csv_filename
self.openfile = openfile
self.create_csv_reader = create_csv_reader
def Records(self):
"""Reads the CSV data file and generates row records.
Yields:
Lists of strings
Raises:
ResumeError: If the progress database and data file indicate a different
number of rows.
"""
csv_file = self.openfile(self.csv_filename, 'rb')
reader = self.create_csv_reader(csv_file, skipinitialspace=True)
try:
for record in reader:
yield record
except csv.Error, e:
if e.args and e.args[0].startswith('field larger than field limit'):
limit = e.args[1]
raise FieldSizeLimitError(limit)
else:
raise
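# Minimal usage sketch, not part of the original bulkloader: iterating the
# rows of a CSV file with CSVGenerator. 'data.csv' is a placeholder path.
def _ExampleCSVRecords():
  rows = []
  for record in CSVGenerator('data.csv').Records():
    rows.append(record)  # each record is a list of column strings
  return rows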
class KeyRangeItemGenerator(object):
"""Generates ranges of keys to download.
Reads progress information from the progress database and creates
KeyRangeItem objects corresponding to incompletely downloaded parts of an
export.
"""
def __init__(self, request_manager, kind, progress_queue, progress_generator,
key_range_item_factory):
"""Initialize the KeyRangeItemGenerator.
Args:
request_manager: A RequestManager instance.
kind: The kind of entities being transferred.
progress_queue: A queue used for tracking progress information.
progress_generator: A generator of prior progress information, or None
if there is no prior status.
key_range_item_factory: A factory to produce KeyRangeItems.
"""
self.request_manager = request_manager
self.kind = kind
self.row_count = 0
self.xfer_count = 0
self.progress_queue = progress_queue
self.progress_generator = progress_generator
self.key_range_item_factory = key_range_item_factory
def Batches(self):
"""Iterate through saved progress information.
Yields:
KeyRangeItem instances corresponding to undownloaded key ranges.
"""
if self.progress_generator is not None:
for progress_key, state, key_start, key_end in self.progress_generator:
if state is not None and state != STATE_GOT and key_start is not None:
key_start = ParseKey(key_start)
key_end = ParseKey(key_end)
key_range = KeyRange(key_start=key_start,
key_end=key_end)
result = self.key_range_item_factory(self.request_manager,
self.progress_queue,
self.kind,
key_range,
progress_key=progress_key,
state=STATE_READ)
yield result
else:
key_range = KeyRange()
yield self.key_range_item_factory(self.request_manager,
self.progress_queue,
self.kind,
key_range)
class DownloadResult(object):
"""Holds the result of an entity download."""
def __init__(self, continued, direction, keys, entities):
self.continued = continued
self.direction = direction
self.keys = keys
self.entities = entities
self.count = len(keys)
assert self.count == len(entities)
assert direction in (key_range_module.KeyRange.ASC,
key_range_module.KeyRange.DESC)
if self.count > 0:
if direction == key_range_module.KeyRange.ASC:
self.key_start = keys[0]
self.key_end = keys[-1]
else:
self.key_start = keys[-1]
self.key_end = keys[0]
def Entities(self):
"""Returns the list of entities for this result in key order."""
if self.direction == key_range_module.KeyRange.ASC:
return list(self.entities)
else:
result = list(self.entities)
result.reverse()
return result
def __str__(self):
return 'continued = %s\n%s' % (
str(self.continued), '\n'.join(str(self.entities)))
class _WorkItem(adaptive_thread_pool.WorkItem):
"""Holds a description of a unit of upload or download work."""
def __init__(self, progress_queue, key_start, key_end, state_namer,
state=STATE_READ, progress_key=None):
"""Initialize the _WorkItem instance.
Args:
progress_queue: A queue used for tracking progress information.
key_start: The start key of the work item.
key_end: The end key of the work item.
state_namer: Function to describe work item states.
state: The initial state of the work item.
progress_key: If this WorkItem represents state from a prior run,
then this will be the key within the progress database.
"""
adaptive_thread_pool.WorkItem.__init__(self,
'[%s-%s]' % (key_start, key_end))
self.progress_queue = progress_queue
self.state_namer = state_namer
self.state = state
self.progress_key = progress_key
self.progress_event = threading.Event()
self.key_start = key_start
self.key_end = key_end
self.error = None
self.traceback = None
def _TransferItem(self, thread_pool):
raise NotImplementedError()
def SetError(self):
"""Sets the error and traceback information for this thread.
This must be called from an exception handler.
"""
if not self.error:
exc_info = sys.exc_info()
self.error = exc_info[1]
self.traceback = exc_info[2]
def PerformWork(self, thread_pool):
"""Perform the work of this work item and report the results.
Args:
thread_pool: An AdaptiveThreadPool instance.
Returns:
A tuple (status, instruction) of the work status and an instruction
for the ThreadGate.
"""
status = adaptive_thread_pool.WorkItem.FAILURE
instruction = adaptive_thread_pool.ThreadGate.DECREASE
try:
self.MarkAsTransferring()
try:
transfer_time = self._TransferItem(thread_pool)
if transfer_time is None:
status = adaptive_thread_pool.WorkItem.RETRY
instruction = adaptive_thread_pool.ThreadGate.HOLD
else:
logger.debug('[%s] %s Transferred %d entities in %0.1f seconds',
threading.currentThread().getName(), self, self.count,
transfer_time)
sys.stdout.write('.')
sys.stdout.flush()
status = adaptive_thread_pool.WorkItem.SUCCESS
if transfer_time <= MAXIMUM_INCREASE_DURATION:
instruction = adaptive_thread_pool.ThreadGate.INCREASE
elif transfer_time <= MAXIMUM_HOLD_DURATION:
instruction = adaptive_thread_pool.ThreadGate.HOLD
except (db.InternalError, db.NotSavedError, db.Timeout,
db.TransactionFailedError,
apiproxy_errors.OverQuotaError,
apiproxy_errors.DeadlineExceededError,
apiproxy_errors.ApplicationError), e:
status = adaptive_thread_pool.WorkItem.RETRY
logger.exception('Retrying on non-fatal datastore error: %s', e)
except urllib2.HTTPError, e:
http_status = e.code
if http_status == 403 or (http_status >= 500 and http_status < 600):
status = adaptive_thread_pool.WorkItem.RETRY
logger.exception('Retrying on non-fatal HTTP error: %d %s',
http_status, e.msg)
else:
self.SetError()
status = adaptive_thread_pool.WorkItem.FAILURE
except urllib2.URLError, e:
if IsURLErrorFatal(e):
self.SetError()
status = adaptive_thread_pool.WorkItem.FAILURE
else:
status = adaptive_thread_pool.WorkItem.RETRY
logger.exception('Retrying on non-fatal URL error: %s', e.reason)
finally:
if status == adaptive_thread_pool.WorkItem.SUCCESS:
self.MarkAsTransferred()
else:
self.MarkAsError()
return (status, instruction)
def _AssertInState(self, *states):
"""Raises an Error if the state of this range is not in states."""
if not self.state in states:
raise BadStateError('%s:%s not in %s' %
(str(self),
self.state_namer(self.state),
map(self.state_namer, states)))
def _AssertProgressKey(self):
"""Raises an Error if the progress key is None."""
if self.progress_key is None:
raise BadStateError('%s: Progress key is missing' % str(self))
def MarkAsRead(self):
"""Mark this _WorkItem as read, updating the progress database."""
self._AssertInState(STATE_READ)
self._StateTransition(STATE_READ, blocking=True)
def MarkAsTransferring(self):
"""Mark this _WorkItem as transferring, updating the progress database."""
self._AssertInState(STATE_READ, STATE_ERROR)
self._AssertProgressKey()
self._StateTransition(STATE_GETTING, blocking=True)
def MarkAsTransferred(self):
"""Mark this _WorkItem as transferred, updating the progress database."""
raise NotImplementedError()
def MarkAsError(self):
"""Mark this _WorkItem as failed, updating the progress database."""
self._AssertInState(STATE_GETTING)
self._AssertProgressKey()
self._StateTransition(STATE_ERROR, blocking=True)
def _StateTransition(self, new_state, blocking=False):
"""Transition the work item to a new state, storing progress information.
Args:
new_state: The state to transition to.
blocking: Whether to block for the progress thread to acknowledge the
transition.
"""
assert not self.progress_event.isSet()
self.state = new_state
self.progress_queue.put(self)
if blocking:
self.progress_event.wait()
self.progress_event.clear()
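# Illustrative summary, not part of the original bulkloader: the transitions
# driven by the Mark* methods above, as (allowed prior states, new state).
# MarkAsTransferred is left to the subclasses.
_WORK_ITEM_TRANSITIONS_EXAMPLE = {
    'MarkAsRead': ((STATE_READ,), STATE_READ),
    'MarkAsTransferring': ((STATE_READ, STATE_ERROR), STATE_GETTING),
    'MarkAsError': ((STATE_GETTING,), STATE_ERROR),
}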
class UploadWorkItem(_WorkItem):
"""Holds a unit of uploading work.
  An UploadWorkItem represents a number of entities that need to be uploaded to
Google App Engine. These entities are encoded in the "content" field of
the UploadWorkItem, and will be POST'd as-is to the server.
The entities are identified by a range of numeric keys, inclusively. In
the case of a resumption of an upload, or a replay to correct errors,
these keys must be able to identify the same set of entities.
  Note that keys specify a range. The entities do not have to sequentially
  fill the entire range; they must simply bound a range of valid keys.
"""
def __init__(self, request_manager, progress_queue, rows, key_start, key_end,
progress_key=None):
"""Initialize the UploadWorkItem instance.
Args:
request_manager: A RequestManager instance.
progress_queue: A queue used for tracking progress information.
rows: A list of pairs of a line number and a list of column values
key_start: The (numeric) starting key, inclusive.
key_end: The (numeric) ending key, inclusive.
progress_key: If this UploadWorkItem represents state from a prior run,
then this will be the key within the progress database.
"""
_WorkItem.__init__(self, progress_queue, key_start, key_end,
ImportStateName, state=STATE_READ,
progress_key=progress_key)
assert isinstance(key_start, (int, long))
assert isinstance(key_end, (int, long))
assert key_start <= key_end
self.request_manager = request_manager
self.rows = rows
self.content = None
self.count = len(rows)
def __str__(self):
return '[%s-%s]' % (self.key_start, self.key_end)
def _TransferItem(self, thread_pool, get_time=time.time):
"""Transfers the entities associated with an item.
Args:
thread_pool: An AdaptiveThreadPool instance.
get_time: Used for dependency injection.
"""
t = get_time()
if not self.content:
self.content = self.request_manager.EncodeContent(self.rows)
try:
self.request_manager.PostEntities(self.content)
except:
raise
return get_time() - t
def MarkAsTransferred(self):
"""Mark this UploadWorkItem as sucessfully-sent to the server."""
self._AssertInState(STATE_SENDING)
self._AssertProgressKey()
self._StateTransition(STATE_SENT, blocking=False)
def GetImplementationClass(kind_or_class_key):
"""Returns the implementation class for a given kind or class key.
Args:
kind_or_class_key: A kind string or a tuple of kind strings.
  Returns:
A db.Model subclass for the given kind or class key.
"""
if isinstance(kind_or_class_key, tuple):
try:
implementation_class = polymodel._class_map[kind_or_class_key]
except KeyError:
raise db.KindError('No implementation for class \'%s\'' %
kind_or_class_key)
else:
implementation_class = db.class_for_kind(kind_or_class_key)
return implementation_class
def KeyLEQ(key1, key2):
"""Compare two keys for less-than-or-equal-to.
All keys with numeric ids come before all keys with names. None represents
an unbounded end-point so it is both greater and less than any other key.
Args:
key1: An int or datastore.Key instance.
key2: An int or datastore.Key instance.
Returns:
True if key1 <= key2
"""
if key1 is None or key2 is None:
return True
return key1 <= key2
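# Minimal sketch, not part of the original bulkloader: None acts as an
# unbounded endpoint, so it compares as <= against everything.
def _ExampleKeyLEQ():
  assert KeyLEQ(1, 2)
  assert KeyLEQ(None, 2) and KeyLEQ(2, None)
  return True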
class KeyRangeItem(_WorkItem):
"""Represents an item of work that scans over a key range.
  A KeyRangeItem object holds a KeyRange and has an associated state:
  STATE_READ, STATE_GETTING, STATE_GOT, and STATE_ERROR.
  - STATE_READ indicates the range is ready to be downloaded by a worker thread.
  - STATE_GETTING indicates the range is currently being downloaded.
  - STATE_GOT indicates that the range was successfully downloaded.
  - STATE_ERROR indicates that an error occurred during the last download
    attempt.
KeyRangeItems not in the STATE_GOT state are stored in the progress database.
When a piece of KeyRangeItem work is downloaded, the download may cover only
a portion of the range. In this case, the old KeyRangeItem is removed from
the progress database and ranges covering the undownloaded range are
generated and stored as STATE_READ in the export progress database.
"""
def __init__(self,
request_manager,
progress_queue,
kind,
key_range,
progress_key=None,
state=STATE_READ):
"""Initialize a KeyRangeItem object.
Args:
request_manager: A RequestManager instance.
progress_queue: A queue used for tracking progress information.
kind: The kind of entities for this range.
key_range: A KeyRange instance for this work item.
progress_key: The key for this range within the progress database.
state: The initial state of this range.
"""
_WorkItem.__init__(self, progress_queue, key_range.key_start,
key_range.key_end, ExportStateName, state=state,
progress_key=progress_key)
self.request_manager = request_manager
self.kind = kind
self.key_range = key_range
self.download_result = None
self.count = 0
self.key_start = key_range.key_start
self.key_end = key_range.key_end
def __str__(self):
return str(self.key_range)
def __repr__(self):
return self.__str__()
def MarkAsTransferred(self):
"""Mark this KeyRangeItem as transferred, updating the progress database."""
pass
def Process(self, download_result, thread_pool, batch_size,
new_state=STATE_GOT):
"""Mark this KeyRangeItem as success, updating the progress database.
Process will split this KeyRangeItem based on the content of
download_result and adds the unfinished ranges to the work queue.
Args:
download_result: A DownloadResult instance.
thread_pool: An AdaptiveThreadPool instance.
batch_size: The number of entities to transfer per request.
new_state: The state to transition the completed range to.
"""
self._AssertInState(STATE_GETTING)
self._AssertProgressKey()
self.download_result = download_result
self.count = len(download_result.keys)
if download_result.continued:
self._FinishedRange()._StateTransition(new_state, blocking=True)
self._AddUnfinishedRanges(thread_pool, batch_size)
else:
self._StateTransition(new_state, blocking=True)
def _FinishedRange(self):
"""Returns the range completed by the download_result.
Returns:
A KeyRangeItem representing a completed range.
"""
assert self.download_result is not None
if self.key_range.direction == key_range_module.KeyRange.ASC:
key_start = self.key_range.key_start
if self.download_result.continued:
key_end = self.download_result.key_end
else:
key_end = self.key_range.key_end
else:
key_end = self.key_range.key_end
if self.download_result.continued:
key_start = self.download_result.key_start
else:
key_start = self.key_range.key_start
key_range = KeyRange(key_start=key_start,
key_end=key_end,
direction=self.key_range.direction)
result = self.__class__(self.request_manager,
self.progress_queue,
self.kind,
key_range,
progress_key=self.progress_key,
state=self.state)
result.download_result = self.download_result
result.count = self.count
return result
def _SplitAndAddRanges(self, thread_pool, batch_size):
"""Split the key range [key_start, key_end] into a list of ranges."""
if self.download_result.direction == key_range_module.KeyRange.ASC:
key_range = KeyRange(
key_start=self.download_result.key_end,
key_end=self.key_range.key_end,
include_start=False)
else:
key_range = KeyRange(
key_start=self.key_range.key_start,
key_end=self.download_result.key_start,
include_end=False)
if thread_pool.QueuedItemCount() > 2 * thread_pool.num_threads():
ranges = [key_range]
else:
ranges = key_range.split_range(batch_size=batch_size)
for key_range in ranges:
key_range_item = self.__class__(self.request_manager,
self.progress_queue,
self.kind,
key_range)
key_range_item.MarkAsRead()
thread_pool.SubmitItem(key_range_item, block=True)
def _AddUnfinishedRanges(self, thread_pool, batch_size):
"""Adds incomplete KeyRanges to the thread_pool.
Args:
thread_pool: An AdaptiveThreadPool instance.
batch_size: The number of entities to transfer per request.
Returns:
A list of KeyRanges representing incomplete datastore key ranges.
Raises:
KeyRangeError: if this key range has already been completely transferred.
"""
assert self.download_result is not None
if self.download_result.continued:
self._SplitAndAddRanges(thread_pool, batch_size)
else:
raise KeyRangeError('No unfinished part of key range.')
class DownloadItem(KeyRangeItem):
"""A KeyRangeItem for downloading key ranges."""
def _TransferItem(self, thread_pool, get_time=time.time):
"""Transfers the entities associated with an item."""
t = get_time()
download_result = self.request_manager.GetEntities(self)
transfer_time = get_time() - t
self.Process(download_result, thread_pool,
self.request_manager.batch_size)
return transfer_time
class MapperItem(KeyRangeItem):
"""A KeyRangeItem for mapping over key ranges."""
def _TransferItem(self, thread_pool, get_time=time.time):
t = get_time()
download_result = self.request_manager.GetEntities(self)
transfer_time = get_time() - t
mapper = self.request_manager.GetMapper()
try:
mapper.batch_apply(download_result.Entities())
except MapperRetry:
return None
self.Process(download_result, thread_pool,
self.request_manager.batch_size)
return transfer_time
class RequestManager(object):
"""A class which wraps a connection to the server."""
def __init__(self,
app_id,
host_port,
url_path,
kind,
throttle,
batch_size,
secure,
email,
passin,
dry_run=False):
"""Initialize a RequestManager object.
Args:
app_id: String containing the application id for requests.
host_port: String containing the "host:port" pair; the port is optional.
url_path: partial URL (path) to post entity data to.
kind: Kind of the Entity records being posted.
throttle: A Throttle instance.
batch_size: The number of entities to transfer per request.
secure: Use SSL when communicating with server.
email: If not None, the username to log in with.
passin: If True, the password will be read from standard input.
dry_run: If True, skip remote_api setup and do not write entities to
the server.
"""
self.app_id = app_id
self.host_port = host_port
self.host = host_port.split(':')[0]
if url_path and url_path[0] != '/':
url_path = '/' + url_path
self.url_path = url_path
self.kind = kind
self.throttle = throttle
self.batch_size = batch_size
self.secure = secure
self.authenticated = False
self.auth_called = False
self.parallel_download = True
self.email = email
self.passin = passin
self.mapper = None
self.dry_run = dry_run
if self.dry_run:
logger.info('Running in dry run mode, skipping remote_api setup')
return
logger.debug('Configuring remote_api. url_path = %s, '
'servername = %s' % (url_path, host_port))
def CookieHttpRpcServer(*args, **kwargs):
kwargs['save_cookies'] = True
kwargs['account_type'] = 'HOSTED_OR_GOOGLE'
return appengine_rpc.HttpRpcServer(*args, **kwargs)
remote_api_stub.ConfigureRemoteDatastore(
app_id,
url_path,
self.AuthFunction,
servername=host_port,
rpc_server_factory=CookieHttpRpcServer,
secure=self.secure)
remote_api_throttle.ThrottleRemoteDatastore(self.throttle)
logger.debug('Bulkloader using app_id: %s', os.environ['APPLICATION_ID'])
def Authenticate(self):
"""Invoke authentication if necessary."""
logger.info('Connecting to %s%s', self.host_port, self.url_path)
if self.dry_run:
self.authenticated = True
return
remote_api_stub.MaybeInvokeAuthentication()
self.authenticated = True
def AuthFunction(self,
raw_input_fn=raw_input,
password_input_fn=getpass.getpass):
"""Prompts the user for a username and password.
Caches the results the first time it is called and returns the
same result every subsequent time.
Args:
raw_input_fn: Used for dependency injection.
password_input_fn: Used for dependency injection.
Returns:
A pair of the username and password.
"""
if self.email:
email = self.email
else:
print 'Please enter login credentials for %s' % (
self.host)
email = raw_input_fn('Email: ')
if email:
password_prompt = 'Password for %s: ' % email
if self.passin:
password = raw_input_fn(password_prompt)
else:
password = password_input_fn(password_prompt)
else:
password = None
self.auth_called = True
return (email, password)
def EncodeContent(self, rows, loader=None):
"""Encodes row data to the wire format.
Args:
rows: A list of pairs of a line number and a list of column values.
loader: Used for dependency injection.
Returns:
A list of datastore.Entity instances.
Raises:
ConfigurationError: if no loader is defined for self.kind
"""
if not loader:
try:
loader = Loader.RegisteredLoader(self.kind)
except KeyError:
logger.error('No Loader defined for kind %s.' % self.kind)
raise ConfigurationError('No Loader defined for kind %s.' % self.kind)
entities = []
for line_number, values in rows:
key = loader.generate_key(line_number, values)
if isinstance(key, datastore.Key):
parent = key.parent()
key = key.name()
else:
parent = None
entity = loader.create_entity(values, key_name=key, parent=parent)
def ToEntity(entity):
if isinstance(entity, db.Model):
return entity._populate_entity()
else:
return entity
if isinstance(entity, list):
entities.extend(map(ToEntity, entity))
elif entity:
entities.append(ToEntity(entity))
return entities
def PostEntities(self, entities):
"""Posts Entity records to a remote endpoint over HTTP.
Args:
entities: A list of datastore entities.
"""
if self.dry_run:
return
datastore.Put(entities)
def _QueryForPbs(self, query):
"""Perform the given query and return a list of entity_pb's."""
try:
query_pb = query._ToPb(limit=self.batch_size)
result_pb = datastore_pb.QueryResult()
apiproxy_stub_map.MakeSyncCall('datastore_v3', 'RunQuery', query_pb,
result_pb)
next_pb = datastore_pb.NextRequest()
next_pb.set_count(self.batch_size)
next_pb.mutable_cursor().CopyFrom(result_pb.cursor())
result_pb = datastore_pb.QueryResult()
apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Next', next_pb, result_pb)
return result_pb.result_list()
except apiproxy_errors.ApplicationError, e:
raise datastore._ToDatastoreError(e)
def GetEntities(self, key_range_item, key_factory=datastore.Key):
"""Gets Entity records from a remote endpoint over HTTP.
Args:
key_range_item: Range of keys to get.
key_factory: Used for dependency injection.
Returns:
A DownloadResult instance.
Raises:
ConfigurationError: if no Exporter is defined for self.kind
"""
keys = []
entities = []
if self.parallel_download:
query = key_range_item.key_range.make_directed_datastore_query(self.kind)
try:
results = self._QueryForPbs(query)
except datastore_errors.NeedIndexError:
logger.info('%s: No descending index on __key__, '
'performing serial download', self.kind)
self.parallel_download = False
if not self.parallel_download:
key_range_item.key_range.direction = key_range_module.KeyRange.ASC
query = key_range_item.key_range.make_ascending_datastore_query(self.kind)
results = self._QueryForPbs(query)
size = len(results)
for entity in results:
key = key_factory()
key._Key__reference = entity.key()
entities.append(entity)
keys.append(key)
continued = (size == self.batch_size)
key_range_item.count = size
return DownloadResult(continued, key_range_item.key_range.direction,
keys, entities)
def GetMapper(self):
"""Returns a mapper for the registered kind.
Returns:
A Mapper instance.
Raises:
ConfigurationError: if no Mapper is defined for self.kind
"""
if not self.mapper:
try:
self.mapper = Mapper.RegisteredMapper(self.kind)
except KeyError:
logger.error('No Mapper defined for kind %s.' % self.kind)
raise ConfigurationError('No Mapper defined for kind %s.' % self.kind)
return self.mapper
def InterruptibleSleep(sleep_time):
"""Puts the thread to sleep, checking this thread's exit_flag twice a second.
Args:
sleep_time: Time to sleep.
"""
slept = 0.0
epsilon = .0001
thread = threading.currentThread()
while slept < sleep_time - epsilon:
remaining = sleep_time - slept
this_sleep_time = min(remaining, 0.5)
time.sleep(this_sleep_time)
slept += this_sleep_time
if thread.exit_flag:
return
class _ThreadBase(threading.Thread):
"""Provide some basic features for the threads used in the uploader.
This abstract base class is used to provide some common features:
* Flag to ask thread to exit as soon as possible.
* Record exit/error status for the primary thread to pick up.
* Capture exceptions and record them for pickup.
* Some basic logging of thread start/stop.
* All threads are "daemon" threads.
* Friendly names for presenting to users.
Concrete sub-classes must implement PerformWork().
Either self.NAME should be set or GetFriendlyName() should be overridden
to return a human-friendly name for this thread.
The run() method starts the thread and prints start/exit messages.
self.exit_flag is intended to signal that this thread should exit
when it gets the chance. PerformWork() should check self.exit_flag
whenever it has the opportunity to exit gracefully.
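A minimal illustrative subclass (the name and behavior below are
hypothetical, for demonstration only):
  class NoopThread(_ThreadBase):
    NAME = 'no-op thread'

    def PerformWork(self):
      while not self.exit_flag:
        InterruptibleSleep(1.0)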
"""
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(True)
self.exit_flag = False
self.error = None
self.traceback = None
def run(self):
"""Perform the work of the thread."""
logger.debug('[%s] %s: started', self.getName(), self.__class__.__name__)
try:
self.PerformWork()
except:
self.SetError()
logger.exception('[%s] %s:', self.getName(), self.__class__.__name__)
logger.debug('[%s] %s: exiting', self.getName(), self.__class__.__name__)
def SetError(self):
"""Sets the error and traceback information for this thread.
This must be called from an exception handler.
"""
if not self.error:
exc_info = sys.exc_info()
self.error = exc_info[1]
self.traceback = exc_info[2]
def PerformWork(self):
"""Perform the thread-specific work."""
raise NotImplementedError()
def CheckError(self):
"""If an error is present, then log it."""
if self.error:
logger.error('Error in %s: %s', self.GetFriendlyName(), self.error)
if self.traceback:
logger.debug(''.join(traceback.format_exception(self.error.__class__,
self.error,
self.traceback)))
def GetFriendlyName(self):
"""Returns a human-friendly description of the thread."""
if hasattr(self, 'NAME'):
return self.NAME
return 'unknown thread'
non_fatal_error_codes = set([errno.EAGAIN,
errno.ENETUNREACH,
errno.ENETRESET,
errno.ECONNRESET,
errno.ETIMEDOUT,
errno.EHOSTUNREACH])
def IsURLErrorFatal(error):
"""Returns False if the given URLError may be from a transient failure.
Args:
error: A urllib2.URLError instance.
Returns:
True if the error is fatal, False if it may be the result of a
transient failure.
"""
assert isinstance(error, urllib2.URLError)
if not hasattr(error, 'reason'):
return True
if not isinstance(error.reason[0], int):
return True
return error.reason[0] not in non_fatal_error_codes
class DataSourceThread(_ThreadBase):
"""A thread which reads WorkItems and pushes them into queue.
This thread will read/consume WorkItems from a generator (produced by
the generator factory). These WorkItems will then be pushed into the
thread_pool. Note that reading will block if/when the thread_pool becomes
full. Information on content consumed from the generator will be pushed
into the progress_queue.
"""
NAME = 'data source thread'
def __init__(self,
request_manager,
thread_pool,
progress_queue,
workitem_generator_factory,
progress_generator_factory):
"""Initialize the DataSourceThread instance.
Args:
request_manager: A RequestManager instance.
thread_pool: An AdaptiveThreadPool instance.
progress_queue: A queue used for tracking progress information.
workitem_generator_factory: A factory that creates a WorkItem generator
progress_generator_factory: A factory that creates a generator which
produces prior progress status, or None if there is no prior status
to use.
"""
_ThreadBase.__init__(self)
self.request_manager = request_manager
self.thread_pool = thread_pool
self.progress_queue = progress_queue
self.workitem_generator_factory = workitem_generator_factory
self.progress_generator_factory = progress_generator_factory
self.entity_count = 0
def PerformWork(self):
"""Performs the work of a DataSourceThread."""
if self.progress_generator_factory:
progress_gen = self.progress_generator_factory()
else:
progress_gen = None
content_gen = self.workitem_generator_factory(self.request_manager,
self.progress_queue,
progress_gen)
self.xfer_count = 0
self.read_count = 0
self.read_all = False
for item in content_gen.Batches():
item.MarkAsRead()
while not self.exit_flag:
try:
self.thread_pool.SubmitItem(item, block=True, timeout=1.0)
self.entity_count += item.count
break
except Queue.Full:
pass
if self.exit_flag:
break
if not self.exit_flag:
self.read_all = True
self.read_count = content_gen.row_count
self.xfer_count = content_gen.xfer_count
def _RunningInThread(thread):
"""Return True if we are running within the specified thread."""
return threading.currentThread().getName() == thread.getName()
class _Database(object):
"""Base class for database connections in this module.
The table is created by a primary thread (the python main thread)
but all future lookups and updates are performed by a secondary
thread.
"""
SIGNATURE_TABLE_NAME = 'bulkloader_database_signature'
def __init__(self,
db_filename,
create_table,
signature,
index=None,
commit_periodicity=100):
"""Initialize the _Database instance.
Args:
db_filename: The sqlite3 file to use for the database.
create_table: A string containing the SQL table creation command.
signature: A string identifying the important invocation options,
used to make sure we are not using an old database.
index: An optional string to create an index for the database.
commit_periodicity: Number of operations between database commits.
"""
self.db_filename = db_filename
logger.info('Opening database: %s', db_filename)
self.primary_conn = sqlite3.connect(db_filename, isolation_level=None)
self.primary_thread = threading.currentThread()
self.secondary_conn = None
self.secondary_thread = None
self.operation_count = 0
self.commit_periodicity = commit_periodicity
try:
self.primary_conn.execute(create_table)
except sqlite3.OperationalError, e:
if 'already exists' not in e.message:
raise
if index:
try:
self.primary_conn.execute(index)
except sqlite3.OperationalError, e:
if 'already exists' not in e.message:
raise
self.existing_table = False
signature_cursor = self.primary_conn.cursor()
create_signature = """
create table %s (
value TEXT not null)
""" % _Database.SIGNATURE_TABLE_NAME
try:
self.primary_conn.execute(create_signature)
self.primary_conn.cursor().execute(
'insert into %s (value) values (?)' % _Database.SIGNATURE_TABLE_NAME,
(signature,))
except sqlite3.OperationalError, e:
if 'already exists' not in e.message:
logger.exception('Exception creating table:')
raise
else:
self.existing_table = True
signature_cursor.execute(
'select * from %s' % _Database.SIGNATURE_TABLE_NAME)
(result,) = signature_cursor.fetchone()
if result and result != signature:
logger.error('Database signature mismatch:\n\n'
'Found:\n'
'%s\n\n'
'Expecting:\n'
'%s\n',
result, signature)
raise ResumeError('Database signature mismatch: %s != %s' % (
signature, result))
def ThreadComplete(self):
"""Finalize any operations the secondary thread has performed.
The database aggregates lots of operations into a single commit, and
this method is used to commit any pending operations as the thread
is about to shut down.
"""
if self.secondary_conn:
self._MaybeCommit(force_commit=True)
def _MaybeCommit(self, force_commit=False):
"""Periodically commit changes into the SQLite database.
Committing every operation is quite expensive, and slows down the
operation of the script. Thus, we only commit after every N operations,
as determined by the self.commit_periodicity value. Optionally, the
caller can force a commit.
Args:
force_commit: Pass True in order for a commit to occur regardless
of the current operation count.
"""
self.operation_count += 1
if force_commit or (self.operation_count % self.commit_periodicity) == 0:
self.secondary_conn.commit()
def _OpenSecondaryConnection(self):
"""Possibly open a database connection for the secondary thread.
If the connection is not open (for the calling thread, which is assumed
to be the unique secondary thread), then open it. We also open a couple
cursors for later use (and reuse).
"""
if self.secondary_conn:
return
assert not _RunningInThread(self.primary_thread)
self.secondary_thread = threading.currentThread()
self.secondary_conn = sqlite3.connect(self.db_filename)
self.insert_cursor = self.secondary_conn.cursor()
self.update_cursor = self.secondary_conn.cursor()
zero_matcher = re.compile(r'\x00')
zero_one_matcher = re.compile(r'\x00\x01')
def KeyStr(key):
"""Returns a string to represent a key, preserving ordering.
Unlike datastore.Key.__str__(), we have the property:
key1 < key2 ==> KeyStr(key1) < KeyStr(key2)
The key string is constructed from the key path as follows:
(1) Strings are prepended with ':' and numeric ids are padded to
20 digits.
(2) Any null characters (u'\0') present are replaced with u'\0\1'.
(3) The sequence u'\0\0' is used to separate each component of the path.
(1) assures that names and ids compare properly, while (2) and (3) enforce
the part-by-part comparison of pieces of the path.
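For example (an illustrative key path), a key with path
('Parent', 17, 'Child', u'bob') is encoded as
u':Parent' + u'\0\0' + u'00000000000000000017' + u'\0\0' +
u':Child' + u'\0\0' + u':bob'.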
Args:
key: A datastore.Key instance.
Returns:
A string representation of the key, which preserves ordering.
"""
assert isinstance(key, datastore.Key)
path = key.to_path()
out_path = []
for part in path:
if isinstance(part, (int, long)):
part = '%020d' % part
else:
part = ':%s' % part
out_path.append(zero_matcher.sub(u'\0\1', part))
out_str = u'\0\0'.join(out_path)
return out_str
def StrKey(key_str):
"""The inverse of the KeyStr function.
Args:
key_str: A string in the range of KeyStr.
Returns:
A datastore.Key instance k, such that KeyStr(k) == key_str.
"""
parts = key_str.split(u'\0\0')
for i in xrange(len(parts)):
if parts[i][0] == ':':
part = parts[i][1:]
part = zero_one_matcher.sub(u'\0', part)
parts[i] = part
else:
parts[i] = int(parts[i])
return datastore.Key.from_path(*parts)
class ResultDatabase(_Database):
"""Persistently record all the entities downloaded during an export.
The entities are held in the database by their unique datastore key
in order to avoid duplication if an export is restarted.
"""
def __init__(self, db_filename, signature, commit_periodicity=1):
"""Initialize a ResultDatabase object.
Args:
db_filename: The name of the SQLite database to use.
signature: A string identifying the important invocation options,
used to make sure we are not using an old database.
commit_periodicity: How many operations to perform between commits.
"""
self.complete = False
create_table = ('create table result (\n'
'id BLOB primary key,\n'
'value BLOB not null)')
_Database.__init__(self,
db_filename,
create_table,
signature,
commit_periodicity=commit_periodicity)
if self.existing_table:
cursor = self.primary_conn.cursor()
cursor.execute('select count(*) from result')
self.existing_count = int(cursor.fetchone()[0])
else:
self.existing_count = 0
self.count = self.existing_count
def _StoreEntity(self, entity_id, entity):
"""Store an entity in the result database.
Args:
entity_id: A datastore.Key for the entity.
entity: The entity to store.
Returns:
True if this entity was not already present in the result database.
"""
assert _RunningInThread(self.secondary_thread)
assert isinstance(entity_id, datastore.Key), (
'expected a datastore.Key, got a %s' % entity_id.__class__.__name__)
key_str = buffer(KeyStr(entity_id).encode('utf-8'))
self.insert_cursor.execute(
'select count(*) from result where id = ?', (key_str,))
already_present = self.insert_cursor.fetchone()[0]
result = True
if already_present:
result = False
self.insert_cursor.execute('delete from result where id = ?',
(key_str,))
else:
self.count += 1
value = entity.Encode()
self.insert_cursor.execute(
'insert into result (id, value) values (?, ?)',
(key_str, buffer(value)))
return result
def StoreEntities(self, keys, entities):
"""Store a group of entities in the result database.
Args:
keys: A list of entity keys.
entities: A list of entities.
Returns:
The number of new entities stored in the result database.
"""
self._OpenSecondaryConnection()
t = time.time()
count = 0
for entity_id, entity in zip(keys,
entities):
if self._StoreEntity(entity_id, entity):
count += 1
logger.debug('%s insert: delta=%.3f',
self.db_filename,
time.time() - t)
logger.debug('Entities transferred total: %s', self.count)
self._MaybeCommit()
return count
def ResultsComplete(self):
"""Marks the result database as containing complete results."""
self.complete = True
def AllEntities(self):
"""Yields all pairs of (id, value) from the result table."""
conn = sqlite3.connect(self.db_filename, isolation_level=None)
cursor = conn.cursor()
cursor.execute(
'select id, value from result order by id')
for unused_entity_id, entity in cursor:
entity_proto = entity_pb.EntityProto(contents=entity)
yield datastore.Entity._FromPb(entity_proto)
class _ProgressDatabase(_Database):
"""Persistently record all progress information during an upload.
This class wraps a very simple SQLite database which records each of
the relevant details from a chunk of work. If the loader is
resumed, then data is replayed out of the database.
"""
def __init__(self,
db_filename,
sql_type,
py_type,
signature,
commit_periodicity=100):
"""Initialize the ProgressDatabase instance.
Args:
db_filename: The name of the SQLite database to use.
sql_type: A string of the SQL type to use for entity keys.
py_type: The python type of entity keys.
signature: A string identifying the important invocation options,
used to make sure we are not using an old database.
commit_periodicity: How many operations to perform between commits.
"""
self.prior_key_end = None
create_table = ('create table progress (\n'
'id integer primary key autoincrement,\n'
'state integer not null,\n'
'key_start %s,\n'
'key_end %s)'
% (sql_type, sql_type))
self.py_type = py_type
index = 'create index i_state on progress (state)'
_Database.__init__(self,
db_filename,
create_table,
signature,
index=index,
commit_periodicity=commit_periodicity)
def UseProgressData(self):
"""Returns True if the database has progress information.
Note there are two basic cases for progress information:
1) All saved records indicate a successful upload. In this case, we
need to skip everything transmitted so far and then send the rest.
2) Some records for incomplete transfer are present. These need to be
sent again, and then we resume sending after all the successful
data.
Returns:
True: if the database has progress information.
Raises:
ResumeError: if there is an error retrieving rows from the database.
"""
assert _RunningInThread(self.primary_thread)
cursor = self.primary_conn.cursor()
cursor.execute('select count(*) from progress')
row = cursor.fetchone()
if row is None:
raise ResumeError('Cannot retrieve progress information from database.')
return row[0] != 0
def StoreKeys(self, key_start, key_end):
"""Record a new progress record, returning a key for later updates.
The specified progress information will be persisted into the database.
A unique key will be returned that identifies this progress state. The
key is later used to (quickly) update this record.
For the progress resumption to proceed properly, calls to StoreKeys
MUST specify monotonically increasing key ranges. This will result in
a database in which the ID, KEY_START, and KEY_END columns are all
increasing (rather than having ranges out of order).
NOTE: the above precondition is NOT tested by this method (since it
would imply an additional table read or two on each invocation).
Args:
key_start: The starting key of the WorkItem (inclusive)
key_end: The end key of the WorkItem (inclusive)
Returns:
A string to later be used as a unique key to update this state.
"""
self._OpenSecondaryConnection()
assert _RunningInThread(self.secondary_thread)
assert (not key_start) or isinstance(key_start, self.py_type), (
'%s is a %s, %s expected %s' % (key_start,
key_start.__class__,
self.__class__.__name__,
self.py_type))
assert (not key_end) or isinstance(key_end, self.py_type), (
'%s is a %s, %s expected %s' % (key_end,
key_end.__class__,
self.__class__.__name__,
self.py_type))
assert KeyLEQ(key_start, key_end), '%s not less than %s' % (
repr(key_start), repr(key_end))
self.insert_cursor.execute(
'insert into progress (state, key_start, key_end) values (?, ?, ?)',
(STATE_READ, unicode(key_start), unicode(key_end)))
progress_key = self.insert_cursor.lastrowid
self._MaybeCommit()
return progress_key
def UpdateState(self, key, new_state):
"""Update a specified progress record with new information.
Args:
key: The key for this progress record, returned from StoreKeys
new_state: The new state to associate with this progress record.
"""
self._OpenSecondaryConnection()
assert _RunningInThread(self.secondary_thread)
assert isinstance(new_state, int)
self.update_cursor.execute('update progress set state=? where id=?',
(new_state, key))
self._MaybeCommit()
def DeleteKey(self, progress_key):
"""Delete the entities with the given key from the result database."""
self._OpenSecondaryConnection()
assert _RunningInThread(self.secondary_thread)
t = time.time()
self.insert_cursor.execute(
'delete from progress where rowid = ?', (progress_key,))
logger.debug('delete: delta=%.3f', time.time() - t)
self._MaybeCommit()
def GetProgressStatusGenerator(self):
"""Get a generator which yields progress information.
The returned generator will yield a series of 4-tuples that specify
progress information about a prior run of the uploader. The 4-tuples
have the following values:
progress_key: The unique key to later update this record with new
progress information.
state: The last state saved for this progress record.
key_start: The starting key of the items for uploading (inclusive).
key_end: The ending key of the items for uploading (inclusive).
After all incompletely-transferred records have been provided, one
more 4-tuple is generated:
None
DATA_CONSUMED_TO_HERE: A unique string value indicating this record
is being provided.
None
key_end: An integer value specifying the last data source key that
was handled by the previous run of the uploader.
The caller should begin uploading records which occur after key_end.
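Illustrative consumption of the generator (the names resume_after and
retransmit are hypothetical):
  for progress_key, state, key_start, key_end in db.GetProgressStatusGenerator():
    if state == DATA_CONSUMED_TO_HERE:
      resume_after = key_end
    else:
      retransmit(progress_key, state, key_start, key_end)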
Yields:
Four-tuples of (progress_key, state, key_start, key_end)
"""
conn = sqlite3.connect(self.db_filename, isolation_level=None)
cursor = conn.cursor()
cursor.execute('select max(key_end) from progress')
result = cursor.fetchone()
if result is not None:
key_end = result[0]
else:
logger.debug('No rows in progress database.')
return
self.prior_key_end = key_end
cursor.execute(
'select id, state, key_start, key_end from progress'
' where state != ?'
' order by id',
(STATE_SENT,))
rows = cursor.fetchall()
for row in rows:
if row is None:
break
progress_key, state, key_start, key_end = row
yield progress_key, state, key_start, key_end
yield None, DATA_CONSUMED_TO_HERE, None, key_end
def ProgressDatabase(db_filename, signature):
"""Returns a database to store upload progress information."""
return _ProgressDatabase(db_filename, 'INTEGER', int, signature)
class ExportProgressDatabase(_ProgressDatabase):
"""A database to store download progress information."""
def __init__(self, db_filename, signature):
"""Initialize an ExportProgressDatabase."""
_ProgressDatabase.__init__(self,
db_filename,
'TEXT',
datastore.Key,
signature,
commit_periodicity=1)
def UseProgressData(self):
"""Check if the progress database contains progress data.
Returns:
True: if the database contains progress data.
"""
return self.existing_table
class StubProgressDatabase(object):
"""A stub implementation of ProgressDatabase which does nothing."""
def UseProgressData(self):
"""Whether the stub database has progress information (it doesn't)."""
return False
def StoreKeys(self, unused_key_start, unused_key_end):
"""Pretend to store a key in the stub database."""
return 'fake-key'
def UpdateState(self, unused_key, unused_new_state):
"""Pretend to update the state of a progress item."""
pass
def ThreadComplete(self):
"""Finalize operations on the stub database (i.e. do nothing)."""
pass
class _ProgressThreadBase(_ThreadBase):
"""A thread which records progress information for the upload process.
The progress information is stored into the provided progress database.
This class is not responsible for replaying a prior run's progress
information out of the database. Separate mechanisms must be used to
resume a prior upload attempt.
"""
NAME = 'progress tracking thread'
def __init__(self, progress_queue, progress_db):
"""Initialize the ProgressTrackerThread instance.
Args:
progress_queue: A Queue used for tracking progress information.
progress_db: The database for tracking progress information; should
be an instance of ProgressDatabase.
"""
_ThreadBase.__init__(self)
self.progress_queue = progress_queue
self.db = progress_db
self.entities_transferred = 0
def EntitiesTransferred(self):
"""Return the total number of unique entities transferred."""
return self.entities_transferred
def UpdateProgress(self, item):
"""Updates the progress information for the given item.
Args:
item: A work item whose new state will be recorded
"""
raise NotImplementedError()
def WorkFinished(self):
"""Performs final actions after the entity transfer is complete."""
raise NotImplementedError()
def PerformWork(self):
"""Performs the work of a ProgressTrackerThread."""
while not self.exit_flag:
try:
item = self.progress_queue.get(block=True, timeout=1.0)
except Queue.Empty:
continue
if item == _THREAD_SHOULD_EXIT:
break
if item.state == STATE_READ and item.progress_key is None:
item.progress_key = self.db.StoreKeys(item.key_start, item.key_end)
else:
assert item.progress_key is not None
self.UpdateProgress(item)
item.progress_event.set()
self.progress_queue.task_done()
self.db.ThreadComplete()
class ProgressTrackerThread(_ProgressThreadBase):
"""A thread which records progress information for the upload process.
The progress information is stored into the provided progress database.
This class is not responsible for replaying a prior run's progress
information out of the database. Separate mechanisms must be used to
resume a prior upload attempt.
"""
NAME = 'progress tracking thread'
def __init__(self, progress_queue, progress_db):
"""Initialize the ProgressTrackerThread instance.
Args:
progress_queue: A Queue used for tracking progress information.
progress_db: The database for tracking progress information; should
be an instance of ProgressDatabase.
"""
_ProgressThreadBase.__init__(self, progress_queue, progress_db)
def UpdateProgress(self, item):
"""Update the state of the given WorkItem.
Args:
item: A WorkItem instance.
"""
self.db.UpdateState(item.progress_key, item.state)
if item.state == STATE_SENT:
self.entities_transferred += item.count
def WorkFinished(self):
"""Performs final actions after the entity transfer is complete."""
pass
class ExportProgressThread(_ProgressThreadBase):
"""A thread to record progress information and write record data for exports.
The progress information is stored into a provided progress database.
Exported results are stored in the result database and dumped to an output
file at the end of the download.
"""
def __init__(self, kind, progress_queue, progress_db, result_db):
"""Initialize the ExportProgressThread instance.
Args:
kind: The kind of entities being stored in the database.
progress_queue: A Queue used for tracking progress information.
progress_db: The database for tracking progress information; should
be an instance of ProgressDatabase.
result_db: The database for holding exported entities; should be an
instance of ResultDatabase.
"""
_ProgressThreadBase.__init__(self, progress_queue, progress_db)
self.kind = kind
self.existing_count = result_db.existing_count
self.result_db = result_db
def EntitiesTransferred(self):
"""Return the total number of unique entities transferred."""
return self.result_db.count
def WorkFinished(self):
"""Write the contents of the result database."""
exporter = Exporter.RegisteredExporter(self.kind)
exporter.output_entities(self.result_db.AllEntities())
def UpdateProgress(self, item):
"""Update the state of the given KeyRangeItem.
Args:
item: A KeyRangeItem instance.
"""
if item.state == STATE_GOT:
count = self.result_db.StoreEntities(item.download_result.keys,
item.download_result.entities)
self.db.DeleteKey(item.progress_key)
self.entities_transferred += count
else:
self.db.UpdateState(item.progress_key, item.state)
class MapperProgressThread(_ProgressThreadBase):
"""A thread to record progress information for maps over the datastore."""
def __init__(self, kind, progress_queue, progress_db):
"""Initialize the MapperProgressThread instance.
Args:
kind: The kind of entities being stored in the database.
progress_queue: A Queue used for tracking progress information.
progress_db: The database for tracking progress information; should
be an instance of ProgressDatabase.
"""
_ProgressThreadBase.__init__(self, progress_queue, progress_db)
self.kind = kind
self.mapper = Mapper.RegisteredMapper(self.kind)
def EntitiesTransferred(self):
"""Return the total number of unique entities transferred."""
return self.entities_transferred
def WorkFinished(self):
"""Perform actions after map is complete."""
pass
def UpdateProgress(self, item):
"""Update the state of the given KeyRangeItem.
Args:
item: A KeyRangeItem instance.
"""
if item.state == STATE_GOT:
self.entities_transferred += item.count
self.db.DeleteKey(item.progress_key)
else:
self.db.UpdateState(item.progress_key, item.state)
def ParseKey(key_string):
"""Turn a key stored in the database into a Key or None.
Args:
key_string: The string representation of a Key.
Returns:
A datastore.Key instance or None
"""
if not key_string:
return None
if key_string == 'None':
return None
return datastore.Key(encoded=key_string)
def Validate(value, typ):
"""Checks that value is non-empty and of the right type.
Args:
value: any value
typ: a type or tuple of types
Raises:
ValueError: if value is None or empty.
TypeError: if it's not the given type.
"""
if not value:
raise ValueError('Value should not be empty; received %s.' % value)
elif not isinstance(value, typ):
raise TypeError('Expected a %s, but received %s (a %s).' %
(typ, value, value.__class__))
def CheckFile(filename):
"""Check that the given file exists and can be opened for reading.
Args:
filename: The name of the file.
Raises:
FileNotFoundError: if the given filename is not found
FileNotReadableError: if the given filename is not readable.
"""
if not os.path.exists(filename):
raise FileNotFoundError('%s: file not found' % filename)
elif not os.access(filename, os.R_OK):
raise FileNotReadableError('%s: file not readable' % filename)
class Loader(object):
"""A base class for creating datastore entities from input data.
To add a handler for bulk loading a new entity kind into your datastore,
write a subclass of this class that calls Loader.__init__ from your
class's __init__.
If you need to run extra code to convert entities from the input
data, create new properties, or otherwise modify the entities before
they're inserted, override handle_entity.
See the create_entity method for the creation of entities from the
(parsed) input data.
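A minimal illustrative subclass (the 'Person' kind, its model class, and
the column layout are assumptions for demonstration only):
  class PersonLoader(Loader):
    def __init__(self):
      Loader.__init__(self, 'Person',
                      [('name', str),
                       ('email', datastore_types.Email),
                      ])
Subclasses such as this are typically instantiated and registered via
Loader.RegisterLoader, or listed in the configuration module handed to
the bulkloader.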
"""
__loaders = {}
kind = None
__properties = None
def __init__(self, kind, properties):
"""Constructor.
Populates this Loader's kind and properties map.
Args:
kind: a string containing the entity kind that this loader handles
properties: list of (name, converter) tuples.
This is used to automatically convert the input columns into
properties. The converter should be a function that takes one
argument, a string value from the input file, and returns a
correctly typed property value that should be inserted. The
tuples in this list should match the columns in your input file,
in order.
For example:
[('name', str),
('id_number', int),
('email', datastore_types.Email),
('user', users.User),
('birthdate', lambda x: datetime.datetime.fromtimestamp(float(x))),
('description', datastore_types.Text),
]
"""
Validate(kind, (basestring, tuple))
self.kind = kind
self.__openfile = open
self.__create_csv_reader = csv.reader
GetImplementationClass(kind)
Validate(properties, list)
for name, fn in properties:
Validate(name, basestring)
assert callable(fn), (
'Conversion function %s for property %s is not callable.' % (fn, name))
self.__properties = properties
@staticmethod
def RegisterLoader(loader):
"""Register the given Loader instance for its kind.
Args:
loader: A Loader instance.
"""
Loader.__loaders[loader.kind] = loader
def alias_old_names(self):
"""Aliases method names so that Loaders defined with old names work."""
aliases = (
('CreateEntity', 'create_entity'),
('HandleEntity', 'handle_entity'),
('GenerateKey', 'generate_key'),
)
for old_name, new_name in aliases:
setattr(Loader, old_name, getattr(Loader, new_name))
if hasattr(self.__class__, old_name) and not (
getattr(self.__class__, old_name).im_func ==
getattr(Loader, new_name).im_func):
if hasattr(self.__class__, new_name) and not (
getattr(self.__class__, new_name).im_func ==
getattr(Loader, new_name).im_func):
raise NameClashError(old_name, new_name, self.__class__)
setattr(self, new_name, getattr(self, old_name))
def create_entity(self, values, key_name=None, parent=None):
"""Creates an entity from a list of property values.
Args:
values: list/tuple of str
key_name: if provided, the name for the (single) resulting entity
parent: A datastore.Key instance for the parent, or None
Returns:
list of db.Model
The returned entities are populated with the property values from the
argument, converted to native types using the properties map given in
the constructor, and passed through handle_entity. They're ready to be
inserted.
Raises:
AssertionError: if the number of values doesn't match the number
of properties in the properties map.
ValueError: if any element of values is None or empty.
TypeError: if values is not a list or tuple.
"""
Validate(values, (list, tuple))
assert len(values) == len(self.__properties), (
'Expected %d columns, found %d.' %
(len(self.__properties), len(values)))
model_class = GetImplementationClass(self.kind)
properties = {
'key_name': key_name,
'parent': parent,
}
for (name, converter), val in zip(self.__properties, values):
if converter is bool and val.lower() in ('0', 'false', 'no'):
val = False
properties[name] = converter(val)
entity = model_class(**properties)
entities = self.handle_entity(entity)
if entities:
if not isinstance(entities, (list, tuple)):
entities = [entities]
for entity in entities:
if not isinstance(entity, db.Model):
raise TypeError('Expected a db.Model, received %s (a %s).' %
(entity, entity.__class__))
return entities
def generate_key(self, i, values):
"""Generates a key_name to be used in creating the underlying object.
The default implementation returns None.
This method can be overridden to control the key generation for
uploaded entities. The value returned should be None (to use a
server generated numeric key), or a string which neither starts
with a digit nor has the form __*__ (see
http://code.google.com/appengine/docs/python/datastore/keysandentitygroups.html),
or a datastore.Key instance.
If you generate your own string keys, keep in mind:
1. The key name for each entity must be unique.
2. If an entity of the same kind and key already exists in the
datastore, it will be overwritten.
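An illustrative override that derives the key name from the first input
column (assumes that column holds a unique value):
  def generate_key(self, i, values):
    return 'key-%s' % values[0]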
Args:
i: Number corresponding to this object (assume it's run in a loop;
this is your current count).
values: list/tuple of str.
Returns:
A string to be used as the key_name for an entity.
"""
return None
def handle_entity(self, entity):
"""Subclasses can override this to add custom entity conversion code.
This is called for each entity, after its properties are populated
from the input but before it is stored. Subclasses can override
this to add custom entity handling code.
The entity to be inserted should be returned. If multiple entities
should be inserted, return a list of entities. If no entities
should be inserted, return None or [].
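An illustrative override that normalizes a hypothetical 'name' property
before the entity is stored:
  def handle_entity(self, entity):
    entity.name = entity.name.strip().title()
    return entity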
Args:
entity: db.Model
Returns:
db.Model or list of db.Model
"""
return entity
def initialize(self, filename, loader_opts):
"""Performs initialization and validation of the input file.
This implementation checks that the input file exists and can be
opened for reading.
Args:
filename: The string given as the --filename flag argument.
loader_opts: The string given as the --loader_opts flag argument.
"""
CheckFile(filename)
def finalize(self):
"""Performs finalization actions after the upload completes."""
pass
def generate_records(self, filename):
"""Subclasses can override this to add custom data input code.
This method must yield fixed-length lists of strings.
The default implementation uses csv.reader to read CSV rows
from filename.
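An illustrative override that reads tab-separated values instead of CSV:
  def generate_records(self, filename):
    for line in open(filename):
      yield line.rstrip('\n').split('\t')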
Args:
filename: The string input for the --filename option.
Yields:
Lists of strings.
"""
csv_generator = CSVGenerator(filename, openfile=self.__openfile,
create_csv_reader=self.__create_csv_reader
).Records()
return csv_generator
@staticmethod
def RegisteredLoaders():
"""Returns a dict of the Loader instances that have been created."""
return dict(Loader.__loaders)
@staticmethod
def RegisteredLoader(kind):
"""Returns the loader instance for the given kind if it exists."""
return Loader.__loaders[kind]
class RestoreThread(_ThreadBase):
"""A thread to read saved entity_pbs from sqlite3."""
NAME = 'RestoreThread'
_ENTITIES_DONE = 'Entities Done'
def __init__(self, queue, filename):
_ThreadBase.__init__(self)
self.queue = queue
self.filename = filename
def PerformWork(self):
db_conn = sqlite3.connect(self.filename)
cursor = db_conn.cursor()
cursor.execute('select id, value from result')
for entity_id, value in cursor:
self.queue.put([entity_id, value], block=True)
self.queue.put(RestoreThread._ENTITIES_DONE, block=True)
class RestoreLoader(Loader):
"""A Loader which imports protobuffers from a file."""
def __init__(self, kind, app_id):
self.kind = kind
self.app_id = app_id
def initialize(self, filename, loader_opts):
CheckFile(filename)
self.queue = Queue.Queue(1000)
restore_thread = RestoreThread(self.queue, filename)
restore_thread.start()
def generate_records(self, filename):
while True:
record = self.queue.get(block=True)
if id(record) == id(RestoreThread._ENTITIES_DONE):
break
yield record
def create_entity(self, values, key_name=None, parent=None):
def convert_key(key, app_id):
path = key.to_path()
kwargs = {'_app_id_namespace': app_id}
return db.Key.from_path(*path, **kwargs)
import copy
key = StrKey(unicode(values[0], 'utf-8'))
entity_proto = entity_pb.EntityProto(contents=str(values[1]))
entity_proto.mutable_key().CopyFrom(key._Key__reference)
entity = datastore.Entity._FromPb(entity_proto)
new_entity = copy.copy(entity)
for k,v in entity.iteritems():
if isinstance(v, db.Key):
new_entity[k] = convert_key(v, self.app_id)
if isinstance(v, list):
new_list = []
for item in v:
if isinstance(item, db.Key):
new_list.append(convert_key(item, self.app_id))
else:
new_list.append(item)
new_entity[k] = new_list
return new_entity
class Exporter(object):
"""A base class for serializing datastore entities.
To add a handler for exporting an entity kind from your datastore,
write a subclass of this class that calls Exporter.__init__ from your
class's __init__.
If you need to run extra code to convert entity properties before they
are written, supply appropriate converter functions in the properties
list passed to __init__.
See the output_entities method for the writing of data from entities.
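A minimal illustrative subclass for a hypothetical 'Person' kind (the
property layout is an assumption for demonstration only):
  class PersonExporter(Exporter):
    def __init__(self):
      Exporter.__init__(self, 'Person',
                        [('name', str, None),
                         ('email', str, ''),
                        ])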
"""
__exporters = {}
kind = None
__properties = None
def __init__(self, kind, properties):
"""Constructor.
Populates this Exporter's kind and properties map.
Args:
kind: a string containing the entity kind that this exporter handles
properties: list of (name, converter, default) tuples.
This is used to automatically convert the entities to strings.
The converter should be a function that takes one argument, a property
value of the appropriate type, and returns a str or unicode. The default
is a string to be used if the property is not present, or None to fail
with an error if the property is missing.
For example:
[('name', str, None),
('id_number', str, None),
('email', str, ''),
('user', str, None),
('birthdate',
lambda x: str(datetime.datetime.fromtimestamp(float(x))),
None),
('description', str, ''),
]
"""
Validate(kind, basestring)
self.kind = kind
GetImplementationClass(kind)
Validate(properties, list)
for name, fn, default in properties:
Validate(name, basestring)
assert callable(fn), (
'Conversion function %s for property %s is not callable.' % (
fn, name))
if default:
Validate(default, basestring)
self.__properties = properties
@staticmethod
def RegisterExporter(exporter):
"""Register the given Exporter instance for its kind.
Args:
exporter: An Exporter instance.
"""
Exporter.__exporters[exporter.kind] = exporter
def __ExtractProperties(self, entity):
"""Converts an entity into a list of string values.
Args:
entity: An entity to extract the properties from.
Returns:
A list of the properties of the entity.
Raises:
MissingPropertyError: if an expected field on the entity is missing.
"""
encoding = []
for name, fn, default in self.__properties:
try:
encoding.append(fn(entity[name]))
except AttributeError:
if default is None:
raise MissingPropertyError(name)
else:
encoding.append(default)
return encoding
def __EncodeEntity(self, entity):
"""Convert the given entity into CSV string.
Args:
entity: The entity to encode.
Returns:
A CSV string.
"""
output = StringIO.StringIO()
writer = csv.writer(output, lineterminator='')
writer.writerow(self.__ExtractProperties(entity))
return output.getvalue()
def __SerializeEntity(self, entity):
"""Creates a string representation of an entity.
Args:
entity: The entity to serialize.
Returns:
A serialized representation of an entity.
"""
encoding = self.__EncodeEntity(entity)
if not isinstance(encoding, unicode):
encoding = unicode(encoding, 'utf-8')
encoding = encoding.encode('utf-8')
return encoding
def output_entities(self, entity_generator):
"""Outputs the downloaded entities.
This implementation writes CSV.
Args:
entity_generator: A generator that yields the downloaded entities
in key order.
"""
CheckOutputFile(self.output_filename)
output_file = open(self.output_filename, 'w')
logger.debug('Export complete, writing to file')
output_file.writelines(self.__SerializeEntity(entity) + '\n'
for entity in entity_generator)
def initialize(self, filename, exporter_opts):
"""Performs initialization and validation of the output file.
This implementation checks that the output file can be opened for
writing.
Args:
filename: The string given as the --filename flag argument.
exporter_opts: The string given as the --exporter_opts flag argument.
"""
CheckOutputFile(filename)
self.output_filename = filename
def finalize(self):
"""Performs finalization actions after the download completes."""
pass
@staticmethod
def RegisteredExporters():
"""Returns a dictionary of the exporter instances that have been created."""
return dict(Exporter.__exporters)
@staticmethod
def RegisteredExporter(kind):
"""Returns an exporter instance for the given kind if it exists."""
return Exporter.__exporters[kind]
class DumpExporter(Exporter):
"""An exporter which dumps protobuffers to a file."""
def __init__(self, kind, result_db_filename):
self.kind = kind
self.result_db_filename = result_db_filename
def output_entities(self, entity_generator):
shutil.copyfile(self.result_db_filename, self.output_filename)
class MapperRetry(Error):
"""An exception that indicates a non-fatal error during mapping."""
class Mapper(object):
"""A base class for serializing datastore entities.
To add a handler for mapping over an entity kind in your datastore,
write a subclass of this class that calls Mapper.__init__ from your
class's __init__.
You need to implement the batch_apply or apply method on your subclass
for the map to do anything.
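A minimal illustrative subclass that logs each entity of a hypothetical
'Person' kind:
  class PersonMapper(Mapper):
    def __init__(self):
      Mapper.__init__(self, 'Person')

    def apply(self, entity):
      logger.info('Mapped entity: %s', entity)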
"""
__mappers = {}
kind = None
def __init__(self, kind):
"""Constructor.
Populates this Mapper's kind.
Args:
kind: a string containing the entity kind that this mapper handles
"""
Validate(kind, basestring)
self.kind = kind
GetImplementationClass(kind)
@staticmethod
def RegisterMapper(mapper):
"""Register the given Mapper instance for its kind.
Args:
mapper: A Mapper instance.
"""
Mapper.__mappers[mapper.kind] = mapper
def initialize(self, mapper_opts):
"""Performs initialization.
Args:
mapper_opts: The string given as the --mapper_opts flag argument.
"""
pass
def finalize(self):
"""Performs finalization actions after the download completes."""
pass
def apply(self, entity):
print 'Default map function doing nothing to %s' % entity
def batch_apply(self, entities):
for entity in entities:
self.apply(entity)
@staticmethod
def RegisteredMappers():
"""Returns a dictionary of the mapper instances that have been created."""
return dict(Mapper.__mappers)
@staticmethod
def RegisteredMapper(kind):
"""Returns the mapper instance for the given kind if it exists."""
return Mapper.__mappers[kind]
class QueueJoinThread(threading.Thread):
"""A thread that joins a queue and exits.
Queue joins do not have a timeout. To simulate a queue join with
timeout, run this thread and join it with a timeout.
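Illustrative usage (work_queue is hypothetical):
  join_thread = QueueJoinThread(work_queue)
  join_thread.start()
  join_thread.join(timeout=0.5)
  if join_thread.isAlive():
    pass  # the join timed out; the queue still has unfinished items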
"""
def __init__(self, queue):
"""Initialize a QueueJoinThread.
Args:
queue: The queue for this thread to join.
"""
threading.Thread.__init__(self)
assert isinstance(queue, (Queue.Queue, ReQueue))
self.queue = queue
def run(self):
"""Perform the queue join in this thread."""
self.queue.join()
def InterruptibleQueueJoin(queue,
thread_local,
thread_pool,
queue_join_thread_factory=QueueJoinThread,
check_workers=True):
"""Repeatedly joins the given ReQueue or Queue.Queue with short timeout.
Between each timeout on the join, worker threads are checked.
Args:
queue: A Queue.Queue or ReQueue instance.
thread_local: A threading.local instance which indicates interrupts.
thread_pool: An AdaptiveThreadPool instance.
queue_join_thread_factory: Used for dependency injection.
check_workers: Whether to interrupt the join on worker death.
Returns:
True unless the queue join is interrupted by SIGINT or worker death.
"""
thread = queue_join_thread_factory(queue)
thread.start()
while True:
thread.join(timeout=.5)
if not thread.isAlive():
return True
if thread_local.shut_down:
logger.debug('Queue join interrupted')
return False
if check_workers:
for worker_thread in thread_pool.Threads():
if not worker_thread.isAlive():
return False
def ShutdownThreads(data_source_thread, thread_pool):
"""Shuts down the worker and data source threads.
Args:
data_source_thread: A running DataSourceThread instance.
thread_pool: An AdaptiveThreadPool instance with workers registered.
"""
logger.info('An error occurred. Shutting down...')
data_source_thread.exit_flag = True
thread_pool.Shutdown()
data_source_thread.join(timeout=3.0)
if data_source_thread.isAlive():
logger.warn('%s hung while trying to exit',
data_source_thread.GetFriendlyName())
class BulkTransporterApp(object):
"""Class to wrap bulk transport application functionality."""
def __init__(self,
arg_dict,
input_generator_factory,
throttle,
progress_db,
progresstrackerthread_factory,
max_queue_size=DEFAULT_QUEUE_SIZE,
request_manager_factory=RequestManager,
datasourcethread_factory=DataSourceThread,
progress_queue_factory=Queue.Queue,
thread_pool_factory=adaptive_thread_pool.AdaptiveThreadPool):
"""Instantiate a BulkTransporterApp.
Uploads or downloads data to or from application using HTTP requests.
When run, the class will spin up a number of threads to read entities
from the data source, pass those to a number of worker threads
for sending to the application, and track all of the progress in a
small database in case an error or pause/termination requires a
restart/resumption of the upload process.
Args:
arg_dict: Dictionary of command line options.
input_generator_factory: A factory that creates a WorkItem generator.
throttle: A Throttle instance.
progress_db: The database to use for replaying/recording progress.
progresstrackerthread_factory: Used for dependency injection.
max_queue_size: Maximum size of the queues before they should block.
request_manager_factory: Used for dependency injection.
datasourcethread_factory: Used for dependency injection.
progress_queue_factory: Used for dependency injection.
thread_pool_factory: Used for dependency injection.
"""
self.app_id = arg_dict['app_id']
self.post_url = arg_dict['url']
self.kind = arg_dict['kind']
self.batch_size = arg_dict['batch_size']
self.input_generator_factory = input_generator_factory
self.num_threads = arg_dict['num_threads']
self.email = arg_dict['email']
self.passin = arg_dict['passin']
self.dry_run = arg_dict['dry_run']
self.throttle = throttle
self.progress_db = progress_db
self.progresstrackerthread_factory = progresstrackerthread_factory
self.max_queue_size = max_queue_size
self.request_manager_factory = request_manager_factory
self.datasourcethread_factory = datasourcethread_factory
self.progress_queue_factory = progress_queue_factory
self.thread_pool_factory = thread_pool_factory
(scheme,
self.host_port, self.url_path,
unused_query, unused_fragment) = urlparse.urlsplit(self.post_url)
self.secure = (scheme == 'https')
def Run(self):
"""Perform the work of the BulkTransporterApp.
Raises:
AuthenticationError: If authentication is required and fails.
Returns:
Error code suitable for sys.exit, e.g. 0 on success, 1 on failure.
"""
self.error = False
thread_pool = self.thread_pool_factory(
self.num_threads, queue_size=self.max_queue_size)
self.throttle.Register(threading.currentThread())
threading.currentThread().exit_flag = False
progress_queue = self.progress_queue_factory(self.max_queue_size)
request_manager = self.request_manager_factory(self.app_id,
self.host_port,
self.url_path,
self.kind,
self.throttle,
self.batch_size,
self.secure,
self.email,
self.passin,
self.dry_run)
try:
request_manager.Authenticate()
except Exception, e:
self.error = True
if not isinstance(e, urllib2.HTTPError) or (
e.code != 302 and e.code != 401):
logger.exception('Exception during authentication')
raise AuthenticationError()
if (request_manager.auth_called and
not request_manager.authenticated):
self.error = True
raise AuthenticationError('Authentication failed')
for thread in thread_pool.Threads():
self.throttle.Register(thread)
self.progress_thread = self.progresstrackerthread_factory(
progress_queue, self.progress_db)
if self.progress_db.UseProgressData():
logger.debug('Restarting upload using progress database')
progress_generator_factory = self.progress_db.GetProgressStatusGenerator
else:
progress_generator_factory = None
self.data_source_thread = (
self.datasourcethread_factory(request_manager,
thread_pool,
progress_queue,
self.input_generator_factory,
progress_generator_factory))
thread_local = threading.local()
thread_local.shut_down = False
def Interrupt(unused_signum, unused_frame):
"""Shutdown gracefully in response to a signal."""
thread_local.shut_down = True
self.error = True
signal.signal(signal.SIGINT, Interrupt)
self.progress_thread.start()
self.data_source_thread.start()
while not thread_local.shut_down:
self.data_source_thread.join(timeout=0.25)
if self.data_source_thread.isAlive():
for thread in list(thread_pool.Threads()) + [self.progress_thread]:
if not thread.isAlive():
logger.info('Unexpected thread death: %s', thread.getName())
thread_local.shut_down = True
self.error = True
break
else:
break
def _Join(ob, msg):
logger.debug('Waiting for %s...', msg)
if isinstance(ob, threading.Thread):
ob.join(timeout=3.0)
if ob.isAlive():
logger.debug('Joining %s failed', ob)
else:
logger.debug('... done.')
elif isinstance(ob, (Queue.Queue, ReQueue)):
if not InterruptibleQueueJoin(ob, thread_local, thread_pool):
ShutdownThreads(self.data_source_thread, thread_pool)
else:
ob.join()
logger.debug('... done.')
if self.data_source_thread.error or thread_local.shut_down:
ShutdownThreads(self.data_source_thread, thread_pool)
else:
_Join(thread_pool.requeue, 'worker threads to finish')
thread_pool.Shutdown()
thread_pool.JoinThreads()
thread_pool.CheckErrors()
print ''
if self.progress_thread.isAlive():
InterruptibleQueueJoin(progress_queue, thread_local, thread_pool,
check_workers=False)
else:
logger.warn('Progress thread exited prematurely')
progress_queue.put(_THREAD_SHOULD_EXIT)
_Join(self.progress_thread, 'progress_thread to terminate')
self.progress_thread.CheckError()
if not thread_local.shut_down:
self.progress_thread.WorkFinished()
self.data_source_thread.CheckError()
return self.ReportStatus()
def ReportStatus(self):
"""Display a message reporting the final status of the transfer."""
raise NotImplementedError()
class BulkUploaderApp(BulkTransporterApp):
"""Class to encapsulate bulk uploader functionality."""
def __init__(self, *args, **kwargs):
BulkTransporterApp.__init__(self, *args, **kwargs)
def ReportStatus(self):
"""Display a message reporting the final status of the transfer."""
total_up, duration = self.throttle.TotalTransferred(
remote_api_throttle.BANDWIDTH_UP)
s_total_up, unused_duration = self.throttle.TotalTransferred(
remote_api_throttle.HTTPS_BANDWIDTH_UP)
total_up += s_total_up
total = total_up
logger.info('%d entities total, %d previously transferred',
self.data_source_thread.read_count,
self.data_source_thread.xfer_count)
transfer_count = self.progress_thread.EntitiesTransferred()
logger.info('%d entities (%d bytes) transferred in %.1f seconds',
transfer_count, total, duration)
if (self.data_source_thread.read_all and
transfer_count +
self.data_source_thread.xfer_count >=
self.data_source_thread.read_count):
logger.info('All entities successfully transferred')
return 0
else:
logger.info('Some entities not successfully transferred')
return 1
class BulkDownloaderApp(BulkTransporterApp):
"""Class to encapsulate bulk downloader functionality."""
def __init__(self, *args, **kwargs):
BulkTransporterApp.__init__(self, *args, **kwargs)
def ReportStatus(self):
"""Display a message reporting the final status of the transfer."""
total_down, duration = self.throttle.TotalTransferred(
remote_api_throttle.BANDWIDTH_DOWN)
s_total_down, unused_duration = self.throttle.TotalTransferred(
remote_api_throttle.HTTPS_BANDWIDTH_DOWN)
total_down += s_total_down
total = total_down
existing_count = self.progress_thread.existing_count
xfer_count = self.progress_thread.EntitiesTransferred()
logger.info('Have %d entities, %d previously transferred',
xfer_count, existing_count)
logger.info('%d entities (%d bytes) transferred in %.1f seconds',
xfer_count, total, duration)
if self.error:
return 1
else:
return 0
class BulkMapperApp(BulkTransporterApp):
"""Class to encapsulate bulk map functionality."""
def __init__(self, *args, **kwargs):
BulkTransporterApp.__init__(self, *args, **kwargs)
def ReportStatus(self):
"""Display a message reporting the final status of the transfer."""
total_down, duration = self.throttle.TotalTransferred(
remote_api_throttle.BANDWIDTH_DOWN)
s_total_down, unused_duration = self.throttle.TotalTransferred(
remote_api_throttle.HTTPS_BANDWIDTH_DOWN)
total_down += s_total_down
total = total_down
xfer_count = self.progress_thread.EntitiesTransferred()
logger.info('The following may be inaccurate if any mapper tasks '
'encountered errors and had to be retried.')
logger.info('Applied mapper to %s entities.',
xfer_count)
logger.info('%s entities (%s bytes) transferred in %.1f seconds',
xfer_count, total, duration)
if self.error:
return 1
else:
return 0
def PrintUsageExit(code):
"""Prints usage information and exits with a status code.
Args:
code: Status code to pass to sys.exit() after displaying usage information.
"""
print __doc__ % {'arg0': sys.argv[0]}
sys.stdout.flush()
sys.stderr.flush()
sys.exit(code)
REQUIRED_OPTION = object()
FLAG_SPEC = ['debug',
'help',
'url=',
'filename=',
'batch_size=',
'kind=',
'num_threads=',
'bandwidth_limit=',
'rps_limit=',
'http_limit=',
'db_filename=',
'app_id=',
'config_file=',
'has_header',
'csv_has_header',
'auth_domain=',
'result_db_filename=',
'download',
'loader_opts=',
'exporter_opts=',
'log_file=',
'mapper_opts=',
'email=',
'passin',
'map',
'dry_run',
'dump',
'restore',
]
def ParseArguments(argv, die_fn=lambda: PrintUsageExit(1)):
"""Parses command-line arguments.
Prints out a help message if -h or --help is supplied.
Args:
argv: List of command-line arguments.
die_fn: Function to invoke to end the program.
Returns:
A dictionary containing the value of command-line options.
"""
opts, unused_args = getopt.getopt(
argv[1:],
'h',
FLAG_SPEC)
arg_dict = {}
arg_dict['url'] = REQUIRED_OPTION
arg_dict['filename'] = None
arg_dict['config_file'] = None
arg_dict['kind'] = None
arg_dict['batch_size'] = None
arg_dict['num_threads'] = DEFAULT_THREAD_COUNT
arg_dict['bandwidth_limit'] = DEFAULT_BANDWIDTH_LIMIT
arg_dict['rps_limit'] = DEFAULT_RPS_LIMIT
arg_dict['http_limit'] = DEFAULT_REQUEST_LIMIT
arg_dict['db_filename'] = None
arg_dict['app_id'] = ''
arg_dict['auth_domain'] = 'gmail.com'
arg_dict['has_header'] = False
arg_dict['result_db_filename'] = None
arg_dict['download'] = False
arg_dict['loader_opts'] = None
arg_dict['exporter_opts'] = None
arg_dict['debug'] = False
arg_dict['log_file'] = None
arg_dict['email'] = None
arg_dict['passin'] = False
arg_dict['mapper_opts'] = None
arg_dict['map'] = False
arg_dict['dry_run'] = False
arg_dict['dump'] = False
arg_dict['restore'] = False
def ExpandFilename(filename):
"""Expand shell variables and ~usernames in filename."""
return os.path.expandvars(os.path.expanduser(filename))
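# Example (added; the path is hypothetical): ExpandFilename('~/data/$KIND.csv')
# expands both the leading '~' and the $KIND environment variable when it is set.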
for option, value in opts:
if option == '--debug':
arg_dict['debug'] = True
elif option in ('-h', '--help'):
PrintUsageExit(0)
elif option == '--url':
arg_dict['url'] = value
elif option == '--filename':
arg_dict['filename'] = ExpandFilename(value)
elif option == '--batch_size':
arg_dict['batch_size'] = int(value)
elif option == '--kind':
arg_dict['kind'] = value
elif option == '--num_threads':
arg_dict['num_threads'] = int(value)
elif option == '--bandwidth_limit':
arg_dict['bandwidth_limit'] = int(value)
elif option == '--rps_limit':
arg_dict['rps_limit'] = int(value)
elif option == '--http_limit':
arg_dict['http_limit'] = int(value)
elif option == '--db_filename':
arg_dict['db_filename'] = ExpandFilename(value)
elif option == '--app_id':
arg_dict['app_id'] = value
elif option == '--config_file':
arg_dict['config_file'] = ExpandFilename(value)
elif option == '--auth_domain':
arg_dict['auth_domain'] = value
elif option == '--has_header':
arg_dict['has_header'] = True
elif option == '--csv_has_header':
print >>sys.stderr, ('--csv_has_header is deprecated, please use '
'--has_header.')
arg_dict['has_header'] = True
elif option == '--result_db_filename':
arg_dict['result_db_filename'] = ExpandFilename(value)
elif option == '--download':
arg_dict['download'] = True
elif option == '--loader_opts':
arg_dict['loader_opts'] = value
elif option == '--exporter_opts':
arg_dict['exporter_opts'] = value
elif option == '--log_file':
arg_dict['log_file'] = ExpandFilename(value)
elif option == '--email':
arg_dict['email'] = value
elif option == '--passin':
arg_dict['passin'] = True
elif option == '--map':
arg_dict['map'] = True
elif option == '--mapper_opts':
arg_dict['mapper_opts'] = value
elif option == '--dry_run':
arg_dict['dry_run'] = True
elif option == '--dump':
arg_dict['dump'] = True
elif option == '--restore':
arg_dict['restore'] = True
return ProcessArguments(arg_dict, die_fn=die_fn)
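# Illustrative sketch (added; not part of the original module, flag values are
# hypothetical): ParseArguments() turns a getopt-style command line into the
# option dictionary that ProcessArguments() then validates, e.g.
#
#   arg_dict = ParseArguments(['bulkloader.py',
#                              '--url=http://myapp.appspot.com/_ah/remote_api',
#                              '--kind=Greeting',
#                              '--filename=greetings.csv',
#                              '--config_file=loader_config.py'])
#
# Unknown flags raise getopt.GetoptError; missing required flags are reported
# later by ProcessArguments() through die_fn.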
def ThrottleLayout(bandwidth_limit, http_limit, rps_limit):
"""Return a dictionary indicating the throttle options."""
bulkloader_limits = dict(remote_api_throttle.NO_LIMITS)
bulkloader_limits.update({
remote_api_throttle.BANDWIDTH_UP: bandwidth_limit,
remote_api_throttle.BANDWIDTH_DOWN: bandwidth_limit,
remote_api_throttle.REQUESTS: http_limit,
remote_api_throttle.HTTPS_BANDWIDTH_UP: bandwidth_limit,
remote_api_throttle.HTTPS_BANDWIDTH_DOWN: bandwidth_limit,
remote_api_throttle.HTTPS_REQUESTS: http_limit,
remote_api_throttle.ENTITIES_FETCHED: rps_limit,
remote_api_throttle.ENTITIES_MODIFIED: rps_limit,
})
return bulkloader_limits
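# Illustrative sketch (added; the numeric limits are hypothetical):
#
#   layout = ThrottleLayout(bandwidth_limit=250000, http_limit=8, rps_limit=20)
#   throttle = remote_api_throttle.Throttle(layout=layout)
#
# The four bandwidth keys (HTTP/HTTPS, up/down) all share bandwidth_limit, the
# two request keys share http_limit, and the two entity keys share rps_limit.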
def CheckOutputFile(filename):
"""Check that the given file does not exist and can be opened for writing.
Args:
filename: The name of the file.
Raises:
FileExistsError: if the given filename already exists.
FileNotWritableError: if the directory of the given filename is not writable.
"""
full_path = os.path.abspath(filename)
if os.path.exists(full_path):
raise FileExistsError('%s: output file exists' % filename)
elif not os.access(os.path.dirname(full_path), os.W_OK):
raise FileNotWritableError(
'%s: not writable' % os.path.dirname(full_path))
def LoadConfig(config_file_name, exit_fn=sys.exit):
"""Loads a config file and registers any Loader classes present.
Args:
config_file_name: The name of the configuration file.
exit_fn: Used for dependency injection.
"""
if config_file_name:
config_file = open(config_file_name, 'r')
try:
bulkloader_config = imp.load_module(
'bulkloader_config', config_file, config_file_name,
('', 'r', imp.PY_SOURCE))
sys.modules['bulkloader_config'] = bulkloader_config
if hasattr(bulkloader_config, 'loaders'):
for cls in bulkloader_config.loaders:
Loader.RegisterLoader(cls())
if hasattr(bulkloader_config, 'exporters'):
for cls in bulkloader_config.exporters:
Exporter.RegisterExporter(cls())
if hasattr(bulkloader_config, 'mappers'):
for cls in bulkloader_config.mappers:
Mapper.RegisterMapper(cls())
except NameError, e:
m = re.search(r"[^']*'([^']*)'.*", str(e))
if m.groups() and m.group(1) == 'Loader':
print >>sys.stderr, """
The config file format has changed and you appear to be using an old-style
config file. Please make the following changes:
1. At the top of the file, add this:
from google.appengine.tools.bulkloader import Loader
2. For each of your Loader subclasses add the following at the end of the
__init__ definition:
self.alias_old_names()
3. At the bottom of the file, add this:
loaders = [MyLoader1,...,MyLoaderN]
Where MyLoader1,...,MyLoaderN are the Loader subclasses you want the bulkloader
to have access to.
"""
exit_fn(1)
else:
raise
except Exception, e:
if isinstance(e, NameClashError) or 'bulkloader_config' in vars() and (
hasattr(bulkloader_config, 'bulkloader') and
isinstance(e, bulkloader_config.bulkloader.NameClashError)):
print >> sys.stderr, (
'Found both %s and %s while aliasing old names on %s.'%
(e.old_name, e.new_name, e.klass))
exit_fn(1)
else:
raise
def GetArgument(kwargs, name, die_fn):
"""Get the value of the key name in kwargs, or die with die_fn.
Args:
kwargs: A dictionary containing the options for the bulkloader.
name: The name of a bulkloader option.
die_fn: The function to call to exit the program.
Returns:
The value of kwargs[name] if name is present in kwargs.
"""
if name in kwargs:
return kwargs[name]
else:
print >>sys.stderr, '%s argument required' % name
die_fn()
def _MakeSignature(app_id=None,
url=None,
kind=None,
db_filename=None,
perform_map=None,
download=None,
has_header=None,
result_db_filename=None,
dump=None,
restore=None):
"""Returns a string that identifies the important options for the database."""
if download:
result_db_line = 'result_db: %s' % result_db_filename
else:
result_db_line = ''
return u"""
app_id: %s
url: %s
kind: %s
download: %s
map: %s
dump: %s
restore: %s
progress_db: %s
has_header: %s
%s
""" % (app_id, url, kind, download, perform_map, dump, restore, db_filename,
has_header, result_db_line)
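# Added note with an illustrative call (values are hypothetical): this signature
# string is passed to the ProgressDatabase/ResultDatabase constructors in
# _PerformBulkload() below, presumably so a progress file created with different
# options can be detected, e.g.
#
#   sig = _MakeSignature(app_id='myapp', url='http://myapp.appspot.com/_ah/remote_api',
#                        kind='Greeting', db_filename='progress.sql3',
#                        perform_map=False, download=False, has_header=True,
#                        result_db_filename=None, dump=False, restore=False)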
def ProcessArguments(arg_dict,
die_fn=lambda: sys.exit(1)):
"""Processes non command-line input arguments.
Args:
arg_dict: Dictionary containing the values of bulkloader options.
die_fn: Function to call in case of an error during argument processing.
Returns:
A dictionary of bulkloader options.
"""
app_id = GetArgument(arg_dict, 'app_id', die_fn)
url = GetArgument(arg_dict, 'url', die_fn)
dump = GetArgument(arg_dict, 'dump', die_fn)
restore = GetArgument(arg_dict, 'restore', die_fn)
filename = GetArgument(arg_dict, 'filename', die_fn)
batch_size = GetArgument(arg_dict, 'batch_size', die_fn)
kind = GetArgument(arg_dict, 'kind', die_fn)
db_filename = GetArgument(arg_dict, 'db_filename', die_fn)
config_file = GetArgument(arg_dict, 'config_file', die_fn)
result_db_filename = GetArgument(arg_dict, 'result_db_filename', die_fn)
download = GetArgument(arg_dict, 'download', die_fn)
log_file = GetArgument(arg_dict, 'log_file', die_fn)
perform_map = GetArgument(arg_dict, 'map', die_fn)
errors = []
if batch_size is None:
if download or perform_map:
arg_dict['batch_size'] = DEFAULT_DOWNLOAD_BATCH_SIZE
else:
arg_dict['batch_size'] = DEFAULT_BATCH_SIZE
elif batch_size <= 0:
errors.append('batch_size must be at least 1')
if db_filename is None:
arg_dict['db_filename'] = time.strftime(
'bulkloader-progress-%Y%m%d.%H%M%S.sql3')
if result_db_filename is None:
arg_dict['result_db_filename'] = time.strftime(
'bulkloader-results-%Y%m%d.%H%M%S.sql3')
if log_file is None:
arg_dict['log_file'] = time.strftime('bulkloader-log-%Y%m%d.%H%M%S')
required = '%s argument required'
if config_file is None and not dump and not restore:
errors.append('One of --config_file, --dump, or --restore is required')
if url is REQUIRED_OPTION:
errors.append(required % 'url')
if not filename and not perform_map:
errors.append(required % 'filename')
if kind is None:
if download or perform_map:
errors.append('kind argument required for this operation')
elif not dump and not restore:
errors.append(
'kind argument required unless --dump or --restore is specified')
if not app_id:
if url and url is not REQUIRED_OPTION:
(unused_scheme, host_port, unused_url_path,
unused_query, unused_fragment) = urlparse.urlsplit(url)
suffix_idx = host_port.find('.appspot.com')
if suffix_idx > -1:
arg_dict['app_id'] = host_port[:suffix_idx]
elif host_port.split(':')[0].endswith('google.com'):
arg_dict['app_id'] = host_port.split('.')[0]
else:
errors.append('app_id argument required for non appspot.com domains')
if errors:
print >>sys.stderr, '\n'.join(errors)
die_fn()
return arg_dict
def ParseKind(kind):
if kind and kind[0] == '(' and kind[-1] == ')':
return tuple(kind[1:-1].split(','))
else:
return kind
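# Illustrative examples (added; kind names are hypothetical):
#
#   ParseKind('Greeting')          # -> 'Greeting'
#   ParseKind('(Parent,Child)')    # -> ('Parent', 'Child')
#
# A parenthesized kind string is split on commas into a tuple; anything else is
# returned unchanged.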
def _PerformBulkload(arg_dict,
check_file=CheckFile,
check_output_file=CheckOutputFile):
"""Runs the bulkloader, given the command line options.
Args:
arg_dict: Dictionary of bulkloader options.
check_file: Used for dependency injection.
check_output_file: Used for dependency injection.
Returns:
An exit code.
Raises:
ConfigurationError: if inconsistent options are passed.
"""
app_id = arg_dict['app_id']
url = arg_dict['url']
filename = arg_dict['filename']
batch_size = arg_dict['batch_size']
kind = arg_dict['kind']
num_threads = arg_dict['num_threads']
bandwidth_limit = arg_dict['bandwidth_limit']
rps_limit = arg_dict['rps_limit']
http_limit = arg_dict['http_limit']
db_filename = arg_dict['db_filename']
config_file = arg_dict['config_file']
auth_domain = arg_dict['auth_domain']
has_header = arg_dict['has_header']
download = arg_dict['download']
result_db_filename = arg_dict['result_db_filename']
loader_opts = arg_dict['loader_opts']
exporter_opts = arg_dict['exporter_opts']
mapper_opts = arg_dict['mapper_opts']
email = arg_dict['email']
passin = arg_dict['passin']
perform_map = arg_dict['map']
dump = arg_dict['dump']
restore = arg_dict['restore']
os.environ['AUTH_DOMAIN'] = auth_domain
kind = ParseKind(kind)
if not dump and not restore:
check_file(config_file)
if download and perform_map:
logger.error('--download and --map are mutually exclusive.')
if download or dump:
check_output_file(filename)
elif not perform_map:
check_file(filename)
if dump:
Exporter.RegisterExporter(DumpExporter(kind, result_db_filename))
elif restore:
Loader.RegisterLoader(RestoreLoader(kind, app_id))
else:
LoadConfig(config_file)
os.environ['APPLICATION_ID'] = app_id
throttle_layout = ThrottleLayout(bandwidth_limit, http_limit, rps_limit)
logger.info('Throttling transfers:')
logger.info('Bandwidth: %s bytes/second', bandwidth_limit)
logger.info('HTTP connections: %s/second', http_limit)
logger.info('Entities inserted/fetched/modified: %s/second', rps_limit)
throttle = remote_api_throttle.Throttle(layout=throttle_layout)
signature = _MakeSignature(app_id=app_id,
url=url,
kind=kind,
db_filename=db_filename,
download=download,
perform_map=perform_map,
has_header=has_header,
result_db_filename=result_db_filename,
dump=dump,
restore=restore)
max_queue_size = max(DEFAULT_QUEUE_SIZE, 3 * num_threads + 5)
if db_filename == 'skip':
progress_db = StubProgressDatabase()
elif not download and not perform_map and not dump:
progress_db = ProgressDatabase(db_filename, signature)
else:
progress_db = ExportProgressDatabase(db_filename, signature)
return_code = 1
if not download and not perform_map and not dump:
loader = Loader.RegisteredLoader(kind)
try:
loader.initialize(filename, loader_opts)
workitem_generator_factory = GetCSVGeneratorFactory(
kind, filename, batch_size, has_header)
app = BulkUploaderApp(arg_dict,
workitem_generator_factory,
throttle,
progress_db,
ProgressTrackerThread,
max_queue_size,
RequestManager,
DataSourceThread,
Queue.Queue)
try:
return_code = app.Run()
except AuthenticationError:
logger.info('Authentication Failed')
finally:
loader.finalize()
elif not perform_map:
result_db = ResultDatabase(result_db_filename, signature)
exporter = Exporter.RegisteredExporter(kind)
try:
exporter.initialize(filename, exporter_opts)
def KeyRangeGeneratorFactory(request_manager, progress_queue,
progress_gen):
return KeyRangeItemGenerator(request_manager, kind, progress_queue,
progress_gen, DownloadItem)
def ExportProgressThreadFactory(progress_queue, progress_db):
return ExportProgressThread(kind,
progress_queue,
progress_db,
result_db)
app = BulkDownloaderApp(arg_dict,
KeyRangeGeneratorFactory,
throttle,
progress_db,
ExportProgressThreadFactory,
0,
RequestManager,
DataSourceThread,
Queue.Queue)
try:
return_code = app.Run()
except AuthenticationError:
logger.info('Authentication Failed')
finally:
exporter.finalize()
elif not download:
mapper = Mapper.RegisteredMapper(kind)
try:
mapper.initialize(mapper_opts)
def KeyRangeGeneratorFactory(request_manager, progress_queue,
progress_gen):
return KeyRangeItemGenerator(request_manager, kind, progress_queue,
progress_gen, MapperItem)
def MapperProgressThreadFactory(progress_queue, progress_db):
return MapperProgressThread(kind,
progress_queue,
progress_db)
app = BulkMapperApp(arg_dict,
KeyRangeGeneratorFactory,
throttle,
progress_db,
MapperProgressThreadFactory,
0,
RequestManager,
DataSourceThread,
Queue.Queue)
try:
return_code = app.Run()
except AuthenticationError:
logger.info('Authentication Failed')
finally:
mapper.finalize()
return return_code
def SetupLogging(arg_dict):
"""Sets up logging for the bulkloader.
Args:
arg_dict: Dictionary mapping flag names to their arguments.
"""
format = '[%(levelname)-8s %(asctime)s %(filename)s] %(message)s'
debug = arg_dict['debug']
log_file = arg_dict['log_file']
logger.setLevel(logging.DEBUG)
logger.propagate = False
file_handler = logging.FileHandler(log_file, 'w')
file_handler.setLevel(logging.DEBUG)
file_formatter = logging.Formatter(format)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
console = logging.StreamHandler()
level = logging.INFO
if debug:
level = logging.DEBUG
console.setLevel(level)
console_format = '[%(levelname)-8s] %(message)s'
formatter = logging.Formatter(console_format)
console.setFormatter(formatter)
logger.addHandler(console)
logger.info('Logging to %s', log_file)
remote_api_throttle.logger.setLevel(level)
remote_api_throttle.logger.addHandler(file_handler)
remote_api_throttle.logger.addHandler(console)
appengine_rpc.logger.setLevel(logging.WARN)
adaptive_thread_pool.logger.setLevel(logging.DEBUG)
adaptive_thread_pool.logger.addHandler(console)
adaptive_thread_pool.logger.addHandler(file_handler)
adaptive_thread_pool.logger.propagate = False
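# Added note (not in the original module): after SetupLogging() the bulkloader
# loggers send full DEBUG detail to the per-run log file, while the console
# handler filters at INFO unless --debug was given, so interactive output stays
# readable and the file keeps the complete trace.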
def Run(arg_dict):
"""Sets up and runs the bulkloader, given the options as keyword arguments.
Args:
arg_dict: Dictionary of bulkloader options
Returns:
An exit code.
"""
arg_dict = ProcessArguments(arg_dict)
SetupLogging(arg_dict)
return _PerformBulkload(arg_dict)
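# Illustrative sketch (added; option values are hypothetical): Run() is the
# programmatic entry point and expects a dictionary shaped like the one built
# by ParseArguments(), e.g.
#
#   exit_code = Run({'app_id': '',
#                    'url': 'http://myapp.appspot.com/_ah/remote_api',
#                    'kind': 'Greeting',
#                    'filename': 'greetings.csv',
#                    'config_file': 'loader_config.py',
#                    'download': False,
#                    # ... plus the remaining keys that ParseArguments()
#                    # normally defaults, which _PerformBulkload() reads directly.
#                    })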
def main(argv):
"""Runs the importer from the command line."""
arg_dict = ParseArguments(argv)
errors = ['%s argument required' % key
for (key, value) in arg_dict.iteritems()
if value is REQUIRED_OPTION]
if errors:
print >>sys.stderr, '\n'.join(errors)
PrintUsageExit(1)
SetupLogging(arg_dict)
return _PerformBulkload(arg_dict)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
mit
|
adieu/allbuttonspressed
|
docutils/parsers/rst/languages/eo.py
|
6
|
3808
|
# $Id: eo.py 6460 2010-10-29 22:18:44Z milde $
# Author: Marcelo Huerta San Martin <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Esperanto-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'atentu': 'attention',
u'zorgu': 'caution',
u'dangxero': 'danger',
u'dan\u011dero': 'danger',
u'eraro': 'error',
u'spuro': 'hint',
u'grava': 'important',
u'noto': 'note',
u'helpeto': 'tip',
u'averto': 'warning',
u'admono': 'admonition',
u'flankteksto': 'sidebar',
u'temo': 'topic',
u'linea-bloko': 'line-block',
u'analizota-literalo': 'parsed-literal',
u'rubriko': 'rubric',
u'epigrafo': 'epigraph',
u'elstarajxoj': 'highlights',
u'elstara\u0135oj': 'highlights',
u'ekstera-citajxo': 'pull-quote',
u'ekstera-cita\u0135o': 'pull-quote',
u'kombinajxo': 'compound',
u'kombina\u0135o': 'compound',
u'tekstingo': 'container',
u'enhavilo': 'container',
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
u'tabelo': 'table',
u'tabelo-vdk': 'csv-table', # "valoroj disigitaj per komoj"
u'tabelo-csv': 'csv-table',
u'tabelo-lista': 'list-table',
u'meta': 'meta',
'math (translation required)': 'math',
#'imagemap': 'imagemap',
u'bildo': 'image',
u'figuro': 'figure',
u'inkludi': 'include',
u'senanaliza': 'raw',
u'anstatauxi': 'replace',
u'anstata\u016di': 'replace',
u'unicode': 'unicode',
u'dato': 'date',
u'klaso': 'class',
u'rolo': 'role',
u'preterlasita-rolo': 'default-role',
u'titolo': 'title',
u'enhavo': 'contents',
u'seknum': 'sectnum',
u'sekcia-numerado': 'sectnum',
u'kapsekcio': 'header',
u'piedsekcio': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'celaj-notoj': 'target-notes',
u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Esperanto name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
u'mallongigo': 'abbreviation',
u'mall': 'abbreviation',
u'komenclitero': 'acronym',
u'kl': 'acronym',
u'indekso': 'index',
u'i': 'index',
u'subskribo': 'subscript',
u'sub': 'subscript',
u'supraskribo': 'superscript',
u'sup': 'superscript',
u'titola-referenco': 'title-reference',
u'titolo': 'title-reference',
u't': 'title-reference',
u'pep-referenco': 'pep-reference',
u'pep': 'pep-reference',
u'rfc-referenco': 'rfc-reference',
u'rfc': 'rfc-reference',
u'emfazo': 'emphasis',
u'forta': 'strong',
u'litera': 'literal',
'math (translation required)': 'math',
u'nomita-referenco': 'named-reference',
u'nenomita-referenco': 'anonymous-reference',
u'piednota-referenco': 'footnote-reference',
u'citajxo-referenco': 'citation-reference',
u'cita\u0135o-referenco': 'citation-reference',
u'anstatauxa-referenco': 'substitution-reference',
u'anstata\u016da-referenco': 'substitution-reference',
u'celo': 'target',
u'uri-referenco': 'uri-reference',
u'uri': 'uri-reference',
u'url': 'uri-reference',
u'senanaliza': 'raw',
}
"""Mapping of Esperanto role names to canonical role names for interpreted text.
"""
|
bsd-3-clause
|
Syralist/pixels_clock
|
clock.py
|
1
|
3227
|
# -*- coding: utf-8 -*-
import pygame, led, sys, os, random, csv
import smbus
from pygame.locals import *
from led.PixelEventHandler import *
from time import gmtime, strftime
""" A very simple arcade shooter demo :)
"""
random.seed()
BLACK = pygame.Color(0,0,0)
WHITE = pygame.Color(255, 255, 255)
RED = pygame.Color(255, 0, 0)
GREEN = pygame.Color(0, 255, 0)
adress = 0x48
LM75 = smbus.SMBus(1)
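# Added note: 0x48 is the usual I2C address of an LM75 temperature sensor on
# bus 1; LM75.read_byte(adress) in the main loop is assumed to return the
# integer temperature in degrees Celsius.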
# detect if a serial/USB port is given as argument
hasSerialPortParameter = ( sys.argv.__len__() > 1 )
# use a 90 x 20 matrix when no USB port for the real display is provided
fallbackSize = ( 90, 20 )
if hasSerialPortParameter:
serialPort = sys.argv[1]
print "INITIALIZING WITH USB-PORT: " + serialPort
ledDisplay = led.teensy.TeensyDisplay(serialPort, fallbackSize)
else:
print "INITIALIZING WITH SERVER DISPLAY AND SIMULATOR."
ledDisplay = led.dsclient.DisplayServerClientDisplay('localhost', 8123, fallbackSize)
# use same size for sim and real LED panel
size = ledDisplay.size()
simDisplay = led.sim.SimDisplay(size)
screen = pygame.Surface(size)
gamestate = 0 #1=alive; 0=dead
def main():
pygame.init()
pygame.font.init()
clock = pygame.time.Clock()
pygame.joystick.init()
gameover = False
# Initialize first joystick
if pygame.joystick.get_count() > 0:
stick = pygame.joystick.Joystick(0)
stick.init()
global gamestate
scored = False
# Clear event list before starting the game
pygame.event.clear()
while not gameover:
# Process event queue
for pgevent in pygame.event.get():
if pgevent.type == QUIT:
pygame.quit()
sys.exit()
event = process_event(pgevent)
# End the game
if event.button == EXIT:
gameover = True
# Keypresses on keyboard and joystick axis motions / button presses
elif event.type == PUSH:
# Movements
if event.button == UP:
pass
elif event.button == DOWN:
pass
elif event.button == RIGHT:
pass
elif event.button == LEFT:
pass
# Tower selection
elif event.button == B2:
pass
# Tower placement
elif event.button == P1:
gameover = True
# Only on Keyboard
elif pgevent.type == KEYDOWN and pgevent.key == K_ESCAPE:
gameover = True
screen.fill(BLACK)
font = pygame.font.SysFont("Arial", 12)
text1 = font.render(strftime("%H:%M:%S"), 0, RED)
text1pos = text1.get_rect()
text1pos.midtop = (screen.get_rect().centerx, -1)
screen.blit(text1,text1pos)
try:
temp = LM75.read_byte(adress)
except:
temp = -1
text2 = font.render("T: "+str(temp)+"'C", 0, GREEN)
text2pos = text2.get_rect()
text2pos.midbottom = (screen.get_rect().centerx, 23)
screen.blit(text2,text2pos)
simDisplay.update(screen)
ledDisplay.update(screen)
clock.tick(10)
main()
|
gpl-3.0
|
bruce3557/NTHUOJ_web
|
problem/admin.py
|
4
|
1385
|
'''
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.contrib import admin
from problem.models import Problem, Testcase, Submission, SubmissionDetail, Tag
# Register your models here.
admin.site.register(Problem)
admin.site.register(Testcase)
admin.site.register(Submission)
admin.site.register(SubmissionDetail)
admin.site.register(Tag)
|
mit
|
ProjexSoftware/projexui
|
projexui/widgets/xquerybuilderwidget/xquerybuilderwidget.py
|
2
|
9247
|
#!/usr/bin/python
""" Defines an interface to allow users to build their queries on the fly. """
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software'
__license__ = 'LGPL'
# maintenance information
__maintainer__ = 'Projex Software'
__email__ = '[email protected]'
#------------------------------------------------------------------------------
from projex.text import nativestring
from projexui.qt import Signal
from projexui.qt.QtCore import Qt
from projexui.qt.QtGui import QWidget,\
QVBoxLayout
import projexui
from projexui.widgets.xquerybuilderwidget.xqueryrule \
import XQueryRule
from projexui.widgets.xquerybuilderwidget.xquerylinewidget \
import XQueryLineWidget
class XQueryBuilderWidget(QWidget):
""" """
saveRequested = Signal()
resetRequested = Signal()
cancelRequested = Signal()
def __init__( self, parent = None ):
super(XQueryBuilderWidget, self).__init__( parent )
# load the user interface
projexui.loadUi(__file__, self)
self.setMinimumWidth(470)
# define custom properties
self._rules = {}
self._defaultQuery = []
self._completionTerms = []
self._minimumCount = 1
# set default properties
self._container = QWidget(self)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(2)
layout.addStretch(1)
self._container.setLayout(layout)
self.uiQueryAREA.setWidget(self._container)
# create connections
self.uiResetBTN.clicked.connect( self.emitResetRequested )
self.uiSaveBTN.clicked.connect( self.emitSaveRequested )
self.uiCancelBTN.clicked.connect( self.emitCancelRequested )
self.resetRequested.connect( self.reset )
def addLineWidget( self, query = None ):
"""
Adds a new line widget to the system with the given values.
:param query | (<str> term, <str> operator, <str> value) || None
"""
widget = XQueryLineWidget(self)
widget.setTerms(sorted(self._rules.keys()))
widget.setQuery(query)
index = self._container.layout().count() - 1
self._container.layout().insertWidget(index, widget)
widget.addRequested.connect( self.addLineWidget )
widget.removeRequested.connect( self.removeLineWidget )
# update the remove enabled options for these widgets
self.updateRemoveEnabled()
def addRule( self, rule ):
"""
Adds a rule to the system.
:param rule | <XQueryRule>
"""
self._rules[rule.term()] = rule
self.updateRules()
def clear( self ):
"""
Clears out all the widgets from the system.
"""
for lineWidget in self.lineWidgets():
lineWidget.setParent(None)
lineWidget.deleteLater()
def completionTerms( self ):
"""
Returns the list of terms that will be used as a global override
for completion terms when the query rule generates a QLineEdit instance.
:return [<str>, ..]
"""
return self._completionTerms
def count( self ):
"""
Returns the count of the line widgets in the system.
:return <int>
"""
return len(self.lineWidgets())
def currentQuery( self ):
"""
Returns the current query string for this widget.
:return [(<str> term, <str> operator, <str> value), ..]
"""
widgets = self.lineWidgets()
output = []
for widget in widgets:
output.append(widget.query())
return output
def defaultQuery( self ):
"""
Returns the default query for the system.
:return [(<str> term, <str> operator, <str> value), ..]
"""
return self._defaultQuery
def keyPressEvent( self, event ):
"""
Emits the save requested signal for this builder when the Enter
or Return key is pressed.
:param event | <QKeyEvent>
"""
if ( event.key() in (Qt.Key_Enter, Qt.Key_Return) ):
self.emitSaveRequested()
super(XQueryBuilderWidget, self).keyPressEvent(event)
def emitCancelRequested( self ):
"""
Emits the cancel requested signal.
"""
if ( not self.signalsBlocked() ):
self.cancelRequested.emit()
def emitResetRequested( self ):
"""
Emits the reset requested signal.
"""
if ( not self.signalsBlocked() ):
self.resetRequested.emit()
def emitSaveRequested( self ):
"""
Emits the save requested signal.
"""
if ( not self.signalsBlocked() ):
self.saveRequested.emit()
def findRule( self, term ):
"""
Looks up a rule by the given term.
:param term | <str>
:return <XQueryRule> || None
"""
return self._rules.get(nativestring(term))
def removeLineWidget( self, widget ):
"""
Removes the line widget from the query.
:param widget | <XQueryLineWidget>
"""
widget.setParent(None)
widget.deleteLater()
self.updateRemoveEnabled()
def minimumCount( self ):
"""
Defines the minimum number of query widgets that are allowed.
:return <int>
"""
return self._minimumCount
def lineWidgets( self ):
"""
Returns a list of line widgets for this system.
:return [<XQueryLineWidget>, ..]
"""
return self.findChildren(XQueryLineWidget)
def reset( self ):
"""
Resets the system to the default query.
"""
self.setCurrentQuery(self.defaultQuery())
def setCompletionTerms( self, terms ):
"""
Sets the list of terms that will be used as a global override
for completion terms when the query rule generates a QLineEdit instance.
:param terms | [<str>, ..]
"""
self._completionTerms = terms
def setCurrentQuery( self, query ):
"""
Sets the query for this system to the given query.
:param query | [(<str> term, <str> operator, <str> value), ..]
"""
self.clear()
for entry in query:
self.addLineWidget(entry)
# make sure we have the minimum number of widgets
for i in range(self.minimumCount() - len(query)):
self.addLineWidget()
def setDefaultQuery( self, query ):
"""
Sets the default query that will be used when the user clicks on the \
reset button or the reset method is called.
:param query | [(<str> term, <str> operator, <str> value), ..]
"""
self._defaultQuery = query[:]
def setMinimumCount( self, count ):
"""
Sets the minimum number of line widgets that are allowed at any \
given time.
:param count | <int>
"""
self._minimumCount = count
def setRules( self, rules ):
"""
Sets all the rules for this builder.
:param rules | [<XQueryRule>, ..]
"""
if ( type(rules) in (list, tuple) ):
self._rules = dict([(x.term(), x) for x in rules])
self.updateRules()
return True
elif ( type(rules) == dict ):
self._rules = rules.copy()
self.updateRules()
return True
else:
return False
def setTerms( self, terms ):
"""
Sets a simple rule list by accepting a list of strings for terms. \
This is a convenience method for the setRules method.
:param terms | [<str> term, ..]
"""
return self.setRules([XQueryRule(term = term) for term in terms])
def updateRemoveEnabled( self ):
"""
Updates the remove-enabled state based on the current number of line widgets.
"""
lineWidgets = self.lineWidgets()
count = len(lineWidgets)
state = self.minimumCount() < count
for widget in lineWidgets:
widget.setRemoveEnabled(state)
def updateRules( self ):
"""
Updates the query line items to match the latest rule options.
"""
terms = sorted(self._rules.keys())
for child in self.lineWidgets():
child.setTerms(terms)
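# Illustrative usage sketch (added; the terms, the 'is' operator and the
# on_save callback are hypothetical):
#
#   widget = XQueryBuilderWidget(parent)
#   widget.setTerms(['name', 'status', 'owner'])
#   widget.setDefaultQuery([('status', 'is', 'open')])
#   widget.reset()                                   # builds one line per entry
#   widget.saveRequested.connect(
#       lambda: on_save(widget.currentQuery()))      # [(term, op, value), ..]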
|
lgpl-3.0
|
diagramsoftware/odoo
|
addons/analytic_contract_hr_expense/analytic_contract_hr_expense.py
|
223
|
7860
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons.decimal_precision import decimal_precision as dp
class account_analytic_account(osv.osv):
_name = "account.analytic.account"
_inherit = "account.analytic.account"
def _get_total_estimation(self, account):
tot_est = super(account_analytic_account, self)._get_total_estimation(account)
if account.charge_expenses:
tot_est += account.est_expenses
return tot_est
def _get_total_invoiced(self, account):
total_invoiced = super(account_analytic_account, self)._get_total_invoiced(account)
if account.charge_expenses:
total_invoiced += account.expense_invoiced
return total_invoiced
def _get_total_remaining(self, account):
total_remaining = super(account_analytic_account, self)._get_total_remaining(account)
if account.charge_expenses:
total_remaining += account.remaining_expense
return total_remaining
def _get_total_toinvoice(self, account):
total_toinvoice = super(account_analytic_account, self)._get_total_toinvoice(account)
if account.charge_expenses:
total_toinvoice += account.expense_to_invoice
return total_toinvoice
def _remaining_expnse_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.est_expenses != 0:
res[account.id] = max(account.est_expenses - account.expense_invoiced, account.expense_to_invoice)
else:
res[account.id]=0.0
return res
def _expense_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
#We don't want consolidation for each of these fields because these complex computations are resource-greedy.
for account in self.pool.get('account.analytic.account').browse(cr, uid, ids, context=context):
cr.execute("""
SELECT product_id, sum(amount), user_id, to_invoice, sum(unit_amount), product_uom_id, line.name
FROM account_analytic_line line
LEFT JOIN account_analytic_journal journal ON (journal.id = line.journal_id)
WHERE account_id = %s
AND journal.type = 'purchase'
AND invoice_id IS NULL
AND to_invoice IS NOT NULL
GROUP BY product_id, user_id, to_invoice, product_uom_id, line.name""", (account.id,))
res[account.id] = 0.0
for product_id, total_amount, user_id, factor_id, qty, uom, line_name in cr.fetchall():
#the amount to reinvoice is the real cost. We don't use the pricelist
total_amount = -total_amount
factor = self.pool.get('hr_timesheet_invoice.factor').browse(cr, uid, factor_id, context=context)
res[account.id] += total_amount * (100 - factor.factor or 0.0) / 100.0
return res
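# Added note (illustrative, values hypothetical): the accumulation above reads
# as "amount still to invoice = real cost * (100 - factor) / 100", where
# factor.factor is treated as a discount percentage. A 150.0 expense line with
# a 20% factor therefore contributes 150.0 * 80 / 100 = 120.0.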
def _expense_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
lines_obj = self.pool.get('account.analytic.line')
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = 0.0
line_ids = lines_obj.search(cr, uid, [('account_id','=', account.id), ('invoice_id','!=',False), ('to_invoice','!=', False), ('journal_id.type', '=', 'purchase')], context=context)
#Put invoices in a separate list in order not to count them twice
invoices = []
for line in lines_obj.browse(cr, uid, line_ids, context=context):
if line.invoice_id not in invoices:
invoices.append(line.invoice_id)
for invoice in invoices:
res[account.id] += invoice.amount_untaxed
return res
def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
result = super(account_analytic_account, self)._ca_invoiced_calc(cr, uid, ids, name, arg, context=context)
for acc in self.browse(cr, uid, result.keys(), context=context):
result[acc.id] = result[acc.id] - (acc.expense_invoiced or 0.0)
return result
_columns = {
'charge_expenses' : fields.boolean('Charge Expenses'),
'expense_invoiced' : fields.function(_expense_invoiced_calc, type="float"),
'expense_to_invoice' : fields.function(_expense_to_invoice_calc, type='float'),
'remaining_expense' : fields.function(_remaining_expnse_calc, type="float"),
'est_expenses': fields.float('Estimation of Expenses to Invoice'),
'ca_invoiced': fields.function(_ca_invoiced_calc, type='float', string='Invoiced Amount',
help="Total customer invoiced amount for this account.",
digits_compute=dp.get_precision('Account')),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['charge_expenses'] = template.charge_expenses
res['value']['est_expenses'] = template.est_expenses
return res
def open_hr_expense(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
dummy, act_window_id = mod_obj.get_object_reference(cr, uid, 'hr_expense', 'expense_all')
result = act_obj.read(cr, uid, [act_window_id], context=context)[0]
line_ids = self.pool.get('hr.expense.line').search(cr,uid,[('analytic_account', 'in', ids)])
result['domain'] = [('line_ids', 'in', line_ids)]
names = [account.name for account in self.browse(cr, uid, ids, context=context)]
result['name'] = _('Expenses of %s') % ','.join(names)
result['context'] = {'analytic_account': ids[0]}
result['view_type'] = 'form'
return result
def hr_to_invoice_expense(self, cr, uid, ids, context=None):
domain = [('invoice_id','=',False),('to_invoice','!=',False), ('journal_id.type', '=', 'purchase'), ('account_id', 'in', ids)]
names = [record.name for record in self.browse(cr, uid, ids, context=context)]
name = _('Expenses to Invoice of %s') % ','.join(names)
return {
'type': 'ir.actions.act_window',
'name': name,
'view_type': 'form',
'view_mode': 'tree,form',
'domain' : domain,
'res_model': 'account.analytic.line',
'nodestroy': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
maverickYQB/mqtt_zway
|
test/test_main_class.py
|
1
|
2397
|
#!/usr/bin/env python
'''
Created on March 20 2016
@author: popotvin
'''
import mqtt_zway_test
import mqtt_zway
import paho.mqtt.client as mqtt
import time
import traceback
date_time = mqtt_zway_test.date_time
# Main variables
mqtt_old_payload = []
mqtt_new_payload = []
payload = {}
publish_string = ""
# MQTT config
outgoing_topic = mqtt_zway_test.outgoing_topic
ongoing_topic = mqtt_zway_test.ongoing_topic
mqtt_ip = mqtt_zway_test.mqtt_ip
mqtt_port = mqtt_zway_test.mqtt_port
mqtt_client = mqtt_zway_test.mqtt_client
# ZWAY config
zway_ip = mqtt_zway_test.zway_ip
zway_port = mqtt_zway_test.zway_port
# list of connected devices on the zway server (device_id, device type, device level value)
zway_devList = mqtt_zway.zway_devList(zway_ip,zway_port)
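# Added note: the main loop below assumes zway_devList.dev_dict() returns a
# mapping whose values are dicts carrying at least an "id" and a "type" entry
# for every device exposed by the Z-Way server.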
# MQTT Client init
mqttc = mqtt.Client(str(mqtt_client))
mqttc.on_subscribe = mqtt_zway_test.on_subscribe
mqttc.on_message = mqtt_zway_test.on_message
mqttc.on_connect = mqtt_zway_test.on_connect
mqttc.connect(mqtt_ip, mqtt_port)
# Test zway and MQTT servers
zway_test = mqtt_zway.server_test(zway_ip, zway_port)
mqtt_test = mqtt_zway.server_test(mqtt_ip, mqtt_port)
# Main loop
if zway_test and mqtt_test:
print "ZWAY is running at: %s"% str(date_time)
print "MQTT is running at: %s"% str(date_time)
while True:
try:
mqttc.loop()
for key, value in zway_devList.dev_dict().iteritems():
for i,j in value.iteritems():
if i == "id":
dev_id = j
elif i == "type":
dev_type = j
zway_devList.dev_get(dev_id, dev_type)
payload["device_id"] = str(dev_id)
payload["type"] = str(dev_type)
payload["value"] = zway_devList.dev_value(dev_id, dev_type)
mqtt_new_payload.append(dict(payload))
time.sleep(0.1)
if mqtt_old_payload != mqtt_new_payload:
mqttc.publish(outgoing_topic, str(mqtt_new_payload))
#print "published to mQTT: %s" % mqtt_new_payload
mqtt_old_payload = mqtt_new_payload
mqtt_new_payload = []
time.sleep(0.5)
except Exception, e:
print traceback.print_exc()
break
elif not zway_test:
print "ZWAY server is offline"
elif not mqtt_test:
print "MQTT server is Offline"
|
gpl-3.0
|
j4/horizon
|
openstack_dashboard/urls.py
|
56
|
1979
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
URL patterns for the OpenStack Dashboard.
"""
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls.static import static # noqa
from django.conf.urls import url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns # noqa
import horizon
urlpatterns = patterns(
'',
url(r'^$', 'openstack_dashboard.views.splash', name='splash'),
url(r'^api/', include('openstack_dashboard.api.rest.urls')),
url(r'', include(horizon.urls)),
)
for u in getattr(settings, 'AUTHENTICATION_URLS', ['openstack_auth.urls']):
urlpatterns += patterns(
'',
url(r'^auth/', include(u))
)
# Development static app and project media serving using the staticfiles app.
urlpatterns += staticfiles_urlpatterns()
# Convenience function for serving user-uploaded media during
# development. Only active if DEBUG==True and the URL prefix is a local
# path. Production media should NOT be served by Django.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
urlpatterns += patterns(
'',
url(r'^500/$', 'django.views.defaults.server_error')
)
|
apache-2.0
|
tylerclair/py3canvas
|
py3canvas/apis/modules.py
|
1
|
54047
|
"""Modules API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class ModulesAPI(BaseCanvasAPI):
"""Modules API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for ModulesAPI."""
super(ModulesAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.ModulesAPI")
def list_modules(self, course_id, include=None, search_term=None, student_id=None):
"""
List modules.
List the modules in a course
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - include
"""- "items": Return module items inline if possible.
This parameter suggests that Canvas return module items directly
in the Module object JSON, to avoid having to make separate API
requests for each module when enumerating modules and items. Canvas
is free to omit 'items' for any particular module if it deems them
too numerous to return inline. Callers must be prepared to use the
{api:ContextModuleItemsApiController#index List Module Items API}
if items are not returned.
- "content_details": Requires include['items']. Returns additional
details with module items specific to their associated content items.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["items", "content_details"])
params["include"] = include
# OPTIONAL - search_term
"""The partial name of the modules (and module items, if include['items'] is
specified) to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules".format(**path), data=data, params=params, all_pages=True)
def show_module(self, id, course_id, include=None, student_id=None):
"""
Show module.
Get information about a single module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - include
"""- "items": Return module items inline if possible.
This parameter suggests that Canvas return module items directly
in the Module object JSON, to avoid having to make separate API
requests for each module when enumerating modules and items. Canvas
is free to omit 'items' for any particular module if it deems them
too numerous to return inline. Callers must be prepared to use the
{api:ContextModuleItemsApiController#index List Module Items API}
if items are not returned.
- "content_details": Requires include['items']. Returns additional
details with module items specific to their associated content items.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["items", "content_details"])
params["include"] = include
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def create_module(self, course_id, module_name, module_position=None, module_prerequisite_module_ids=None, module_publish_final_grade=None, module_require_sequential_progress=None, module_unlock_at=None):
"""
Create a module.
Create and return a new module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - module[name]
"""The name of the module"""
data["module[name]"] = module_name
# OPTIONAL - module[unlock_at]
"""The date the module will unlock"""
if module_unlock_at is not None:
if issubclass(module_unlock_at.__class__, str):
module_unlock_at = self._validate_iso8601_string(module_unlock_at)
elif issubclass(module_unlock_at.__class__, date) or issubclass(module_unlock_at.__class__, datetime):
module_unlock_at = module_unlock_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["module[unlock_at]"] = module_unlock_at
# OPTIONAL - module[position]
"""The position of this module in the course (1-based)"""
if module_position is not None:
data["module[position]"] = module_position
# OPTIONAL - module[require_sequential_progress]
"""Whether module items must be unlocked in order"""
if module_require_sequential_progress is not None:
data["module[require_sequential_progress]"] = module_require_sequential_progress
# OPTIONAL - module[prerequisite_module_ids]
"""IDs of Modules that must be completed before this one is unlocked.
Prerequisite modules must precede this module (i.e. have a lower position
value), otherwise they will be ignored"""
if module_prerequisite_module_ids is not None:
data["module[prerequisite_module_ids]"] = module_prerequisite_module_ids
# OPTIONAL - module[publish_final_grade]
"""Whether to publish the student's final grade for the course upon
completion of this module."""
if module_publish_final_grade is not None:
data["module[publish_final_grade]"] = module_publish_final_grade
self.logger.debug("POST /api/v1/courses/{course_id}/modules with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules".format(**path), data=data, params=params, single_item=True)
def update_module(self, id, course_id, module_name=None, module_position=None, module_prerequisite_module_ids=None, module_publish_final_grade=None, module_published=None, module_require_sequential_progress=None, module_unlock_at=None):
"""
Update a module.
Update and return an existing module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - module[name]
"""The name of the module"""
if module_name is not None:
data["module[name]"] = module_name
# OPTIONAL - module[unlock_at]
"""The date the module will unlock"""
if module_unlock_at is not None:
if issubclass(module_unlock_at.__class__, str):
module_unlock_at = self._validate_iso8601_string(module_unlock_at)
elif issubclass(module_unlock_at.__class__, date) or issubclass(module_unlock_at.__class__, datetime):
module_unlock_at = module_unlock_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["module[unlock_at]"] = module_unlock_at
# OPTIONAL - module[position]
"""The position of the module in the course (1-based)"""
if module_position is not None:
data["module[position]"] = module_position
# OPTIONAL - module[require_sequential_progress]
"""Whether module items must be unlocked in order"""
if module_require_sequential_progress is not None:
data["module[require_sequential_progress]"] = module_require_sequential_progress
# OPTIONAL - module[prerequisite_module_ids]
"""IDs of Modules that must be completed before this one is unlocked
Prerequisite modules must precede this module (i.e. have a lower position
value), otherwise they will be ignored"""
if module_prerequisite_module_ids is not None:
data["module[prerequisite_module_ids]"] = module_prerequisite_module_ids
# OPTIONAL - module[publish_final_grade]
"""Whether to publish the student's final grade for the course upon
completion of this module."""
if module_publish_final_grade is not None:
data["module[publish_final_grade]"] = module_publish_final_grade
# OPTIONAL - module[published]
"""Whether the module is published and visible to students"""
if module_published is not None:
data["module[published]"] = module_published
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def delete_module(self, id, course_id):
"""
Delete module.
Delete a module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("DELETE /api/v1/courses/{course_id}/modules/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/courses/{course_id}/modules/{id}".format(**path), data=data, params=params, single_item=True)
def re_lock_module_progressions(self, id, course_id):
"""
Re-lock module progressions.
Resets module progressions to their default locked state and
recalculates them based on the current requirements.
Adding progression requirements to an active course will not lock students
out of modules they have already unlocked unless this action is called.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{id}/relock with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{id}/relock".format(**path), data=data, params=params, single_item=True)
def list_module_items(self, course_id, module_id, include=None, search_term=None, student_id=None):
"""
List module items.
List the items in a module
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# OPTIONAL - include
"""If included, will return additional details specific to the content
associated with each item. Refer to the {api:Modules:Module%20Item Module
Item specification} for more details.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["content_details"])
params["include"] = include
# OPTIONAL - search_term
"""The partial title of the items to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{module_id}/items with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{module_id}/items".format(**path), data=data, params=params, all_pages=True)
def show_module_item(self, id, course_id, module_id, include=None, student_id=None):
"""
Show module item.
Get information about a single module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - include
"""If included, will return additional details specific to the content
associated with this item. Refer to the {api:Modules:Module%20Item Module
Item specification} for more details.
Includes standard lock information for each item."""
if include is not None:
self._validate_enum(include, ["content_details"])
params["include"] = include
# OPTIONAL - student_id
"""Returns module completion information for the student with this id."""
if student_id is not None:
params["student_id"] = student_id
self.logger.debug("GET /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def create_module_item(self, course_id, module_id, module_item_type, module_item_content_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_new_tab=None, module_item_page_url=None, module_item_position=None, module_item_title=None):
"""
Create a module item.
Create and return a new module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# OPTIONAL - module_item[title]
"""The name of the module item and associated content"""
if module_item_title is not None:
data["module_item[title]"] = module_item_title
# REQUIRED - module_item[type]
"""The type of content linked to the item"""
self._validate_enum(module_item_type, ["File", "Page", "Discussion", "Assignment", "Quiz", "SubHeader", "ExternalUrl", "ExternalTool"])
data["module_item[type]"] = module_item_type
# REQUIRED - module_item[content_id]
"""The id of the content to link to the module item. Required, except for
'ExternalUrl', 'Page', and 'SubHeader' types."""
data["module_item[content_id]"] = module_item_content_id
# OPTIONAL - module_item[position]
"""The position of this item in the module (1-based)."""
if module_item_position is not None:
data["module_item[position]"] = module_item_position
# OPTIONAL - module_item[indent]
"""0-based indent level; module items may be indented to show a hierarchy"""
if module_item_indent is not None:
data["module_item[indent]"] = module_item_indent
# OPTIONAL - module_item[page_url]
"""Suffix for the linked wiki page (e.g. 'front-page'). Required for 'Page'
type."""
if module_item_page_url is not None:
data["module_item[page_url]"] = module_item_page_url
# OPTIONAL - module_item[external_url]
"""External url that the item points to. [Required for 'ExternalUrl' and
'ExternalTool' types."""
if module_item_external_url is not None:
data["module_item[external_url]"] = module_item_external_url
# OPTIONAL - module_item[new_tab]
"""Whether the external tool opens in a new tab. Only applies to
'ExternalTool' type."""
if module_item_new_tab is not None:
data["module_item[new_tab]"] = module_item_new_tab
# OPTIONAL - module_item[completion_requirement][type]
"""Completion requirement for this module item.
"must_view": Applies to all item types
"must_contribute": Only applies to "Assignment", "Discussion", and "Page" types
"must_submit", "min_score": Only apply to "Assignment" and "Quiz" types
Inapplicable types will be ignored"""
if module_item_completion_requirement_type is not None:
self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit", "min_score"])  # 'min_score' is documented above but was missing from the generated enum
data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
# OPTIONAL - module_item[completion_requirement][min_score]
"""Minimum score required to complete. Required for completion_requirement
type 'min_score'."""
if module_item_completion_requirement_min_score is not None:
data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items".format(**path), data=data, params=params, single_item=True)
def update_module_item(self, id, course_id, module_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_module_id=None, module_item_new_tab=None, module_item_position=None, module_item_published=None, module_item_title=None):
"""
Update a module item.
Update and return an existing module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - module_item[title]
"""The name of the module item"""
if module_item_title is not None:
data["module_item[title]"] = module_item_title
# OPTIONAL - module_item[position]
"""The position of this item in the module (1-based)"""
if module_item_position is not None:
data["module_item[position]"] = module_item_position
# OPTIONAL - module_item[indent]
"""0-based indent level; module items may be indented to show a hierarchy"""
if module_item_indent is not None:
data["module_item[indent]"] = module_item_indent
# OPTIONAL - module_item[external_url]
"""External url that the item points to. Only applies to 'ExternalUrl' type."""
if module_item_external_url is not None:
data["module_item[external_url]"] = module_item_external_url
# OPTIONAL - module_item[new_tab]
"""Whether the external tool opens in a new tab. Only applies to
'ExternalTool' type."""
if module_item_new_tab is not None:
data["module_item[new_tab]"] = module_item_new_tab
# OPTIONAL - module_item[completion_requirement][type]
"""Completion requirement for this module item.
"must_view": Applies to all item types
"must_contribute": Only applies to "Assignment", "Discussion", and "Page" types
"must_submit", "min_score": Only apply to "Assignment" and "Quiz" types
Inapplicable types will be ignored"""
if module_item_completion_requirement_type is not None:
self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit", "min_score"])  # 'min_score' is documented above but was missing from the generated enum
data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
# OPTIONAL - module_item[completion_requirement][min_score]
"""Minimum score required to complete, Required for completion_requirement
type 'min_score'."""
if module_item_completion_requirement_min_score is not None:
data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score
# OPTIONAL - module_item[published]
"""Whether the module item is published and visible to students."""
if module_item_published is not None:
data["module_item[published]"] = module_item_published
# OPTIONAL - module_item[module_id]
"""Move this item to another module by specifying the target module id here.
The target module must be in the same course."""
if module_item_module_id is not None:
data["module_item[module_id]"] = module_item_module_id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def select_mastery_path(self, id, course_id, module_id, assignment_set_id=None, student_id=None):
"""
Select a mastery path.
Select a mastery path when module item includes several possible paths.
Requires Mastery Paths feature to be enabled. Returns a compound document
with the assignments included in the given path and any module items
related to those assignments
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - assignment_set_id
"""Assignment set chosen, as specified in the mastery_paths portion of the
context module item response"""
if assignment_set_id is not None:
data["assignment_set_id"] = assignment_set_id
# OPTIONAL - student_id
"""Which student the selection applies to. If not specified, current user is
implied."""
if student_id is not None:
data["student_id"] = student_id
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path".format(**path), data=data, params=params, no_data=True)
def delete_module_item(self, id, course_id, module_id):
"""
Delete module item.
Delete a module item
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("DELETE /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
def mark_module_item_as_done_not_done(self, id, course_id, module_id):
"""
Mark module item as done/not done.
Mark a module item as done/not done. Use HTTP method PUT to mark as done,
and DELETE to mark as not done.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/done with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/done".format(**path), data=data, params=params, no_data=True)
def get_module_item_sequence(self, course_id, asset_id=None, asset_type=None):
"""
Get module item sequence.
Given an asset in a course, find the ModuleItem it belongs to, and also the previous and next Module Items
in the course sequence.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - asset_type
"""The type of asset to find module sequence information for. Use the ModuleItem if it is known
(e.g., the user navigated from a module item), since this will avoid ambiguity if the asset
appears more than once in the module sequence."""
if asset_type is not None:
self._validate_enum(asset_type, ["ModuleItem", "File", "Page", "Discussion", "Assignment", "Quiz", "ExternalTool"])
params["asset_type"] = asset_type
# OPTIONAL - asset_id
"""The id of the asset (or the url in the case of a Page)"""
if asset_id is not None:
params["asset_id"] = asset_id
self.logger.debug("GET /api/v1/courses/{course_id}/module_item_sequence with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/module_item_sequence".format(**path), data=data, params=params, single_item=True)
def mark_module_item_read(self, id, course_id, module_id):
"""
Mark module item read.
Fulfills "must view" requirement for a module item. It is generally not necessary to do this explicitly,
but it is provided for applications that need to access external content directly (bypassing the html_url
redirect that normally allows Canvas to fulfill "must view" requirements).
This endpoint cannot be used to complete requirements on locked or unpublished module items.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/mark_read with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/mark_read".format(**path), data=data, params=params, no_data=True)
class Contentdetails(BaseModel):
"""Contentdetails Model."""
def __init__(self, unlock_at=None, due_at=None, points_possible=None, lock_info=None, lock_at=None, lock_explanation=None, locked_for_user=None):
"""Init method for Contentdetails class."""
self._unlock_at = unlock_at
self._due_at = due_at
self._points_possible = points_possible
self._lock_info = lock_info
self._lock_at = lock_at
self._lock_explanation = lock_explanation
self._locked_for_user = locked_for_user
self.logger = logging.getLogger('py3canvas.Contentdetails')
@property
def unlock_at(self):
"""unlock_at."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def due_at(self):
"""due_at."""
return self._due_at
@due_at.setter
def due_at(self, value):
"""Setter for due_at property."""
self.logger.warn("Setting values on due_at will NOT update the remote Canvas instance.")
self._due_at = value
@property
def points_possible(self):
"""points_possible."""
return self._points_possible
@points_possible.setter
def points_possible(self, value):
"""Setter for points_possible property."""
self.logger.warn("Setting values on points_possible will NOT update the remote Canvas instance.")
self._points_possible = value
@property
def lock_info(self):
"""lock_info."""
return self._lock_info
@lock_info.setter
def lock_info(self, value):
"""Setter for lock_info property."""
self.logger.warn("Setting values on lock_info will NOT update the remote Canvas instance.")
self._lock_info = value
@property
def lock_at(self):
"""lock_at."""
return self._lock_at
@lock_at.setter
def lock_at(self, value):
"""Setter for lock_at property."""
self.logger.warn("Setting values on lock_at will NOT update the remote Canvas instance.")
self._lock_at = value
@property
def lock_explanation(self):
"""lock_explanation."""
return self._lock_explanation
@lock_explanation.setter
def lock_explanation(self, value):
"""Setter for lock_explanation property."""
self.logger.warn("Setting values on lock_explanation will NOT update the remote Canvas instance.")
self._lock_explanation = value
@property
def locked_for_user(self):
"""locked_for_user."""
return self._locked_for_user
@locked_for_user.setter
def locked_for_user(self, value):
"""Setter for locked_for_user property."""
self.logger.warn("Setting values on locked_for_user will NOT update the remote Canvas instance.")
self._locked_for_user = value
class Moduleitemsequenceasset(BaseModel):
"""Moduleitemsequenceasset Model."""
def __init__(self, module_id=None, type=None, id=None, title=None):
"""Init method for Moduleitemsequenceasset class."""
self._module_id = module_id
self._type = type
self._id = id
self._title = title
self.logger = logging.getLogger('py3canvas.Moduleitemsequenceasset')
@property
def module_id(self):
"""module_id."""
return self._module_id
@module_id.setter
def module_id(self, value):
"""Setter for module_id property."""
self.logger.warn("Setting values on module_id will NOT update the remote Canvas instance.")
self._module_id = value
@property
def type(self):
"""type."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def id(self):
"""id."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def title(self):
"""title."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn("Setting values on title will NOT update the remote Canvas instance.")
self._title = value
class Moduleitemcompletionrequirement(BaseModel):
"""Moduleitemcompletionrequirement Model."""
def __init__(self, min_score=None, type=None, completed=None):
"""Init method for Moduleitemcompletionrequirement class."""
self._min_score = min_score
self._type = type
self._completed = completed
self.logger = logging.getLogger('py3canvas.Moduleitemcompletionrequirement')
@property
def min_score(self):
"""min_score."""
return self._min_score
@min_score.setter
def min_score(self, value):
"""Setter for min_score property."""
self.logger.warn("Setting values on min_score will NOT update the remote Canvas instance.")
self._min_score = value
@property
def type(self):
"""type."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def completed(self):
"""completed."""
return self._completed
@completed.setter
def completed(self, value):
"""Setter for completed property."""
self.logger.warn("Setting values on completed will NOT update the remote Canvas instance.")
self._completed = value
class Module(BaseModel):
"""Module Model."""
def __init__(self, completed_at=None, items_count=None, unlock_at=None, workflow_state=None, items=None, prerequisite_module_ids=None, state=None, publish_final_grade=None, position=None, items_url=None, id=None, require_sequential_progress=None, name=None):
"""Init method for Module class."""
self._completed_at = completed_at
self._items_count = items_count
self._unlock_at = unlock_at
self._workflow_state = workflow_state
self._items = items
self._prerequisite_module_ids = prerequisite_module_ids
self._state = state
self._publish_final_grade = publish_final_grade
self._position = position
self._items_url = items_url
self._id = id
self._require_sequential_progress = require_sequential_progress
self._name = name
self.logger = logging.getLogger('py3canvas.Module')
@property
def completed_at(self):
"""the date the calling user completed the module (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._completed_at
@completed_at.setter
def completed_at(self, value):
"""Setter for completed_at property."""
self.logger.warn("Setting values on completed_at will NOT update the remote Canvas instance.")
self._completed_at = value
@property
def items_count(self):
"""The number of items in the module."""
return self._items_count
@items_count.setter
def items_count(self, value):
"""Setter for items_count property."""
self.logger.warn("Setting values on items_count will NOT update the remote Canvas instance.")
self._items_count = value
@property
def unlock_at(self):
"""(Optional) the date this module will unlock."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def workflow_state(self):
"""the state of the module: 'active', 'deleted'."""
return self._workflow_state
@workflow_state.setter
def workflow_state(self, value):
"""Setter for workflow_state property."""
self.logger.warn("Setting values on workflow_state will NOT update the remote Canvas instance.")
self._workflow_state = value
@property
def items(self):
"""The contents of this module, as an array of Module Items. (Present only if requested via include[]=items AND the module is not deemed too large by Canvas.)."""
return self._items
@items.setter
def items(self, value):
"""Setter for items property."""
self.logger.warn("Setting values on items will NOT update the remote Canvas instance.")
self._items = value
@property
def prerequisite_module_ids(self):
"""IDs of Modules that must be completed before this one is unlocked."""
return self._prerequisite_module_ids
@prerequisite_module_ids.setter
def prerequisite_module_ids(self, value):
"""Setter for prerequisite_module_ids property."""
self.logger.warn("Setting values on prerequisite_module_ids will NOT update the remote Canvas instance.")
self._prerequisite_module_ids = value
@property
def state(self):
"""The state of this Module for the calling user one of 'locked', 'unlocked', 'started', 'completed' (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._state
@state.setter
def state(self, value):
"""Setter for state property."""
self.logger.warn("Setting values on state will NOT update the remote Canvas instance.")
self._state = value
@property
def publish_final_grade(self):
"""if the student's final grade for the course should be published to the SIS upon completion of this module."""
return self._publish_final_grade
@publish_final_grade.setter
def publish_final_grade(self, value):
"""Setter for publish_final_grade property."""
self.logger.warn("Setting values on publish_final_grade will NOT update the remote Canvas instance.")
self._publish_final_grade = value
@property
def position(self):
"""the position of this module in the course (1-based)."""
return self._position
@position.setter
def position(self, value):
"""Setter for position property."""
self.logger.warn("Setting values on position will NOT update the remote Canvas instance.")
self._position = value
@property
def items_url(self):
"""The API URL to retrive this module's items."""
return self._items_url
@items_url.setter
def items_url(self, value):
"""Setter for items_url property."""
self.logger.warn("Setting values on items_url will NOT update the remote Canvas instance.")
self._items_url = value
@property
def id(self):
"""the unique identifier for the module."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def require_sequential_progress(self):
"""Whether module items must be unlocked in order."""
return self._require_sequential_progress
@require_sequential_progress.setter
def require_sequential_progress(self, value):
"""Setter for require_sequential_progress property."""
self.logger.warn("Setting values on require_sequential_progress will NOT update the remote Canvas instance.")
self._require_sequential_progress = value
@property
def name(self):
"""the name of this module."""
return self._name
@name.setter
def name(self, value):
"""Setter for name property."""
self.logger.warn("Setting values on name will NOT update the remote Canvas instance.")
self._name = value
class Moduleitemsequence(BaseModel):
"""Moduleitemsequence Model."""
def __init__(self, items=None, modules=None):
"""Init method for Moduleitemsequence class."""
self._items = items
self._modules = modules
self.logger = logging.getLogger('py3canvas.Moduleitemsequence')
@property
def items(self):
"""an array containing one hash for each appearence of the asset in the module sequence (up to 10 total)."""
return self._items
@items.setter
def items(self, value):
"""Setter for items property."""
self.logger.warn("Setting values on items will NOT update the remote Canvas instance.")
self._items = value
@property
def modules(self):
"""an array containing each Module referenced above."""
return self._modules
@modules.setter
def modules(self, value):
"""Setter for modules property."""
self.logger.warn("Setting values on modules will NOT update the remote Canvas instance.")
self._modules = value
class Completionrequirement(BaseModel):
"""Completionrequirement Model."""
def __init__(self, min_score=None, type=None, completed=None):
"""Init method for Completionrequirement class."""
self._min_score = min_score
self._type = type
self._completed = completed
self.logger = logging.getLogger('py3canvas.Completionrequirement')
@property
def min_score(self):
"""minimum score required to complete (only present when type == 'min_score')."""
return self._min_score
@min_score.setter
def min_score(self, value):
"""Setter for min_score property."""
self.logger.warn("Setting values on min_score will NOT update the remote Canvas instance.")
self._min_score = value
@property
def type(self):
"""one of 'must_view', 'must_submit', 'must_contribute', 'min_score'."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def completed(self):
"""whether the calling user has met this requirement (Optional; present only if the caller is a student or if the optional parameter 'student_id' is included)."""
return self._completed
@completed.setter
def completed(self, value):
"""Setter for completed property."""
self.logger.warn("Setting values on completed will NOT update the remote Canvas instance.")
self._completed = value
class Moduleitem(BaseModel):
"""Moduleitem Model."""
def __init__(self, indent=None, title=None, url=None, completion_requirement=None, html_url=None, content_details=None, new_tab=None, external_url=None, position=None, module_id=None, content_id=None, type=None, id=None, page_url=None):
"""Init method for Moduleitem class."""
self._indent = indent
self._title = title
self._url = url
self._completion_requirement = completion_requirement
self._html_url = html_url
self._content_details = content_details
self._new_tab = new_tab
self._external_url = external_url
self._position = position
self._module_id = module_id
self._content_id = content_id
self._type = type
self._id = id
self._page_url = page_url
self.logger = logging.getLogger('py3canvas.Moduleitem')
@property
def indent(self):
"""0-based indent level; module items may be indented to show a hierarchy."""
return self._indent
@indent.setter
def indent(self, value):
"""Setter for indent property."""
self.logger.warn("Setting values on indent will NOT update the remote Canvas instance.")
self._indent = value
@property
def title(self):
"""the title of this item."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn("Setting values on title will NOT update the remote Canvas instance.")
self._title = value
@property
def url(self):
"""(Optional) link to the Canvas API object, if applicable."""
return self._url
@url.setter
def url(self, value):
"""Setter for url property."""
self.logger.warn("Setting values on url will NOT update the remote Canvas instance.")
self._url = value
@property
def completion_requirement(self):
"""Completion requirement for this module item."""
return self._completion_requirement
@completion_requirement.setter
def completion_requirement(self, value):
"""Setter for completion_requirement property."""
self.logger.warn("Setting values on completion_requirement will NOT update the remote Canvas instance.")
self._completion_requirement = value
@property
def html_url(self):
"""link to the item in Canvas."""
return self._html_url
@html_url.setter
def html_url(self, value):
"""Setter for html_url property."""
self.logger.warn("Setting values on html_url will NOT update the remote Canvas instance.")
self._html_url = value
@property
def content_details(self):
"""(Present only if requested through include[]=content_details) If applicable, returns additional details specific to the associated object."""
return self._content_details
@content_details.setter
def content_details(self, value):
"""Setter for content_details property."""
self.logger.warn("Setting values on content_details will NOT update the remote Canvas instance.")
self._content_details = value
@property
def new_tab(self):
"""(only for 'ExternalTool' type) whether the external tool opens in a new tab."""
return self._new_tab
@new_tab.setter
def new_tab(self, value):
"""Setter for new_tab property."""
self.logger.warn("Setting values on new_tab will NOT update the remote Canvas instance.")
self._new_tab = value
@property
def external_url(self):
"""(only for 'ExternalUrl' and 'ExternalTool' types) external url that the item points to."""
return self._external_url
@external_url.setter
def external_url(self, value):
"""Setter for external_url property."""
self.logger.warn("Setting values on external_url will NOT update the remote Canvas instance.")
self._external_url = value
@property
def position(self):
"""the position of this item in the module (1-based)."""
return self._position
@position.setter
def position(self, value):
"""Setter for position property."""
self.logger.warn("Setting values on position will NOT update the remote Canvas instance.")
self._position = value
@property
def module_id(self):
"""the id of the Module this item appears in."""
return self._module_id
@module_id.setter
def module_id(self, value):
"""Setter for module_id property."""
self.logger.warn("Setting values on module_id will NOT update the remote Canvas instance.")
self._module_id = value
@property
def content_id(self):
"""the id of the object referred to applies to 'File', 'Discussion', 'Assignment', 'Quiz', 'ExternalTool' types."""
return self._content_id
@content_id.setter
def content_id(self, value):
"""Setter for content_id property."""
self.logger.warn("Setting values on content_id will NOT update the remote Canvas instance.")
self._content_id = value
@property
def type(self):
"""the type of object referred to one of 'File', 'Page', 'Discussion', 'Assignment', 'Quiz', 'SubHeader', 'ExternalUrl', 'ExternalTool'."""
return self._type
@type.setter
def type(self, value):
"""Setter for type property."""
self.logger.warn("Setting values on type will NOT update the remote Canvas instance.")
self._type = value
@property
def id(self):
"""the unique identifier for the module item."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def page_url(self):
"""(only for 'Page' type) unique locator for the linked wiki page."""
return self._page_url
@page_url.setter
def page_url(self, value):
"""Setter for page_url property."""
self.logger.warn("Setting values on page_url will NOT update the remote Canvas instance.")
self._page_url = value
class Moduleitemsequencenode(BaseModel):
"""Moduleitemsequencenode Model."""
def __init__(self, current=None, prev=None, next=None):
"""Init method for Moduleitemsequencenode class."""
self._current = current
self._prev = prev
self._next = next
self.logger = logging.getLogger('py3canvas.Moduleitemsequencenode')
@property
def current(self):
"""current."""
return self._current
@current.setter
def current(self, value):
"""Setter for current property."""
self.logger.warn("Setting values on current will NOT update the remote Canvas instance.")
self._current = value
@property
def prev(self):
"""prev."""
return self._prev
@prev.setter
def prev(self, value):
"""Setter for prev property."""
self.logger.warn("Setting values on prev will NOT update the remote Canvas instance.")
self._prev = value
@property
def next(self):
"""next."""
return self._next
@next.setter
def next(self, value):
"""Setter for next property."""
self.logger.warn("Setting values on next will NOT update the remote Canvas instance.")
self._next = value
class Moduleitemcontentdetails(BaseModel):
"""Moduleitemcontentdetails Model."""
def __init__(self, unlock_at=None, due_at=None, points_possible=None, lock_info=None, lock_at=None, lock_explanation=None, locked_for_user=None):
"""Init method for Moduleitemcontentdetails class."""
self._unlock_at = unlock_at
self._due_at = due_at
self._points_possible = points_possible
self._lock_info = lock_info
self._lock_at = lock_at
self._lock_explanation = lock_explanation
self._locked_for_user = locked_for_user
self.logger = logging.getLogger('py3canvas.Moduleitemcontentdetails')
@property
def unlock_at(self):
"""unlock_at."""
return self._unlock_at
@unlock_at.setter
def unlock_at(self, value):
"""Setter for unlock_at property."""
self.logger.warn("Setting values on unlock_at will NOT update the remote Canvas instance.")
self._unlock_at = value
@property
def due_at(self):
"""due_at."""
return self._due_at
@due_at.setter
def due_at(self, value):
"""Setter for due_at property."""
self.logger.warn("Setting values on due_at will NOT update the remote Canvas instance.")
self._due_at = value
@property
def points_possible(self):
"""points_possible."""
return self._points_possible
@points_possible.setter
def points_possible(self, value):
"""Setter for points_possible property."""
self.logger.warn("Setting values on points_possible will NOT update the remote Canvas instance.")
self._points_possible = value
@property
def lock_info(self):
"""lock_info."""
return self._lock_info
@lock_info.setter
def lock_info(self, value):
"""Setter for lock_info property."""
self.logger.warn("Setting values on lock_info will NOT update the remote Canvas instance.")
self._lock_info = value
@property
def lock_at(self):
"""lock_at."""
return self._lock_at
@lock_at.setter
def lock_at(self, value):
"""Setter for lock_at property."""
self.logger.warn("Setting values on lock_at will NOT update the remote Canvas instance.")
self._lock_at = value
@property
def lock_explanation(self):
"""lock_explanation."""
return self._lock_explanation
@lock_explanation.setter
def lock_explanation(self, value):
"""Setter for lock_explanation property."""
self.logger.warn("Setting values on lock_explanation will NOT update the remote Canvas instance.")
self._lock_explanation = value
@property
def locked_for_user(self):
"""locked_for_user."""
return self._locked_for_user
@locked_for_user.setter
def locked_for_user(self, value):
"""Setter for locked_for_user property."""
self.logger.warn("Setting values on locked_for_user will NOT update the remote Canvas instance.")
self._locked_for_user = value
|
mit
|
fengzhe29888/gnuradio-old
|
gr-blocks/python/blocks/qa_threshold.py
|
57
|
1537
|
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
class test_threshold(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_01(self):
tb = self.tb
data = [0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2]
expected_result = (0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1)
src = blocks.vector_source_f(data, False)
op = blocks.threshold_ff(1, 1)
dst = blocks.vector_sink_f()
tb.connect(src, op)
tb.connect(op, dst)
tb.run()
dst_data = dst.data()
self.assertEqual(expected_result, dst_data)
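# Descriptive note (not part of the original test): as I understand it,
# blocks.threshold_ff(lo, hi) outputs 1.0 once the input rises above the high
# threshold and 0.0 once it falls below the low threshold; with lo == hi == 1 it
# acts as a plain comparator, so the 0/2 input above maps to the 0/1 pattern in
# expected_result.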
if __name__ == '__main__':
gr_unittest.run(test_threshold, "test_threshold.xml")
|
gpl-3.0
|
kinnou02/navitia
|
source/jormungandr/jormungandr/parking_space_availability/__init__.py
|
3
|
1795
|
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
from jormungandr.parking_space_availability.abstract_parking_places_provider import AbstractParkingPlacesProvider
from jormungandr.parking_space_availability.abstract_provider_manager import AbstractProviderManager
from jormungandr.parking_space_availability.abstract_provider_manager import get_from_to_pois_of_journeys
from jormungandr.parking_space_availability.bss.stands import Stands, StandsStatus
from jormungandr.parking_space_availability.car.parking_places import ParkingPlaces
|
agpl-3.0
|
0xkag/tornado
|
tornado/test/simple_httpclient_test.py
|
13
|
22722
|
from __future__ import absolute_import, division, print_function, with_statement
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import sys
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.netutil import Resolver, bind_sockets
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _default_ca_certs
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import skipOnTravis, skipIfNoIPv6
from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
def get_http_client(self):
client = SimpleAsyncHTTPClient(io_loop=self.io_loop,
force_instance=True)
self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
return client
class TriggerHandler(RequestHandler):
def initialize(self, queue, wake_callback):
self.queue = queue
self.wake_callback = wake_callback
@asynchronous
def get(self):
logging.debug("queuing trigger")
self.queue.append(self.finish)
if self.get_argument("wake", "true") == "true":
self.wake_callback()
class HangHandler(RequestHandler):
@asynchronous
def get(self):
pass
class ContentLengthHandler(RequestHandler):
def get(self):
self.set_header("Content-Length", self.get_argument("value"))
self.write("ok")
class HeadHandler(RequestHandler):
def head(self):
self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
def options(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write("ok")
class NoContentHandler(RequestHandler):
def get(self):
if self.get_argument("error", None):
self.set_header("Content-Length", "5")
self.write("hello")
self.set_status(204)
class SeeOtherPostHandler(RequestHandler):
def post(self):
redirect_code = int(self.request.body)
assert redirect_code in (302, 303), "unexpected body %r" % self.request.body
self.set_header("Location", "/see_other_get")
self.set_status(redirect_code)
class SeeOtherGetHandler(RequestHandler):
def get(self):
if self.request.body:
raise Exception("unexpected body %r" % self.request.body)
self.write("ok")
class HostEchoHandler(RequestHandler):
def get(self):
self.write(self.request.headers["Host"])
class NoContentLengthHandler(RequestHandler):
@gen.coroutine
def get(self):
# Emulate the old HTTP/1.0 behavior of returning a body with no
# content-length. Tornado handles content-length at the framework
# level so we have to go around it.
stream = self.request.connection.stream
yield stream.write(b"HTTP/1.0 200 OK\r\n\r\n"
b"hello")
stream.close()
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
@stream_request_body
class RespondInPrepareHandler(RequestHandler):
def prepare(self):
self.set_status(403)
self.finish("forbidden")
class SimpleHTTPClientTestMixin(object):
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/options", OptionsHandler),
url("/no_content", NoContentHandler),
url("/see_other_post", SeeOtherPostHandler),
url("/see_other_get", SeeOtherGetHandler),
url("/host_echo", HostEchoHandler),
url("/no_content_length", NoContentLengthHandler),
url("/echo_post", EchoPostHandler),
url("/respond_in_prepare", RespondInPrepareHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is
SimpleAsyncHTTPClient(self.io_loop))
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(self.io_loop,
force_instance=True))
# different IOLoops use different objects
with closing(IOLoop()) as io_loop2:
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(io_loop2))
def test_connection_limit(self):
with closing(self.create_client(max_clients=2)) as client:
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger"),
lambda response, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/countdown/3'), self.stop,
max_redirects=3)
response = self.wait()
response.rethrow()
def test_default_certificates_exist(self):
open(_default_ca_certs()).close()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b"asdfqwer")
# Our test data gets bigger when gzipped. Oops. :)
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b"asdfqwer")
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effective_url.endswith("/countdown/2"))
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
def test_header_reuse(self):
# Apps may reuse a headers object if they are only passing in constant
# headers like user-agent. The header object should not be modified.
headers = HTTPHeaders({'User-Agent': 'Foo'})
self.fetch("/hello", headers=headers)
self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')])
def test_see_other_redirect(self):
for code in (302, 303):
response = self.fetch("/see_other_post", method="POST", body="%d" % code)
self.assertEqual(200, response.code)
self.assertTrue(response.request.url.endswith("/see_other_post"))
self.assertTrue(response.effective_url.endswith("/see_other_get"))
# request is the original request, is a POST still
self.assertEqual("POST", response.request.method)
@skipOnTravis
def test_request_timeout(self):
response = self.fetch('/trigger?wake=false', request_timeout=0.1)
self.assertEqual(response.code, 599)
self.assertTrue(0.099 < response.request_time < 0.15, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
# trigger the hanging request to let it clean up after itself
self.triggers.popleft()()
@skipIfNoIPv6
def test_ipv6(self):
try:
[sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
port = sock.getsockname()[1]
self.http_server.add_socket(sock)
except socket.gaierror as e:
if e.args[0] == socket.EAI_ADDRFAMILY:
# python supports ipv6, but it's not configured on the network
# interface, so skip this test.
return
raise
url = '%s://[::1]:%d/hello' % (self.get_protocol(), port)
# ipv6 is currently enabled by default but can be disabled
self.http_client.fetch(url, self.stop, allow_ipv6=False)
response = self.wait()
self.assertEqual(response.code, 599)
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(response.body, b"Hello world!")
def xtest_multiple_content_length_accepted(self):
response = self.fetch("/content_length?value=2,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,%202,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,4")
self.assertEqual(response.code, 599)
response = self.fetch("/content_length?value=2,%202,3")
self.assertEqual(response.code, 599)
def test_head_request(self):
response = self.fetch("/head", method="HEAD")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "7")
self.assertFalse(response.body)
def test_options_request(self):
response = self.fetch("/options", method="OPTIONS")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "2")
self.assertEqual(response.headers["access-control-allow-origin"], "*")
self.assertEqual(response.body, b"ok")
def test_no_content(self):
response = self.fetch("/no_content")
self.assertEqual(response.code, 204)
# 204 status doesn't need a content-length, but tornado will
# add a zero content-length anyway.
#
# A test without a content-length header is included below
# in HTTP204NoContentTestCase.
self.assertEqual(response.headers["Content-length"], "0")
# 204 status with non-zero content length is malformed
with ExpectLog(gen_log, "Malformed HTTP message"):
response = self.fetch("/no_content?error=1")
self.assertEqual(response.code, 599)
def test_host_header(self):
host_re = re.compile(b"^localhost:[0-9]+$")
response = self.fetch("/host_echo")
self.assertTrue(host_re.match(response.body))
url = self.get_url("/host_echo").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertTrue(host_re.match(response.body), response.body)
def test_connection_refused(self):
server_socket, port = bind_unused_port()
server_socket.close()
with ExpectLog(gen_log, ".*", required=False):
self.http_client.fetch("http://localhost:%d/" % port, self.stop)
response = self.wait()
self.assertEqual(599, response.code)
if sys.platform != 'cygwin':
# cygwin returns EPERM instead of ECONNREFUSED here
contains_errno = str(errno.ECONNREFUSED) in str(response.error)
if not contains_errno and hasattr(errno, "WSAECONNREFUSED"):
contains_errno = str(errno.WSAECONNREFUSED) in str(response.error)
self.assertTrue(contains_errno, response.error)
# This is usually "Connection refused".
# On windows, strerror is broken and returns "Unknown error".
expected_message = os.strerror(errno.ECONNREFUSED)
self.assertTrue(expected_message in str(response.error),
response.error)
def test_queue_timeout(self):
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/trigger'), self.stop,
request_timeout=10)
# Wait for the trigger request to block, not complete.
self.wait()
client.fetch(self.get_url('/hello'), self.stop,
connect_timeout=0.1)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertTrue(response.request_time < 1, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
self.triggers.popleft()()
self.wait()
def test_no_content_length(self):
response = self.fetch("/no_content_length")
self.assertEquals(b"hello", response.body)
def sync_body_producer(self, write):
write(b'1234')
write(b'5678')
@gen.coroutine
def async_body_producer(self, write):
yield write(b'1234')
yield gen.Task(IOLoop.current().add_callback)
yield write(b'5678')
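# Descriptive note (not part of the original tests): both helpers feed the request
# body through the body_producer fetch argument exercised below; the synchronous
# variant writes eagerly, while the coroutine variant yields each write() and an
# extra IOLoop callback hop to exercise an asynchronous producer.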
def test_sync_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_sync_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_100_continue(self):
response = self.fetch("/echo_post", method="POST",
body=b"1234",
expect_100_continue=True)
self.assertEqual(response.body, b"1234")
def test_100_continue_early_response(self):
def body_producer(write):
raise Exception("should not be called")
response = self.fetch("/respond_in_prepare", method="POST",
body_producer=body_producer,
expect_100_continue=True)
self.assertEqual(response.code, 403)
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
def setUp(self):
super(SimpleHTTPClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
**kwargs)
class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase):
def setUp(self):
super(SimpleHTTPSClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
defaults=dict(validate_cert=False),
**kwargs)
class CreateAsyncHTTPClientTestCase(AsyncTestCase):
def setUp(self):
super(CreateAsyncHTTPClientTestCase, self).setUp()
self.saved = AsyncHTTPClient._save_configuration()
def tearDown(self):
AsyncHTTPClient._restore_configuration(self.saved)
super(CreateAsyncHTTPClientTestCase, self).tearDown()
def test_max_clients(self):
AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 10)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=11, force_instance=True)) as client:
self.assertEqual(client.max_clients, 11)
# Now configure max_clients statically and try overriding it
# with each way max_clients can be passed
AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 12)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=13, force_instance=True)) as client:
self.assertEqual(client.max_clients, 13)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=14, force_instance=True)) as client:
self.assertEqual(client.max_clients, 14)
class HTTP100ContinueTestCase(AsyncHTTPTestCase):
def respond_100(self, request):
self.request = request
self.request.connection.stream.write(
b"HTTP/1.1 100 CONTINUE\r\n\r\n",
self.respond_200)
def respond_200(self):
self.request.connection.stream.write(
b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA",
self.request.connection.stream.close)
def get_app(self):
# Not a full Application, but works as an HTTPServer callback
return self.respond_100
def test_100_continue(self):
res = self.fetch('/')
self.assertEqual(res.body, b'A')
class HTTP204NoContentTestCase(AsyncHTTPTestCase):
def respond_204(self, request):
# A 204 response never has a body, even if it doesn't have a content-length
# (which would otherwise mean read-until-close). Tornado always
# sends a content-length, so we simulate here a server that sends
# no content length and does not close the connection.
#
# Tests of a 204 response with a Content-Length header are included
# in SimpleHTTPClientTestMixin.
request.connection.stream.write(
b"HTTP/1.1 204 No content\r\n\r\n")
def get_app(self):
return self.respond_204
def test_204_no_content(self):
resp = self.fetch('/')
self.assertEqual(resp.code, 204)
self.assertEqual(resp.body, b'')
class HostnameMappingTestCase(AsyncHTTPTestCase):
def setUp(self):
super(HostnameMappingTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
self.io_loop,
hostname_mapping={
'www.example.com': '127.0.0.1',
('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()),
})
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_hostname_mapping(self):
self.http_client.fetch(
'http://www.example.com:%d/hello' % self.get_http_port(), self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
def test_port_mapping(self):
self.http_client.fetch('http://foo.example.com:8000/hello', self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
class ResolveTimeoutTestCase(AsyncHTTPTestCase):
def setUp(self):
# Dummy Resolver subclass that never invokes its callback.
class BadResolver(Resolver):
def resolve(self, *args, **kwargs):
pass
super(ResolveTimeoutTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
self.io_loop,
resolver=BadResolver())
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_resolve_timeout(self):
response = self.fetch('/hello', connect_timeout=0.1)
self.assertEqual(response.code, 599)
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 100)
self.write("ok")
class LargeHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 1000)
self.write("ok")
return Application([('/small', SmallHeaders),
('/large', LargeHeaders)])
def get_http_client(self):
return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_header_size=1024)
def test_small_headers(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'ok')
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read"):
response = self.fetch('/large')
self.assertEqual(response.code, 599)
|
apache-2.0
|
IronLanguages/ironpython2
|
Src/StdLib/Lib/test/test_poll.py
|
4
|
7315
|
# Test case for the os.poll() function
import os
import random
import select
try:
import threading
except ImportError:
threading = None
import time
import unittest
from test.test_support import TESTFN, run_unittest, reap_threads, cpython_only
try:
select.poll
except AttributeError:
raise unittest.SkipTest, "select.poll not defined -- skipping test_poll"
def find_ready_matching(ready, flag):
match = []
for fd, mode in ready:
if mode & flag:
match.append(fd)
return match
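# Hedged mini-example of the pattern exercised below: register descriptors with an
# event mask, then poll() returns (fd, eventmask) pairs that are filtered with a
# bitwise AND, e.g.:
#
#     p = select.poll()
#     rd, wr = os.pipe()
#     p.register(rd, select.POLLIN)
#     p.register(wr, select.POLLOUT)
#     writable = find_ready_matching(p.poll(0), select.POLLOUT)   # typically [wr]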
class PollTests(unittest.TestCase):
def test_poll1(self):
# Basic functional test of poll object
# Create a bunch of pipes and test that poll works with them.
p = select.poll()
NUM_PIPES = 12
MSG = " This is a test."
MSG_LEN = len(MSG)
readers = []
writers = []
r2w = {}
w2r = {}
for i in range(NUM_PIPES):
rd, wr = os.pipe()
p.register(rd)
p.modify(rd, select.POLLIN)
p.register(wr, select.POLLOUT)
readers.append(rd)
writers.append(wr)
r2w[rd] = wr
w2r[wr] = rd
bufs = []
while writers:
ready = p.poll()
ready_writers = find_ready_matching(ready, select.POLLOUT)
if not ready_writers:
raise RuntimeError, "no pipes ready for writing"
wr = random.choice(ready_writers)
os.write(wr, MSG)
ready = p.poll()
ready_readers = find_ready_matching(ready, select.POLLIN)
if not ready_readers:
raise RuntimeError, "no pipes ready for reading"
rd = random.choice(ready_readers)
buf = os.read(rd, MSG_LEN)
self.assertEqual(len(buf), MSG_LEN)
bufs.append(buf)
os.close(r2w[rd]) ; os.close( rd )
p.unregister( r2w[rd] )
p.unregister( rd )
writers.remove(r2w[rd])
self.assertEqual(bufs, [MSG] * NUM_PIPES)
def poll_unit_tests(self):
# returns NVAL for invalid file descriptor
FD = 42
try:
os.close(FD)
except OSError:
pass
p = select.poll()
p.register(FD)
r = p.poll()
self.assertEqual(r[0], (FD, select.POLLNVAL))
f = open(TESTFN, 'w')
fd = f.fileno()
p = select.poll()
p.register(f)
r = p.poll()
self.assertEqual(r[0][0], fd)
f.close()
r = p.poll()
self.assertEqual(r[0], (fd, select.POLLNVAL))
os.unlink(TESTFN)
# type error for invalid arguments
p = select.poll()
self.assertRaises(TypeError, p.register, p)
self.assertRaises(TypeError, p.unregister, p)
# can't unregister non-existent object
p = select.poll()
self.assertRaises(KeyError, p.unregister, 3)
# Test error cases
pollster = select.poll()
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
self.assertRaises(TypeError, pollster.register, Nope(), 0)
self.assertRaises(TypeError, pollster.register, Almost(), 0)
# Another test case for poll(). This is copied from the test case for
# select(), modified to use poll() instead.
def test_poll2(self):
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
pollster = select.poll()
pollster.register( p, select.POLLIN )
for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
fdlist = pollster.poll(tout)
if (fdlist == []):
continue
fd, flags = fdlist[0]
if flags & select.POLLHUP:
line = p.readline()
if line != "":
self.fail('error: pipe seems to be closed, but still returns data')
continue
elif flags & select.POLLIN:
line = p.readline()
if not line:
break
continue
else:
self.fail('Unexpected return value from select.poll: %s' % fdlist)
p.close()
def test_poll3(self):
# test int overflow
pollster = select.poll()
pollster.register(1)
self.assertRaises(OverflowError, pollster.poll, 1L << 64)
x = 2 + 3
if x != 5:
self.fail('Overflow must have occurred')
# Issues #15989, #17919
self.assertRaises(OverflowError, pollster.register, 0, -1)
self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
self.assertRaises(OverflowError, pollster.modify, 1, -1)
self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)
@cpython_only
def test_poll_c_limits(self):
from _testcapi import USHRT_MAX, INT_MAX, UINT_MAX
pollster = select.poll()
pollster.register(1)
# Issues #15989, #17919
self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, INT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, UINT_MAX + 1)
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def test_threaded_poll(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
rfds = []
for i in range(10):
fd = os.dup(r)
self.addCleanup(os.close, fd)
rfds.append(fd)
pollster = select.poll()
for fd in rfds:
pollster.register(fd, select.POLLIN)
t = threading.Thread(target=pollster.poll)
t.start()
try:
time.sleep(0.5)
# trigger ufds array reallocation
for fd in rfds:
pollster.unregister(fd)
pollster.register(w, select.POLLOUT)
self.assertRaises(RuntimeError, pollster.poll)
finally:
# and make the call to poll() from the thread return
os.write(w, b'spam')
t.join()
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def test_poll_blocks_with_negative_ms(self):
for timeout_ms in [None, -1000, -1, -1.0]:
# Create two file descriptors. This will be used to unlock
# the blocking call to poll.poll inside the thread
r, w = os.pipe()
pollster = select.poll()
pollster.register(r, select.POLLIN)
poll_thread = threading.Thread(target=pollster.poll, args=(timeout_ms,))
poll_thread.start()
poll_thread.join(timeout=0.1)
self.assertTrue(poll_thread.is_alive())
# Write to the pipe so pollster.poll unblocks and the thread ends.
os.write(w, b'spam')
poll_thread.join()
self.assertFalse(poll_thread.is_alive())
os.close(r)
os.close(w)
def test_main():
run_unittest(PollTests)
if __name__ == '__main__':
test_main()
|
apache-2.0
|
gundalow/ansible
|
lib/ansible/executor/task_queue_manager.py
|
11
|
18711
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import tempfile
import threading
import time
import multiprocessing.queues
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.stats import AggregateStats
from ansible.executor.task_result import TaskResult
from ansible.module_utils.six import PY3, string_types
from ansible.module_utils._text import to_text, to_native
from ansible.playbook.play_context import PlayContext
from ansible.playbook.task import Task
from ansible.plugins.loader import callback_loader, strategy_loader, module_loader
from ansible.plugins.callback import CallbackBase
from ansible.template import Templar
from ansible.vars.hostvars import HostVars
from ansible.vars.reserved import warn_if_reserved
from ansible.utils.display import Display
from ansible.utils.lock import lock_decorator
from ansible.utils.multiprocessing import context as multiprocessing_context
__all__ = ['TaskQueueManager']
display = Display()
class CallbackSend:
def __init__(self, method_name, *args, **kwargs):
self.method_name = method_name
self.args = args
self.kwargs = kwargs
class FinalQueue(multiprocessing.queues.Queue):
def __init__(self, *args, **kwargs):
if PY3:
kwargs['ctx'] = multiprocessing_context
super(FinalQueue, self).__init__(*args, **kwargs)
def send_callback(self, method_name, *args, **kwargs):
self.put(
CallbackSend(method_name, *args, **kwargs),
block=False
)
def send_task_result(self, *args, **kwargs):
if isinstance(args[0], TaskResult):
tr = args[0]
else:
tr = TaskResult(*args, **kwargs)
self.put(
tr,
block=False
)
class AnsibleEndPlay(Exception):
def __init__(self, result):
self.result = result
class TaskQueueManager:
'''
This class handles the multiprocessing requirements of Ansible by
creating a pool of worker forks, a result handler fork, and a
manager object with shared datastructures/queues for coordinating
work between all processes.
The queue manager is responsible for loading the play strategy plugin,
which dispatches the Play's tasks to hosts.
'''
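# A minimal usage sketch of this class (illustrative only; `inventory`,
# `variable_manager`, `loader` and `play` are assumed to be objects built
# elsewhere through the Ansible Python API, and the vault password is a
# placeholder):
#
#   tqm = TaskQueueManager(
#       inventory=inventory,
#       variable_manager=variable_manager,
#       loader=loader,
#       passwords=dict(vault_pass=None),
#   )
#   try:
#       result = tqm.run(play)  # returns one of the RUN_* codes defined below
#   finally:
#       tqm.cleanup()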
RUN_OK = 0
RUN_ERROR = 1
RUN_FAILED_HOSTS = 2
RUN_UNREACHABLE_HOSTS = 4
RUN_FAILED_BREAK_PLAY = 8
RUN_UNKNOWN_ERROR = 255
def __init__(self, inventory, variable_manager, loader, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False, forks=None):
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._stats = AggregateStats()
self.passwords = passwords
self._stdout_callback = stdout_callback
self._run_additional_callbacks = run_additional_callbacks
self._run_tree = run_tree
self._forks = forks or 5
self._callbacks_loaded = False
self._callback_plugins = []
self._start_at_done = False
# make sure any module paths (if specified) are added to the module_loader
if context.CLIARGS.get('module_path', False):
for path in context.CLIARGS['module_path']:
if path:
module_loader.add_directory(path)
# a special flag to help us exit cleanly
self._terminated = False
# dictionaries to keep track of failed/unreachable hosts
self._failed_hosts = dict()
self._unreachable_hosts = dict()
try:
self._final_q = FinalQueue()
except OSError as e:
raise AnsibleError("Unable to use multiprocessing, this is normally caused by lack of access to /dev/shm: %s" % to_native(e))
self._callback_lock = threading.Lock()
# A temporary file (opened pre-fork) used by connection
# plugins for inter-process locking.
self._connection_lockfile = tempfile.TemporaryFile()
def _initialize_processes(self, num):
self._workers = []
for i in range(num):
self._workers.append(None)
def load_callbacks(self):
'''
Loads all available callbacks, with the exception of those which
utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
only one such callback plugin will be loaded.
'''
if self._callbacks_loaded:
return
stdout_callback_loaded = False
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
if isinstance(self._stdout_callback, CallbackBase):
stdout_callback_loaded = True
elif isinstance(self._stdout_callback, string_types):
if self._stdout_callback not in callback_loader:
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
else:
self._stdout_callback = callback_loader.get(self._stdout_callback)
self._stdout_callback.set_options()
stdout_callback_loaded = True
else:
raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
# get all configured loadable callbacks (adjacent, builtin)
callback_list = list(callback_loader.all(class_only=True))
# add enabled callbacks that refer to collections, which might not appear in normal listing
for c in C.CALLBACKS_ENABLED:
# load all, as collection ones might be using short/redirected names and not a fqcn
plugin = callback_loader.get(c, class_only=True)
# TODO: check if this skip is redundant, loader should handle bad file/plugin cases already
if plugin:
# avoids incorrect and dupes possible due to collections
if plugin not in callback_list:
callback_list.append(plugin)
else:
display.warning("Skipping callback plugin '%s', unable to load" % c)
# for each callback in the list see if we should add it to 'active callbacks' used in the play
for callback_plugin in callback_list:
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '')
callback_needs_enabled = getattr(callback_plugin, 'CALLBACK_NEEDS_ENABLED', getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False))
# try to get collection world name first
cnames = getattr(callback_plugin, '_redirected_names', [])
if cnames:
# store the name the plugin was loaded as, as that's what we'll need to compare to the configured callback list later
callback_name = cnames[0]
else:
# fallback to 'old loader name'
(callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
display.vvvvv("Attempting to use '%s' callback." % (callback_name))
if callback_type == 'stdout':
# we only allow one callback of type 'stdout' to be loaded,
if callback_name != self._stdout_callback or stdout_callback_loaded:
display.vv("Skipping callback '%s', as we already have a stdout callback." % (callback_name))
continue
stdout_callback_loaded = True
elif callback_name == 'tree' and self._run_tree:
# TODO: remove special case for tree, which is an adhoc cli option --tree
pass
elif not self._run_additional_callbacks or (callback_needs_enabled and (
# only run if not adhoc, or adhoc was specifically configured to run + check enabled list
C.CALLBACKS_ENABLED is None or callback_name not in C.CALLBACKS_ENABLED)):
# 2.x plugins shipped with ansible should require enabling, older or non shipped should load automatically
continue
try:
callback_obj = callback_plugin()
# avoid a bad plugin not returning an object; only needed because we do a class_only load and bypass loader checks,
# really a bug in the plugin itself, which we ignore as callback errors are not supposed to be fatal.
if callback_obj:
# skip initializing if we already did the work for the same plugin (even with diff names)
if callback_obj not in self._callback_plugins:
callback_obj.set_options()
self._callback_plugins.append(callback_obj)
else:
display.vv("Skipping callback '%s', already loaded as '%s'." % (callback_plugin, callback_name))
else:
display.warning("Skipping callback '%s', as it does not create a valid plugin instance." % callback_name)
continue
except Exception as e:
display.warning("Skipping callback '%s', unable to load due to: %s" % (callback_name, to_native(e)))
continue
self._callbacks_loaded = True
def run(self, play):
'''
Iterates over the roles/tasks in a play, using the given (or default)
strategy for queueing tasks. The default is the linear strategy, which
operates like classic Ansible by keeping all hosts in lock-step with
a given task (meaning no hosts move on to the next task until all hosts
are done with the current task).
'''
if not self._callbacks_loaded:
self.load_callbacks()
all_vars = self._variable_manager.get_vars(play=play)
templar = Templar(loader=self._loader, variables=all_vars)
warn_if_reserved(all_vars, templar.environment.globals.keys())
new_play = play.copy()
new_play.post_validate(templar)
new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers
self.hostvars = HostVars(
inventory=self._inventory,
variable_manager=self._variable_manager,
loader=self._loader,
)
play_context = PlayContext(new_play, self.passwords, self._connection_lockfile.fileno())
if (self._stdout_callback and
hasattr(self._stdout_callback, 'set_play_context')):
self._stdout_callback.set_play_context(play_context)
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
# build the iterator
iterator = PlayIterator(
inventory=self._inventory,
play=new_play,
play_context=play_context,
variable_manager=self._variable_manager,
all_vars=all_vars,
start_at_done=self._start_at_done,
)
# adjust the number of workers to the configured forks or the batch size, whichever is lower
self._initialize_processes(min(self._forks, iterator.batch_size))
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# Because the TQM may survive multiple play runs, we start by marking
# any hosts as failed in the iterator here which may have been marked
# as failed in previous runs. Then we clear the internal list of failed
# hosts so we know what failed this round.
for host_name in self._failed_hosts.keys():
host = self._inventory.get_host(host_name)
iterator.mark_host_failed(host)
for host_name in self._unreachable_hosts.keys():
iterator._play._removed_hosts.append(host_name)
self.clear_failed_hosts()
# during initialization, the PlayContext will clear the start_at_task
# field to signal that a matching task was found, so check that here
# and remember it so we don't try to skip tasks on future plays
if context.CLIARGS.get('start_at_task') is not None and play_context.start_at_task is None:
self._start_at_done = True
# and run the play using the strategy and cleanup on way out
try:
play_return = strategy.run(iterator, play_context)
finally:
strategy.cleanup()
self._cleanup_processes()
# now re-save the hosts that failed from the iterator to our internal list
for host_name in iterator.get_failed_hosts():
self._failed_hosts[host_name] = True
if iterator.end_play:
raise AnsibleEndPlay(play_return)
return play_return
def cleanup(self):
display.debug("RUNNING CLEANUP")
self.terminate()
self._final_q.close()
self._cleanup_processes()
# A bug exists in Python 2.6 that causes an exception to be raised during
# interpreter shutdown. This is only an issue in our CI testing but we
# hit it frequently enough to add a small sleep to avoid the issue.
# This can be removed once we have split controller available in CI.
#
# Further information:
# Issue: https://bugs.python.org/issue4106
# Fix: https://hg.python.org/cpython/rev/d316315a8781
#
try:
if (2, 6) == (sys.version_info[0:2]):
time.sleep(0.0001)
except (IndexError, AttributeError):
# In case there is an issue getting the version info, don't raise an Exception
pass
def _cleanup_processes(self):
if hasattr(self, '_workers'):
for attempts_remaining in range(C.WORKER_SHUTDOWN_POLL_COUNT - 1, -1, -1):
if not any(worker_prc and worker_prc.is_alive() for worker_prc in self._workers):
break
if attempts_remaining:
time.sleep(C.WORKER_SHUTDOWN_POLL_DELAY)
else:
display.warning('One or more worker processes are still running and will be terminated.')
for worker_prc in self._workers:
if worker_prc and worker_prc.is_alive():
try:
worker_prc.terminate()
except AttributeError:
pass
def clear_failed_hosts(self):
self._failed_hosts = dict()
def get_inventory(self):
return self._inventory
def get_variable_manager(self):
return self._variable_manager
def get_loader(self):
return self._loader
def get_workers(self):
return self._workers[:]
def terminate(self):
self._terminated = True
def has_dead_workers(self):
# [<WorkerProcess(WorkerProcess-2, stopped[SIGKILL])>,
# <WorkerProcess(WorkerProcess-2, stopped[SIGTERM])>
defunct = False
for x in self._workers:
if getattr(x, 'exitcode', None):
defunct = True
return defunct
@lock_decorator(attr='_callback_lock')
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in [self._stdout_callback] + self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
# a plugin can opt in to implicit tasks (such as meta). It does this
# by declaring self.wants_implicit_tasks = True.
wants_implicit_tasks = getattr(callback_plugin, 'wants_implicit_tasks', False)
# try to find v2 method, fallback to v1 method, ignore callback if no method found
methods = []
for possible in [method_name, 'v2_on_any']:
gotit = getattr(callback_plugin, possible, None)
if gotit is None:
gotit = getattr(callback_plugin, possible.replace('v2_', ''), None)
if gotit is not None:
methods.append(gotit)
# send clean copies
new_args = []
# If we end up being given an implicit task, we'll set this flag in
# the loop below. If the plugin doesn't care about those, then we
# check and continue to the next iteration of the outer loop.
is_implicit_task = False
for arg in args:
# FIXME: add play/task cleaners
if isinstance(arg, TaskResult):
new_args.append(arg.clean_copy())
# elif isinstance(arg, Play):
# elif isinstance(arg, Task):
else:
new_args.append(arg)
if isinstance(arg, Task) and arg.implicit:
is_implicit_task = True
if is_implicit_task and not wants_implicit_tasks:
continue
for method in methods:
try:
method(*new_args, **kwargs)
except Exception as e:
# TODO: add config toggle to make this fatal or not?
display.warning(u"Failure using method (%s) in callback plugin (%s): %s" % (to_text(method_name), to_text(callback_plugin), to_text(e)))
from traceback import format_tb
from sys import exc_info
display.vvv('Callback Exception: \n' + ' '.join(format_tb(exc_info()[2])))
|
gpl-3.0
|
suhe/odoo
|
addons/pad/py_etherpad/__init__.py
|
505
|
7804
|
"""Module to talk to EtherpadLite API."""
import json
import urllib
import urllib2
class EtherpadLiteClient:
"""Client to talk to EtherpadLite API."""
API_VERSION = 1 # TODO probably 1.1 sometime soon
CODE_OK = 0
CODE_INVALID_PARAMETERS = 1
CODE_INTERNAL_ERROR = 2
CODE_INVALID_FUNCTION = 3
CODE_INVALID_API_KEY = 4
TIMEOUT = 20
apiKey = ""
baseUrl = "http://localhost:9001/api"
def __init__(self, apiKey=None, baseUrl=None):
if apiKey:
self.apiKey = apiKey
if baseUrl:
self.baseUrl = baseUrl
def call(self, function, arguments=None):
"""Create a dictionary of all parameters"""
url = '%s/%d/%s' % (self.baseUrl, self.API_VERSION, function)
params = arguments or {}
params.update({'apikey': self.apiKey})
data = urllib.urlencode(params, True)
try:
opener = urllib2.build_opener()
request = urllib2.Request(url=url, data=data)
response = opener.open(request, timeout=self.TIMEOUT)
result = response.read()
response.close()
except urllib2.HTTPError:
raise
result = json.loads(result)
if result is None:
raise ValueError("JSON response could not be decoded")
return self.handleResult(result)
def handleResult(self, result):
"""Handle API call result"""
if 'code' not in result:
raise Exception("API response has no code")
if 'message' not in result:
raise Exception("API response has no message")
if 'data' not in result:
result['data'] = None
if result['code'] == self.CODE_OK:
return result['data']
elif result['code'] == self.CODE_INVALID_PARAMETERS or result['code'] == self.CODE_INVALID_API_KEY:
raise ValueError(result['message'])
elif result['code'] == self.CODE_INTERNAL_ERROR:
raise Exception(result['message'])
elif result['code'] == self.CODE_INVALID_FUNCTION:
raise Exception(result['message'])
else:
raise Exception("An unexpected error occurred whilst handling the response")
# GROUPS
# Pads can belong to a group. There will always be public pads that do not belong to a group (or we give this group the id 0)
def createGroup(self):
"""creates a new group"""
return self.call("createGroup")
def createGroupIfNotExistsFor(self, groupMapper):
"""this functions helps you to map your application group ids to etherpad lite group ids"""
return self.call("createGroupIfNotExistsFor", {
"groupMapper": groupMapper
})
def deleteGroup(self, groupID):
"""deletes a group"""
return self.call("deleteGroup", {
"groupID": groupID
})
def listPads(self, groupID):
"""returns all pads of this group"""
return self.call("listPads", {
"groupID": groupID
})
def createGroupPad(self, groupID, padName, text=''):
"""creates a new pad in this group"""
params = {
"groupID": groupID,
"padName": padName,
}
if text:
params['text'] = text
return self.call("createGroupPad", params)
# AUTHORS
# These authors are bound to the attributes the users choose (color and name).
def createAuthor(self, name=''):
"""creates a new author"""
params = {}
if name:
params['name'] = name
return self.call("createAuthor", params)
def createAuthorIfNotExistsFor(self, authorMapper, name=''):
"""this functions helps you to map your application author ids to etherpad lite author ids"""
params = {
'authorMapper': authorMapper
}
if name:
params['name'] = name
return self.call("createAuthorIfNotExistsFor", params)
# SESSIONS
# Sessions can be created between a group and a author. This allows
# an author to access more than one group. The sessionID will be set as
# a cookie to the client and is valid until a certain date.
def createSession(self, groupID, authorID, validUntil):
"""creates a new session"""
return self.call("createSession", {
"groupID": groupID,
"authorID": authorID,
"validUntil": validUntil
})
def deleteSession(self, sessionID):
"""deletes a session"""
return self.call("deleteSession", {
"sessionID": sessionID
})
def getSessionInfo(self, sessionID):
"""returns informations about a session"""
return self.call("getSessionInfo", {
"sessionID": sessionID
})
def listSessionsOfGroup(self, groupID):
"""returns all sessions of a group"""
return self.call("listSessionsOfGroup", {
"groupID": groupID
})
def listSessionsOfAuthor(self, authorID):
"""returns all sessions of an author"""
return self.call("listSessionsOfAuthor", {
"authorID": authorID
})
# PAD CONTENT
# Pad content can be updated and retrieved through the API
def getText(self, padID, rev=None):
"""returns the text of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getText", params)
# introduced with pull request merge
def getHtml(self, padID, rev=None):
"""returns the html of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getHTML", params)
def setText(self, padID, text):
"""sets the text of a pad"""
return self.call("setText", {
"padID": padID,
"text": text
})
def setHtml(self, padID, html):
"""sets the text of a pad from html"""
return self.call("setHTML", {
"padID": padID,
"html": html
})
# PAD
# Group pads are normal pads, but with the name schema
# GROUPID$PADNAME. A security manager controls access to them and it is
# forbidden for normal pads to include a $ in the name.
def createPad(self, padID, text=''):
"""creates a new pad"""
params = {
"padID": padID,
}
if text:
params['text'] = text
return self.call("createPad", params)
def getRevisionsCount(self, padID):
"""returns the number of revisions of this pad"""
return self.call("getRevisionsCount", {
"padID": padID
})
def deletePad(self, padID):
"""deletes a pad"""
return self.call("deletePad", {
"padID": padID
})
def getReadOnlyID(self, padID):
"""returns the read only link of a pad"""
return self.call("getReadOnlyID", {
"padID": padID
})
def setPublicStatus(self, padID, publicStatus):
"""sets a boolean for the public status of a pad"""
return self.call("setPublicStatus", {
"padID": padID,
"publicStatus": publicStatus
})
def getPublicStatus(self, padID):
"""return true of false"""
return self.call("getPublicStatus", {
"padID": padID
})
def setPassword(self, padID, password):
"""returns ok or a error message"""
return self.call("setPassword", {
"padID": padID,
"password": password
})
def isPasswordProtected(self, padID):
"""returns true or false"""
return self.call("isPasswordProtected", {
"padID": padID
})
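# Minimal usage sketch (assumes an Etherpad Lite server reachable at the default
# baseUrl; the API key and pad name below are hypothetical):
#
#   client = EtherpadLiteClient(apiKey='EtherpadApiKey')
#   client.createPad('demo-pad', text='hello world')
#   print client.getText('demo-pad')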
|
gpl-3.0
|
shubhamVerma/code-eval
|
Category - Easy/sumdigitsCodeEval.py
|
1
|
1271
|
'''
sumdigitsCodeEval.py - Solution to Problem Sum of Digits (Category - Easy)
Copyright (C) 2013, Shubham Verma
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Description:
Given a positive integer, find the sum of its constituent digits.
Input sample:
The first argument will be a text file containing positive integers, one per line.
e.g.
23
496
Output sample:
Print to stdout, the sum of the numbers that make up the integer, one per line.
e.g.
5
19
'''
import sys
if __name__ == '__main__':
f = open(sys.argv[1], 'r')
test_cases = f.read().split('\n')
for test_case in test_cases:
print sum( map(int, test_case) )
f.close()
|
gpl-3.0
|
DazWorrall/ansible
|
lib/ansible/modules/packaging/language/composer.py
|
24
|
9023
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Dimitrios Tydeas Mengidis <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: composer
author:
- "Dimitrios Tydeas Mengidis (@dmtrs)"
- "René Moser (@resmo)"
short_description: Dependency Manager for PHP
version_added: "1.6"
description:
- >
Composer is a tool for dependency management in PHP. It allows you to
declare the dependent libraries your project needs and it will install
them in your project for you.
options:
command:
version_added: "1.8"
description:
- Composer command like "install", "update" and so on.
required: false
default: install
arguments:
version_added: "2.0"
description:
- Composer arguments like required package, version and so on.
required: false
default: null
executable:
version_added: "2.4"
description:
- Path to PHP Executable on the remote host, if PHP is not in PATH
required: false
default: null
aliases: [ "php_path" ]
working_dir:
description:
- Directory of your project (see --working-dir). This is required when
the command is not run globally.
- Will be ignored if C(global_command=true).
required: false
default: null
aliases: [ "working-dir" ]
global_command:
version_added: "2.4"
description:
- Runs the specified command globally.
required: false
choices: [ true, false]
default: false
aliases: [ "global-command" ]
prefer_source:
description:
- Forces installation from package sources when possible (see --prefer-source).
required: false
default: false
choices: [ true, false]
aliases: [ "prefer-source" ]
prefer_dist:
description:
- Forces installation from package dist even for dev versions (see --prefer-dist).
required: false
default: false
choices: [ true, false]
aliases: [ "prefer-dist" ]
no_dev:
description:
- Disables installation of require-dev packages (see --no-dev).
required: false
default: true
choices: [ true, false]
aliases: [ "no-dev" ]
no_scripts:
description:
- Skips the execution of all scripts defined in composer.json (see --no-scripts).
required: false
default: false
choices: [ true, false]
aliases: [ "no-scripts" ]
no_plugins:
description:
- Disables all plugins ( see --no-plugins ).
required: false
default: false
choices: [ true, false]
aliases: [ "no-plugins" ]
optimize_autoloader:
description:
- Optimize autoloader during autoloader dump (see --optimize-autoloader).
- Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
- Recommended especially for production, but can take a bit of time to run.
required: false
default: true
choices: [ true, false]
aliases: [ "optimize-autoloader" ]
ignore_platform_reqs:
version_added: "2.0"
description:
- Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
required: false
default: false
choices: [ true, false]
aliases: [ "ignore-platform-reqs" ]
requirements:
- php
- composer installed in bin path (recommended /usr/local/bin)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
- We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
'''
EXAMPLES = '''
# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock
- composer:
command: install
working_dir: /path/to/project
- composer:
command: require
arguments: my/package
working_dir: /path/to/project
# Clone project and install with all dependencies
- composer:
command: create-project
arguments: package/package /path/to/project ~1.0
working_dir: /path/to/project
prefer_dist: yes
# Installs package globally
- composer:
command: require
global_command: yes
arguments: my/package
'''
import re
from ansible.module_utils.basic import AnsibleModule
def parse_out(string):
return re.sub("\s+", " ", string).strip()
def has_changed(string):
return "Nothing to install or update" not in string
def get_available_options(module, command='install'):
# get all available options from a composer command using composer help to json
rc, out, err = composer_command(module, "help %s --format=json" % command)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output)
command_help_json = module.from_json(out)
return command_help_json['definition']['options']
def composer_command(module, command, arguments="", options=None, global_command=False):
if options is None:
options = []
if module.params['executable'] is None:
php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
else:
php_path = module.params['executable']
composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments)
return module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(default="install", type="str", required=False),
arguments=dict(default="", type="str", required=False),
executable=dict(type="path", required=False, aliases=["php_path"]),
working_dir=dict(type="path", aliases=["working-dir"]),
global_command=dict(default=False, type="bool", aliases=["global-command"]),
prefer_source=dict(default=False, type="bool", aliases=["prefer-source"]),
prefer_dist=dict(default=False, type="bool", aliases=["prefer-dist"]),
no_dev=dict(default=True, type="bool", aliases=["no-dev"]),
no_scripts=dict(default=False, type="bool", aliases=["no-scripts"]),
no_plugins=dict(default=False, type="bool", aliases=["no-plugins"]),
optimize_autoloader=dict(default=True, type="bool", aliases=["optimize-autoloader"]),
ignore_platform_reqs=dict(default=False, type="bool", aliases=["ignore-platform-reqs"]),
),
required_if=[('global_command', False, ['working_dir'])],
supports_check_mode=True
)
# Get composer command with fallback to default
command = module.params['command']
if re.search(r"\s", command):
module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
arguments = module.params['arguments']
global_command = module.params['global_command']
available_options = get_available_options(module=module, command=command)
options = []
# Default options
default_options = [
'no-ansi',
'no-interaction',
'no-progress',
]
for option in default_options:
if option in available_options:
option = "--%s" % option
options.append(option)
if not global_command:
options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
option_params = {
'prefer_source': 'prefer-source',
'prefer_dist': 'prefer-dist',
'no_dev': 'no-dev',
'no_scripts': 'no-scripts',
'no_plugins': 'no-plugins',
'optimize_autoloader': 'optimize-autoloader',
'ignore_platform_reqs': 'ignore-platform-reqs',
}
for param, option in option_params.items():
if module.params.get(param) and option in available_options:
option = "--%s" % option
options.append(option)
if module.check_mode:
options.append('--dry-run')
rc, out, err = composer_command(module, command, arguments, options, global_command)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output, stdout=err)
else:
# Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
output = parse_out(out + err)
module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
if __name__ == '__main__':
main()
|
gpl-3.0
|
vmora/QGIS
|
python/plugins/processing/algs/gdal/rearrange_bands.py
|
5
|
5727
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
rearrange_bands.py
---------------------
Date : August 2018
Copyright : (C) 2018 by Mathieu Pellerin
Email : nirvn dot asia at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Mathieu Pellerin'
__date__ = 'August 2018'
__copyright__ = '(C) 2018, Mathieu Pellerin'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import re
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterEnum,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class rearrange_bands(GdalAlgorithm):
INPUT = 'INPUT'
BANDS = 'BANDS'
OPTIONS = 'OPTIONS'
DATA_TYPE = 'DATA_TYPE'
OUTPUT = 'OUTPUT'
TYPES = ['Use input layer data type', 'Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterBand(self.BANDS,
self.tr('Selected band(s)'),
None,
self.INPUT,
allowMultiple=True))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE,
self.tr('Output data type'),
self.TYPES,
allowMultiple=False,
defaultValue=0)
dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(dataType_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Converted')))
def name(self):
return 'rearrange_bands'
def displayName(self):
return self.tr('Rearrange bands')
def group(self):
return self.tr('Raster conversion')
def groupId(self):
return 'rasterconversion'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'translate.png'))
def shortHelpString(self):
return self.tr("This algorithm creates a new raster using selected band(s) from a given raster layer.\n\n"
"The algorithm also makes it possible to reorder the bands for the newly-created raster.")
def commandName(self):
return 'gdal_translate'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
if inLayer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
arguments = []
bands = self.parameterAsInts(parameters, self.BANDS, context)
for band in bands:
arguments.append('-b {}'.format(band))
data_type = self.parameterAsEnum(parameters, self.DATA_TYPE, context)
if data_type:
arguments.append('-ot ' + self.TYPES[data_type])
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
options = self.parameterAsString(parameters, self.OPTIONS, context)
if options:
arguments.extend(GdalUtils.parseCreationOptions(options))
arguments.append(inLayer.source())
arguments.append(out)
return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
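# For illustration only: selecting bands 3, 2 and 1 from a GeoTIFF input would
# produce a console call roughly like (paths are hypothetical)
#   gdal_translate -b 3 -b 2 -b 1 -of GTiff /path/to/input.tif /path/to/output.tif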
|
gpl-2.0
|
frohoff/Empire
|
lib/modules/powershell/exploitation/exploit_jenkins.py
|
2
|
3352
|
import base64
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Exploit-Jenkins',
'Author': ['@luxcupitor'],
'Description': ("Run command on unauthenticated Jenkins Script consoles."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'Pass a command to run. If windows, you may have to prepend "cmd /c ".'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Rhost' : {
'Description' : 'Specify the host to exploit.',
'Required' : True,
'Value' : ''
},
'Port' : {
'Description' : 'Specify the port to use.',
'Required' : True,
'Value' : '8080'
},
'Cmd' : {
'Description' : 'command to run on remote jenkins script console.',
'Required' : True,
'Value' : 'whoami'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/exploitation/Exploit-Jenkins.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = "\nExploit-Jenkins"
scriptEnd += " -Rhost "+str(self.options['Rhost']['Value'])
scriptEnd += " -Port "+str(self.options['Port']['Value'])
command = str(self.options['Cmd']['Value'])
# if the command contains spaces, wrap it in quotes before passing to ps script
if " " in command:
scriptEnd += " -Cmd \"" + command + "\""
else:
scriptEnd += " -Cmd " + command
if obfuscate:
scriptEnd = helpers.obfuscate(psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
|
bsd-3-clause
|
schimar/ngs_tools
|
remove_collapsed_clusters.py
|
2
|
1110
|
#! /usr/bin/python
#
# This script reads a fasta file (the last vsearch run with --id 0.8
# to test for whether clusters collapse at a lower id) and removes
# all entries that have (the 2nd) seqs > 1.
#
# Usage: ./remove_collapsed_clusters.py <input-file_name.fasta> <new_file_name.fasta>
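# Example of the headers this script parses (cluster names are hypothetical):
#   >Cluster_12;;seqs=3 -> skipped (collapsed cluster, seqs > 1)
#   >Cluster_7;;seqs=1  -> written to the output file and counted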
import sys
import re
#import shutil
#import tempfile
newfile = open(sys.argv[2], 'a')
n_clusters = int()
with open(sys.argv[1], 'rb') as file:
for i, line in enumerate(file):
if line[0] == ">":
cluster = re.findall(';;seqs=[0-9]+', line)[0]
seq_n = int(re.findall('[0-9]+', cluster)[0])
# newline = str(cluster + ',' + seq_n + '\n')
#newfile.write(newline)
if seq_n != 1:
continue
else:
n_clusters += 1
newfile.write(line)
else:
if seq_n == 1:
newfile.write(line)
else:
continue
print n_clusters, "uncollapsed clusters found"
file.close()
newfile.close()
|
gpl-2.0
|
kajgan/stbgui
|
lib/python/Components/Converter/ClientsStreaming.py
|
1
|
3432
|
from Converter import Converter
from Poll import Poll
from Components.Element import cached
from Components.Sources.StreamService import StreamServiceList
from enigma import eStreamServer
from ServiceReference import ServiceReference
import socket
class ClientsStreaming(Converter, Poll, object):
UNKNOWN = -1
REF = 0
IP = 1
NAME = 2
ENCODER = 3
NUMBER = 4
SHORT_ALL = 5
ALL = 6
INFO = 7
INFO_RESOLVE = 8
INFO_RESOLVE_SHORT = 9
EXTRA_INFO = 10
def __init__(self, type):
Converter.__init__(self, type)
Poll.__init__(self)
self.poll_interval = 30000
self.poll_enabled = True
if type == "REF":
self.type = self.REF
elif type == "IP":
self.type = self.IP
elif type == "NAME":
self.type = self.NAME
elif type == "ENCODER":
self.type = self.ENCODER
elif type == "NUMBER":
self.type = self.NUMBER
elif type == "SHORT_ALL":
self.type = self.SHORT_ALL
elif type == "ALL":
self.type = self.ALL
elif type == "INFO":
self.type = self.INFO
elif type == "INFO_RESOLVE":
self.type = self.INFO_RESOLVE
elif type == "INFO_RESOLVE_SHORT":
self.type = self.INFO_RESOLVE_SHORT
elif type == "EXTRA_INFO":
self.type = self.EXTRA_INFO
else:
self.type = self.UNKNOWN
self.streamServer = eStreamServer.getInstance()
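# In a skin, one of the type strings above selects the output, e.g.
# (illustrative): <convert type="ClientsStreaming">SHORT_ALL</convert>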
@cached
def getText(self):
if self.streamServer is None:
return ""
clients = []
refs = []
ips = []
names = []
encoders = []
extrainfo = _("ClientIP") + "\t" + _("Transcode") + "\t" + _("Channel") + "\n"
info = ""
for x in self.streamServer.getConnectedClients():
refs.append((x[1]))
servicename = ServiceReference(x[1]).getServiceName() or "(unknown service)"
service_name = servicename
names.append((service_name))
ip = x[0]
ips.append((ip))
if int(x[2]) == 0:
strtype = "S"
encoder = _('NO')
else:
strtype = "T"
encoder = _('YES')
encoders.append((encoder))
if self.type == self.INFO_RESOLVE or self.type == self.INFO_RESOLVE_SHORT:
try:
raw = socket.gethostbyaddr(ip)
ip = raw[0]
except:
pass
if self.type == self.INFO_RESOLVE_SHORT:
ip, sep, tail = ip.partition('.')
info += ("%s %-8s %s\n") % (strtype, ip, service_name)
clients.append((ip, service_name, encoder))
extrainfo += ("%-8s\t%s\t%s") % (ip, encoder, service_name) +"\n"
if self.type == self.REF:
return ' '.join(refs)
elif self.type == self.IP:
return ' '.join(ips)
elif self.type == self.NAME:
return ' '.join(names)
elif self.type == self.ENCODER:
return _("Transcoding: ") + ' '.join(encoders)
elif self.type == self.NUMBER:
return str(len(clients))
elif self.type == self.EXTRA_INFO:
return extrainfo
elif self.type == self.SHORT_ALL:
return _("Total clients streaming: %d (%s)") % (len(clients), ' '.join(names))
elif self.type == self.ALL:
return '\n'.join(' '.join(elems) for elems in clients)
elif self.type == self.INFO or self.type == self.INFO_RESOLVE or self.type == self.INFO_RESOLVE_SHORT:
return info
else:
return "(unknown)"
return ""
text = property(getText)
@cached
def getBoolean(self):
if self.streamServer is None:
return False
return (self.streamServer.getConnectedClients() or StreamServiceList) and True or False
boolean = property(getBoolean)
def changed(self, what):
Converter.changed(self, (self.CHANGED_POLL,))
def doSuspend(self, suspended):
pass
|
gpl-2.0
|
viaict/viaduct
|
app/forms/pimpy.py
|
1
|
1268
|
import datetime
from flask_babel import _
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, DateTimeField, SelectField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import InputRequired, Optional
from app import constants
from app.service import group_service, pimpy_service
class AddTaskForm(FlaskForm):
name = StringField(_('Name'), validators=[InputRequired()])
content = TextAreaField(_('Content'), validators=[Optional()])
group = QuerySelectField(
_('Group'),
query_factory=lambda: group_service.get_groups_for_user(current_user),
get_label=lambda x: x.name)
users = StringField(_('Users'))
status = SelectField(_('Status'), coerce=int,
choices=pimpy_service.get_task_status_choices())
class AddMinuteForm(FlaskForm):
content = TextAreaField(_('Minute content'), validators=[InputRequired()])
group = QuerySelectField(
_('Group'),
query_factory=lambda: group_service.get_groups_for_user(current_user),
get_label=lambda x: x.name)
date = DateTimeField(_('Date'), format=constants.DATE_FORMAT,
default=datetime.date.today)
|
mit
|
Cito/DBUtils
|
tests/mock_db.py
|
1
|
3341
|
"""This module serves as a mock object for the DB-API 2 module"""
threadsafety = 2
class Error(Exception):
pass
class DatabaseError(Error):
pass
class OperationalError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
def connect(database=None, user=None):
return Connection(database, user)
class Connection:
has_ping = False
num_pings = 0
def __init__(self, database=None, user=None):
self.database = database
self.user = user
self.valid = False
if database == 'error':
raise OperationalError
self.open_cursors = 0
self.num_uses = 0
self.num_queries = 0
self.num_pings = 0
self.session = []
self.valid = True
def close(self):
if not self.valid:
raise InternalError
self.open_cursors = 0
self.num_uses = 0
self.num_queries = 0
self.session = []
self.valid = False
def commit(self):
if not self.valid:
raise InternalError
self.session.append('commit')
def rollback(self):
if not self.valid:
raise InternalError
self.session.append('rollback')
def ping(self):
cls = self.__class__
cls.num_pings += 1
if not cls.has_ping:
raise AttributeError
if not self.valid:
raise OperationalError
def cursor(self, name=None):
if not self.valid:
raise InternalError
return Cursor(self, name)
class Cursor:
def __init__(self, con, name=None):
self.con = con
self.valid = False
if name == 'error':
raise OperationalError
self.result = None
self.inputsizes = []
self.outputsizes = {}
con.open_cursors += 1
self.valid = True
def close(self):
if not self.valid:
raise InternalError
self.con.open_cursors -= 1
self.valid = False
def execute(self, operation):
if not self.valid or not self.con.valid:
raise InternalError
self.con.num_uses += 1
if operation.startswith('select '):
self.con.num_queries += 1
self.result = operation[7:]
elif operation.startswith('set '):
self.con.session.append(operation[4:])
self.result = None
elif operation == 'get sizes':
self.result = (self.inputsizes, self.outputsizes)
self.inputsizes = []
self.outputsizes = {}
else:
raise ProgrammingError
def fetchone(self):
if not self.valid:
raise InternalError
result = self.result
self.result = None
return result
def callproc(self, procname):
if not self.valid or not self.con.valid or not procname:
raise InternalError
self.con.num_uses += 1
def setinputsizes(self, sizes):
if not self.valid:
raise InternalError
self.inputsizes = sizes
def setoutputsize(self, size, column=None):
if not self.valid:
raise InternalError
self.outputsizes[column] = size
def __del__(self):
if self.valid:
self.close()
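# Minimal usage sketch of this mock, mirroring how the DBUtils tests drive it
# (database and user names are arbitrary):
#
#   con = connect('mockdb', 'mockuser')
#   cur = con.cursor()
#   cur.execute('select test')   # counted in con.num_queries
#   print(cur.fetchone())        # -> 'test'
#   cur.close()
#   con.close()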
|
mit
|
amaret/wind.util
|
windutil/main.py
|
1
|
5085
|
# Copyright Amaret, Inc 2011-2015. All rights reserved.
''' Wind Docker Container Util '''
import os
import time
import json
from subprocess import call
from windutil.argparser import parse
from windutil.scrlogger import ScrLogger
LOG = ScrLogger()
DEFAULT_CONTAINER_CONFIG = [
{
'name': 'redis',
'priority': 0,
'run': 'docker run --name redis -p 6379:6379 -d redis',
'image': 'redis'
}
]
CONFIG_FILE_PATH = os.path.expanduser('~') + '/.wutilrc'
def _read_config():
''' look up config, if not found init '''
rcfile = os.path.expanduser('~') + '/.wutilrc'
if not os.path.exists(rcfile):
wutilrc = open(CONFIG_FILE_PATH, 'w')
LOG.debug("writing config to %s" % CONFIG_FILE_PATH)
wutilrc.write(
json.dumps(
DEFAULT_CONTAINER_CONFIG,
sort_keys=True,
indent=4,
separators=(',', ': ')))
wutilrc.close()
return DEFAULT_CONTAINER_CONFIG
LOG.debug("reading config from %s" % CONFIG_FILE_PATH)
wutilrc = open(CONFIG_FILE_PATH, 'r')
json_str = wutilrc.read()
wutilrc.close()
return json.loads(json_str)
def _load_config():
'''store by name for key'''
info = {}
for cntr in CONTAINER_CONFIG:
info[cntr['name']] = cntr
return info
CONTAINER_CONFIG = _read_config()
CONTAINER_INFO = _load_config()
def _rm(pargs):
'''rm'''
if pargs.use_all:
_container_command('rm', _sorted_config_names())
else:
_container_command('rm', pargs.containers)
def _start(pargs):
'''start'''
if pargs.use_all:
_container_command('start', _sorted_config_names())
else:
_container_command('start', pargs.containers)
def _stop(pargs):
'''stop'''
if pargs.use_all:
_container_command('stop', _reversed_config_names())
else:
_container_command('stop', pargs.containers)
def _container_command(command, names):
'''command'''
LOG.debug(command + "(ing) ")
for container in names:
LOG.debug(command + " " + container)
call(["docker", command, container])
if 'delay' in CONTAINER_INFO[container]:
secs = CONTAINER_INFO[container]['delay']
LOG.debug("sleeping %s seconds" % (secs))
time.sleep(secs)
def _run(pargs):
'''run'''
LOG.debug("run(ing)")
names = []
if pargs.use_all:
names = _sorted_config_names()
else:
names = pargs.containers
for container in names:
LOG.debug("run " + container)
arglist = CONTAINER_INFO[container]['run'].split()
call(arglist)
if 'delay' in CONTAINER_INFO[container]:
secs = CONTAINER_INFO[container]['delay']
LOG.debug("sleeping %s seconds" % (secs))
time.sleep(secs)
def _pull(pargs):
'''run'''
LOG.debug("pull(ing)")
names = []
if pargs.use_all:
names = _sorted_config_names()
else:
names = pargs.containers
for container in names:
LOG.debug("pull " + container)
img = CONTAINER_INFO[container]['image']
call(['docker', 'pull', img])
def _upgrade(pargs):
'''upgrade'''
if pargs.local is False:
_pull(pargs)
_stop(pargs)
_rm(pargs)
_run(pargs)
def _ps(pargs):
'''ps'''
option = '-a'
from subprocess import Popen, PIPE
process = Popen(["docker", "ps", option], stdout=PIPE)
(output, _) = process.communicate()
process.wait()
import string
lines = string.split(output, '\n')
status_idx = lines[0].index('STATUS')
print lines[0][status_idx:]
keys = CONTAINER_INFO.keys()
for line in lines[1:]:
if len(line) > 0:
cname = line[status_idx:].split()[-1]
if pargs.all or cname in keys:
print line[status_idx:]
def _reversed_config_names():
'''reverse list'''
return [x for x in reversed(_sorted_config_names())]
def _sorted_config_names():
'''manage dependencies'''
newlist = sorted(CONTAINER_INFO.values(), key=lambda x: x['priority'],
reverse=False)
return [x['name'] for x in newlist]
def main():
'''main entry point'''
# pylint: disable=too-many-branches
try:
cmd, pargs = parse()
pargs.use_all = 'containers' in pargs and pargs.containers[0] == 'all'
if cmd == 'init':
print "Initialized"
return
if cmd == 'ps':
_ps(pargs)
return
if cmd == 'start':
_start(pargs)
if cmd == 'login':
print "login command"
if cmd == 'pull':
_pull(pargs)
if cmd == 'rm':
_rm(pargs)
if cmd == 'run':
_run(pargs)
if cmd == 'stop':
_stop(pargs)
if cmd == 'upgrade':
_upgrade(pargs)
# pylint: disable=broad-except
except Exception, ex:
LOG.error(ex)
import traceback
trace = traceback.format_exc()
LOG.trace(trace)
|
gpl-2.0
|
miconof/headphones
|
headphones/notifiers.py
|
1
|
28911
|
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
from headphones import logger, helpers, common, request
from xml.dom import minidom
from httplib import HTTPSConnection
from urlparse import parse_qsl
from urllib import urlencode
from pynma import pynma
import base64
import cherrypy
import urllib
import urllib2
import headphones
import os.path
import subprocess
import gntp.notifier
import json
import oauth2 as oauth
import pythontwitter as twitter
from email.mime.text import MIMEText
import smtplib
import email.utils
class GROWL(object):
"""
Growl notifications, for OS X.
"""
def __init__(self):
self.enabled = headphones.CONFIG.GROWL_ENABLED
self.host = headphones.CONFIG.GROWL_HOST
self.password = headphones.CONFIG.GROWL_PASSWORD
def conf(self, options):
return cherrypy.config['config'].get('Growl', options)
def notify(self, message, event):
if not self.enabled:
return
# Split host and port
if self.host == "":
host, port = "localhost", 23053
if ":" in self.host:
host, port = self.host.split(':', 1)
port = int(port)
else:
host, port = self.host, 23053
# If password is empty, assume none
if self.password == "":
password = None
else:
password = self.password
# Register notification
growl = gntp.notifier.GrowlNotifier(
applicationName='Headphones',
notifications=['New Event'],
defaultNotifications=['New Event'],
hostname=host,
port=port,
password=password
)
try:
growl.register()
except gntp.notifier.errors.NetworkError:
logger.warning(u'Growl notification failed: network error')
return
except gntp.notifier.errors.AuthError:
logger.warning(u'Growl notification failed: authentication error')
return
# Fix message
message = message.encode(headphones.SYS_ENCODING, "replace")
# Send it, including an image
image_file = os.path.join(str(headphones.PROG_DIR),
"data/images/headphoneslogo.png")
with open(image_file, 'rb') as f:
image = f.read()
try:
growl.notify(
noteType='New Event',
title=event,
description=message,
icon=image
)
except gntp.notifier.errors.NetworkError:
logger.warning(u'Growl notification failed: network error')
return
logger.info(u"Growl notifications sent.")
def updateLibrary(self):
#For uniformity reasons not removed
return
def test(self, host, password):
self.enabled = True
self.host = host
self.password = password
self.notify('ZOMG Lazors Pewpewpew!', 'Test Message')
class PROWL(object):
"""
Prowl notifications.
"""
def __init__(self):
self.enabled = headphones.CONFIG.PROWL_ENABLED
self.keys = headphones.CONFIG.PROWL_KEYS
self.priority = headphones.CONFIG.PROWL_PRIORITY
def conf(self, options):
return cherrypy.config['config'].get('Prowl', options)
def notify(self, message, event):
if not headphones.CONFIG.PROWL_ENABLED:
return
http_handler = HTTPSConnection("api.prowlapp.com")
data = {'apikey': headphones.CONFIG.PROWL_KEYS,
'application': 'Headphones',
'event': event,
'description': message.encode("utf-8"),
'priority': headphones.CONFIG.PROWL_PRIORITY}
http_handler.request("POST",
"/publicapi/add",
headers={'Content-type': "application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
if request_status == 200:
logger.info(u"Prowl notifications sent.")
return True
elif request_status == 401:
logger.info(u"Prowl auth failed: %s" % response.reason)
return False
else:
logger.info(u"Prowl notification failed.")
return False
def updateLibrary(self):
        # Kept for interface uniformity; intentionally a no-op
return
def test(self, keys, priority):
self.enabled = True
self.keys = keys
self.priority = priority
self.notify('ZOMG Lazors Pewpewpew!', 'Test Message')
class MPC(object):
"""
MPC library update
"""
def __init__(self):
pass
def notify(self):
subprocess.call(["mpc", "update"])
class XBMC(object):
"""
XBMC notifications
"""
def __init__(self):
self.hosts = headphones.CONFIG.XBMC_HOST
self.username = headphones.CONFIG.XBMC_USERNAME
self.password = headphones.CONFIG.XBMC_PASSWORD
def _sendhttp(self, host, command):
url_command = urllib.urlencode(command)
url = host + '/xbmcCmds/xbmcHttp/?' + url_command
if self.password:
return request.request_content(url, auth=(self.username, self.password))
else:
return request.request_content(url)
def _sendjson(self, host, method, params={}):
data = [{'id': 0, 'jsonrpc': '2.0', 'method': method, 'params': params}]
headers = {'Content-Type': 'application/json'}
url = host + '/jsonrpc'
if self.password:
response = request.request_json(url, method="post", data=json.dumps(data), headers=headers, auth=(self.username, self.password))
else:
response = request.request_json(url, method="post", data=json.dumps(data), headers=headers)
if response:
return response[0]['result']
def update(self):
# From what I read you can't update the music library on a per directory or per path basis
# so need to update the whole thing
hosts = [x.strip() for x in self.hosts.split(',')]
for host in hosts:
logger.info('Sending library update command to XBMC @ ' + host)
request = self._sendjson(host, 'AudioLibrary.Scan')
if not request:
logger.warn('Error sending update request to XBMC')
def notify(self, artist, album, albumartpath):
hosts = [x.strip() for x in self.hosts.split(',')]
header = "Headphones"
message = "%s - %s added to your library" % (artist, album)
time = "3000" # in ms
for host in hosts:
            logger.info('Sending notification command to XBMC @ ' + host)
try:
version = self._sendjson(host, 'Application.GetProperties', {'properties': ['version']})['version']['major']
if version < 12: #Eden
notification = header + "," + message + "," + time + "," + albumartpath
notifycommand = {'command': 'ExecBuiltIn', 'parameter': 'Notification(' + notification + ')'}
request = self._sendhttp(host, notifycommand)
else: #Frodo
params = {'title': header, 'message': message, 'displaytime': int(time), 'image': albumartpath}
request = self._sendjson(host, 'GUI.ShowNotification', params)
if not request:
raise Exception
except Exception:
logger.error('Error sending notification request to XBMC')
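# Illustrative sketch (not part of the upstream file): the JSON-RPC body that
# XBMC._sendjson() posts to <host>/jsonrpc is a one-element batch, e.g. for a
# library scan:
#
#   [{"id": 0, "jsonrpc": "2.0", "method": "AudioLibrary.Scan", "params": {}}]
#
# and for a Frodo-style notification:
#
#   [{"id": 0, "jsonrpc": "2.0", "method": "GUI.ShowNotification",
#     "params": {"title": "Headphones", "message": "Artist - Album added to your library",
#                "displaytime": 3000, "image": "/path/to/albumart.jpg"}}]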
class LMS(object):
"""
Class for updating a Logitech Media Server
"""
def __init__(self):
self.hosts = headphones.CONFIG.LMS_HOST
def _sendjson(self, host):
data = {'id': 1, 'method': 'slim.request', 'params': ["", ["rescan"]]}
data = json.JSONEncoder().encode(data)
content = {'Content-Type': 'application/json'}
req = urllib2.Request(host + '/jsonrpc.js', data, content)
try:
handle = urllib2.urlopen(req)
except Exception as e:
logger.warn('Error opening LMS url: %s' % e)
return
response = json.JSONDecoder().decode(handle.read())
try:
return response['result']
        except KeyError:
logger.warn('LMS returned error: %s' % response['error'])
return response['error']
def update(self):
hosts = [x.strip() for x in self.hosts.split(',')]
for host in hosts:
logger.info('Sending library rescan command to LMS @ ' + host)
request = self._sendjson(host)
if request:
logger.warn('Error sending rescan request to LMS')
class Plex(object):
def __init__(self):
self.server_hosts = headphones.CONFIG.PLEX_SERVER_HOST
self.client_hosts = headphones.CONFIG.PLEX_CLIENT_HOST
self.username = headphones.CONFIG.PLEX_USERNAME
self.password = headphones.CONFIG.PLEX_PASSWORD
self.token = headphones.CONFIG.PLEX_TOKEN
def _sendhttp(self, host, command):
url = host + '/xbmcCmds/xbmcHttp/?' + command
if self.password:
response = request.request_response(url, auth=(self.username, self.password))
else:
response = request.request_response(url)
return response
def _sendjson(self, host, method, params={}):
data = [{'id': 0, 'jsonrpc': '2.0', 'method': method, 'params': params}]
headers = {'Content-Type': 'application/json'}
url = host + '/jsonrpc'
if self.password:
response = request.request_json(url, method="post", data=json.dumps(data), headers=headers, auth=(self.username, self.password))
else:
response = request.request_json(url, method="post", data=json.dumps(data), headers=headers)
if response:
return response[0]['result']
def update(self):
# From what I read you can't update the music library on a per directory or per path basis
# so need to update the whole thing
hosts = [x.strip() for x in self.server_hosts.split(',')]
for host in hosts:
            logger.info('Sending library update command to Plex Media Server @ ' + host)
url = "%s/library/sections" % host
if self.token:
params = {'X-Plex-Token': self.token}
else:
params = False
r = request.request_minidom(url, params=params)
sections = r.getElementsByTagName('Directory')
if not sections:
logger.info(u"Plex Media Server not running on: " + host)
return False
for s in sections:
if s.getAttribute('type') == "artist":
url = "%s/library/sections/%s/refresh" % (host, s.getAttribute('key'))
request.request_response(url, params=params)
def notify(self, artist, album, albumartpath):
hosts = [x.strip() for x in self.client_hosts.split(',')]
header = "Headphones"
message = "%s - %s added to your library" % (artist, album)
time = "3000" # in ms
for host in hosts:
logger.info('Sending notification command to Plex client @ ' + host)
try:
version = self._sendjson(host, 'Application.GetProperties', {'properties': ['version']})['version']['major']
if version < 12: #Eden
notification = header + "," + message + "," + time + "," + albumartpath
notifycommand = {'command': 'ExecBuiltIn', 'parameter': 'Notification(' + notification + ')'}
request = self._sendhttp(host, notifycommand)
else: #Frodo
params = {'title': header, 'message': message, 'displaytime': int(time), 'image': albumartpath}
request = self._sendjson(host, 'GUI.ShowNotification', params)
if not request:
raise Exception
except Exception:
logger.error('Error sending notification request to Plex client @ ' + host)
class NMA(object):
def notify(self, artist=None, album=None, snatched=None):
title = 'Headphones'
api = headphones.CONFIG.NMA_APIKEY
nma_priority = headphones.CONFIG.NMA_PRIORITY
logger.debug(u"NMA title: " + title)
logger.debug(u"NMA API: " + api)
logger.debug(u"NMA Priority: " + str(nma_priority))
if snatched:
event = snatched + " snatched!"
message = "Headphones has snatched: " + snatched
else:
event = artist + ' - ' + album + ' complete!'
message = "Headphones has downloaded and postprocessed: " + artist + ' [' + album + ']'
logger.debug(u"NMA event: " + event)
logger.debug(u"NMA message: " + message)
batch = False
p = pynma.PyNMA()
keys = api.split(',')
p.addkey(keys)
if len(keys) > 1:
batch = True
response = p.push(title, event, message, priority=nma_priority, batch_mode=batch)
if not response[api][u'code'] == u'200':
logger.error(u'Could not send notification to NotifyMyAndroid')
return False
else:
return True
class PUSHBULLET(object):
def __init__(self):
self.apikey = headphones.CONFIG.PUSHBULLET_APIKEY
self.deviceid = headphones.CONFIG.PUSHBULLET_DEVICEID
def notify(self, message):
if not headphones.CONFIG.PUSHBULLET_ENABLED:
return
url = "https://api.pushbullet.com/v2/pushes"
data = {'type': "note",
'title': "Headphones",
'body': message}
if self.deviceid:
data['device_iden'] = self.deviceid
        headers = {'Content-type': "application/json",
                   'Authorization': 'Bearer ' + headphones.CONFIG.PUSHBULLET_APIKEY}
response = request.request_json(url, method="post", headers=headers, data=json.dumps(data))
if response:
logger.info(u"PushBullet notifications sent.")
return True
else:
logger.info(u"PushBullet notification failed.")
return False
class PUSHALOT(object):
def notify(self, message, event):
if not headphones.CONFIG.PUSHALOT_ENABLED:
return
pushalot_authorizationtoken = headphones.CONFIG.PUSHALOT_APIKEY
logger.debug(u"Pushalot event: " + event)
logger.debug(u"Pushalot message: " + message)
logger.debug(u"Pushalot api: " + pushalot_authorizationtoken)
http_handler = HTTPSConnection("pushalot.com")
data = {'AuthorizationToken': pushalot_authorizationtoken,
'Title': event.encode('utf-8'),
'Body': message.encode("utf-8")}
http_handler.request("POST",
"/api/sendmessage",
headers={'Content-type': "application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
logger.debug(u"Pushalot response status: %r" % request_status)
logger.debug(u"Pushalot response headers: %r" % response.getheaders())
logger.debug(u"Pushalot response body: %r" % response.read())
if request_status == 200:
logger.info(u"Pushalot notifications sent.")
return True
elif request_status == 410:
logger.info(u"Pushalot auth failed: %s" % response.reason)
return False
else:
logger.info(u"Pushalot notification failed.")
return False
class Synoindex(object):
def __init__(self, util_loc='/usr/syno/bin/synoindex'):
self.util_loc = util_loc
def util_exists(self):
return os.path.exists(self.util_loc)
def notify(self, path):
path = os.path.abspath(path)
if not self.util_exists():
logger.warn("Error sending notification: synoindex utility not found at %s" % self.util_loc)
return
if os.path.isfile(path):
cmd_arg = '-a'
elif os.path.isdir(path):
cmd_arg = '-A'
else:
logger.warn("Error sending notification: Path passed to synoindex was not a file or folder.")
return
cmd = [self.util_loc, cmd_arg, path]
logger.info("Calling synoindex command: %s" % str(cmd))
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=headphones.PROG_DIR)
out, error = p.communicate()
#synoindex never returns any codes other than '0', highly irritating
except OSError, e:
logger.warn("Error sending notification: %s" % str(e))
def notify_multiple(self, path_list):
if isinstance(path_list, list):
for path in path_list:
self.notify(path)
class PUSHOVER(object):
def __init__(self):
self.enabled = headphones.CONFIG.PUSHOVER_ENABLED
self.keys = headphones.CONFIG.PUSHOVER_KEYS
self.priority = headphones.CONFIG.PUSHOVER_PRIORITY
if headphones.CONFIG.PUSHOVER_APITOKEN:
self.application_token = headphones.CONFIG.PUSHOVER_APITOKEN
else:
self.application_token = "LdPCoy0dqC21ktsbEyAVCcwvQiVlsz"
def conf(self, options):
return cherrypy.config['config'].get('Pushover', options)
def notify(self, message, event):
if not headphones.CONFIG.PUSHOVER_ENABLED:
return
url = "https://api.pushover.net/1/messages.json"
data = {'token': self.application_token,
'user': headphones.CONFIG.PUSHOVER_KEYS,
'title': event,
'message': message.encode("utf-8"),
'priority': headphones.CONFIG.PUSHOVER_PRIORITY}
headers = {'Content-type': "application/x-www-form-urlencoded"}
response = request.request_response(url, method="POST", headers=headers, data=data)
if response:
logger.info(u"Pushover notifications sent.")
return True
else:
logger.error(u"Pushover notification failed.")
return False
def updateLibrary(self):
        # Kept for interface uniformity; intentionally a no-op
return
def test(self, keys, priority):
self.enabled = True
self.keys = keys
self.priority = priority
self.notify('Main Screen Activate', 'Test Message')
class TwitterNotifier(object):
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
def __init__(self):
self.consumer_key = "oYKnp2ddX5gbARjqX8ZAAg"
self.consumer_secret = "A4Xkw9i5SjHbTk7XT8zzOPqivhj9MmRDR9Qn95YA9sk"
def notify_snatch(self, title):
if headphones.CONFIG.TWITTER_ONSNATCH:
self._notifyTwitter(common.notifyStrings[common.NOTIFY_SNATCH] + ': ' + title + ' at ' + helpers.now())
def notify_download(self, title):
if headphones.CONFIG.TWITTER_ENABLED:
self._notifyTwitter(common.notifyStrings[common.NOTIFY_DOWNLOAD] + ': ' + title + ' at ' + helpers.now())
def test_notify(self):
return self._notifyTwitter("This is a test notification from Headphones at " + helpers.now(), force=True)
def _get_authorization(self):
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
logger.info('Requesting temp token from Twitter')
resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET')
if resp['status'] != '200':
            logger.info('Invalid response from Twitter when requesting temp token: %s' % resp['status'])
else:
request_token = dict(parse_qsl(content))
headphones.CONFIG.TWITTER_USERNAME = request_token['oauth_token']
headphones.CONFIG.TWITTER_PASSWORD = request_token['oauth_token_secret']
return self.AUTHORIZATION_URL + "?oauth_token=" + request_token['oauth_token']
def _get_credentials(self, key):
request_token = {}
request_token['oauth_token'] = headphones.CONFIG.TWITTER_USERNAME
request_token['oauth_token_secret'] = headphones.CONFIG.TWITTER_PASSWORD
request_token['oauth_callback_confirmed'] = 'true'
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(key)
logger.info('Generating and signing request for an access token using key ' + key)
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
logger.info('oauth_consumer: ' + str(oauth_consumer))
oauth_client = oauth.Client(oauth_consumer, token)
logger.info('oauth_client: ' + str(oauth_client))
resp, content = oauth_client.request(self.ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % key)
logger.info('resp, content: ' + str(resp) + ',' + str(content))
access_token = dict(parse_qsl(content))
logger.info('access_token: ' + str(access_token))
logger.info('resp[status] = ' + str(resp['status']))
if resp['status'] != '200':
            logger.error('The request for an access token did not succeed: ' + str(resp['status']))
return False
else:
logger.info('Your Twitter Access Token key: %s' % access_token['oauth_token'])
logger.info('Access Token secret: %s' % access_token['oauth_token_secret'])
headphones.CONFIG.TWITTER_USERNAME = access_token['oauth_token']
headphones.CONFIG.TWITTER_PASSWORD = access_token['oauth_token_secret']
return True
def _send_tweet(self, message=None):
username = self.consumer_key
password = self.consumer_secret
access_token_key = headphones.CONFIG.TWITTER_USERNAME
access_token_secret = headphones.CONFIG.TWITTER_PASSWORD
logger.info(u"Sending tweet: " + message)
api = twitter.Api(username, password, access_token_key, access_token_secret)
try:
api.PostUpdate(message)
except Exception as e:
logger.info(u"Error Sending Tweet: %s" % e)
return False
return True
def _notifyTwitter(self, message='', force=False):
prefix = headphones.CONFIG.TWITTER_PREFIX
if not headphones.CONFIG.TWITTER_ENABLED and not force:
return False
return self._send_tweet(prefix + ": " + message)
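# Illustrative sketch (not part of the upstream file): the two-step OAuth 1.0a
# handshake implemented above is typically driven as follows; the PIN value is a
# placeholder copied by the user from the authorization page.
#
#   notifier = TwitterNotifier()
#   auth_url = notifier._get_authorization()  # stores the temp token and returns the URL to visit
#   notifier._get_credentials('1234567')      # exchanges the PIN (verifier) for an access token
#   notifier.test_notify()                    # sends a test tweet with the stored credentials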
class OSX_NOTIFY(object):
def __init__(self):
try:
self.objc = __import__("objc")
self.AppKit = __import__("AppKit")
        except ImportError:
            logger.warn('OS X Notification: Cannot import objc or AppKit')
            return
def swizzle(self, cls, SEL, func):
old_IMP = getattr(cls, SEL, None)
if old_IMP is None:
old_IMP = cls.instanceMethodForSelector_(SEL)
def wrapper(self, *args, **kwargs):
return func(self, old_IMP, *args, **kwargs)
new_IMP = self.objc.selector(
wrapper,
selector=old_IMP.selector,
signature=old_IMP.signature
)
self.objc.classAddMethod(cls, SEL.encode(), new_IMP)
def notify(self, title, subtitle=None, text=None, sound=True, image=None):
try:
self.swizzle(
self.objc.lookUpClass('NSBundle'),
'bundleIdentifier',
self.swizzled_bundleIdentifier
)
NSUserNotification = self.objc.lookUpClass('NSUserNotification')
NSUserNotificationCenter = self.objc.lookUpClass('NSUserNotificationCenter')
NSAutoreleasePool = self.objc.lookUpClass('NSAutoreleasePool')
if not NSUserNotification or not NSUserNotificationCenter:
return False
pool = NSAutoreleasePool.alloc().init()
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
if subtitle:
notification.setSubtitle_(subtitle)
if text:
notification.setInformativeText_(text)
if sound:
notification.setSoundName_("NSUserNotificationDefaultSoundName")
if image:
source_img = self.AppKit.NSImage.alloc().initByReferencingFile_(image)
notification.setContentImage_(source_img)
#notification.set_identityImage_(source_img)
notification.setHasActionButton_(False)
notification_center = NSUserNotificationCenter.defaultUserNotificationCenter()
notification_center.deliverNotification_(notification)
del pool
return True
except Exception as e:
logger.warn('Error sending OS X Notification: %s' % e)
return False
def swizzled_bundleIdentifier(self, original, swizzled):
return 'ade.headphones.osxnotify'
class BOXCAR(object):
def __init__(self):
self.url = 'https://new.boxcar.io/api/notifications'
def notify(self, title, message, rgid=None):
try:
if rgid:
message += '<br></br><a href="http://musicbrainz.org/release-group/%s">MusicBrainz</a>' % rgid
data = urllib.urlencode({
'user_credentials': headphones.CONFIG.BOXCAR_TOKEN,
'notification[title]': title.encode('utf-8'),
'notification[long_message]': message.encode('utf-8'),
'notification[sound]': "done"
})
req = urllib2.Request(self.url)
handle = urllib2.urlopen(req, data)
handle.close()
return True
except urllib2.URLError as e:
logger.warn('Error sending Boxcar2 Notification: %s' % e)
return False
class SubSonicNotifier(object):
def __init__(self):
self.host = headphones.CONFIG.SUBSONIC_HOST
self.username = headphones.CONFIG.SUBSONIC_USERNAME
self.password = headphones.CONFIG.SUBSONIC_PASSWORD
def notify(self, albumpaths):
# Correct URL
if not self.host.lower().startswith("http"):
self.host = "http://" + self.host
if not self.host.lower().endswith("/"):
self.host = self.host + "/"
# Invoke request
request.request_response(self.host + "musicFolderSettings.view?scanNow",
auth=(self.username, self.password))
class Email(object):
def notify(self, subject, message):
message = MIMEText(message, 'plain', "utf-8")
message['Subject'] = subject
message['From'] = email.utils.formataddr(('Headphones', headphones.CONFIG.EMAIL_FROM))
message['To'] = headphones.CONFIG.EMAIL_TO
try:
if (headphones.CONFIG.EMAIL_SSL):
mailserver = smtplib.SMTP_SSL(headphones.CONFIG.EMAIL_SMTP_SERVER, headphones.CONFIG.EMAIL_SMTP_PORT)
else:
mailserver = smtplib.SMTP(headphones.CONFIG.EMAIL_SMTP_SERVER, headphones.CONFIG.EMAIL_SMTP_PORT)
if (headphones.CONFIG.EMAIL_TLS):
mailserver.starttls()
mailserver.ehlo()
if headphones.CONFIG.EMAIL_SMTP_USER:
mailserver.login(headphones.CONFIG.EMAIL_SMTP_USER, headphones.CONFIG.EMAIL_SMTP_PASSWORD)
mailserver.sendmail(headphones.CONFIG.EMAIL_FROM, headphones.CONFIG.EMAIL_TO, message.as_string())
mailserver.quit()
return True
except Exception, e:
logger.warn('Error sending Email: %s' % e)
return False
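# Illustrative sketch (not part of the upstream file): the notifier classes above
# are used by calling notify() with the signature each class defines, for example:
#
#   Email().notify('Headphones', 'Artist - Album downloaded and processed')        # (subject, message)
#   PROWL().notify('Artist - Album downloaded and processed', 'Download complete')  # (message, event)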
|
gpl-3.0
|
termoshtt/DataProcessor
|
lib/dataprocessor/tests/test_scan.py
|
3
|
9544
|
# coding=utf-8
"""Test for scan."""
import os
from .utils import TestNodeListAndDir
from ..pipes.scan import directory
class TestScan(TestNodeListAndDir):
"""Unittest for dataprocessor.pipes.scan.
Attributes
----------
tempdir_paths : list
list of project root dir path
node_list : list
"""
def setUp(self):
"""Prepare test environment."""
self._generate_test_directories()
def _generate_test_directories(self):
"""Generate test directories.
Generated directories and files are as follows,
(dir-path, including-dirs, including-files)
('/tmpdir_path', ['run0', 'run1', 'run2'], [])
('/tmpdir_path/run0', ['run0', 'run1'], ['test.conf'])
('/tmpdir_path/run0/run0', ['data'], [])
('/tmpdir_path/run0/run0/data', [], ['hoge.conf'])
('/tmpdir_path/run0/run1', [], ['test.conf'])
('/tmpdir_path/run1', [], ['test.conf'])
('/tmpdir_path/run2', ['data'], [])
('/tmpdir_path/run2/data', [], ['test.conf'])
('/tmpdir_path/run2/dummy', [], [])
('/tmpdir_path/run3', ['data'], []) # symboliclink to run2
"""
import tempfile
self.tempdir_path = tempfile.mkdtemp()
root = self.tempdir_path
for i in range(3):
os.mkdir(os.path.join(root, "run" + str(i)))
for i in range(2):
open(os.path.join(root, "run" + str(i), "test.conf"),
"w").close()
for i in range(2):
os.mkdir(os.path.join(root, "run0", "run" + str(i)))
os.mkdir(os.path.join(root, "run2", "data"))
os.mkdir(os.path.join(root, "run2", "dummy"))
os.mkdir(os.path.join(root, "run0", "run0", "data"))
open(os.path.join(root, "run0", "run1", "test.conf"), "w").close()
open(os.path.join(root, "run2", "data", "test.conf"), "w").close()
open(os.path.join(root, "run0", "run0", "data", "hoge.conf"),
"w").close()
os.symlink(os.path.join(root, "run2"), os.path.join(root, "run3"))
def test_directory_for_first_scan1(self):
"""Test for initial scan."""
node_list = []
root_dir = self.tempdir_path
# whitelist specifies directory.
node_list = directory(node_list, root_dir, "data")
compare_node_list = [
{'path': root_dir,
'parents': [],
'children': [os.path.join(root_dir, "run2")],
'name': os.path.basename(root_dir),
'type': 'project'},
{'path': os.path.join(root_dir, "run0"),
'parents': [],
'children': [os.path.join(root_dir, "run0/run0")],
'name': 'run0',
'type': 'project'},
{'path': os.path.join(root_dir, "run0/run0"),
'parents': [os.path.join(root_dir, "run0")],
'children': [],
'name': 'run0',
'type': 'run'},
{'path': os.path.join(root_dir, "run2"),
'parents': [root_dir],
'children': [],
'name': 'run2',
'type': 'run'}]
self.assertEqual(node_list, compare_node_list)
def test_directory_for_first_scan2(self):
"""Test for initial scan."""
node_list = []
root_dir = self.tempdir_path
# whitelist have two elements.
node_list = directory(node_list, root_dir,
["data/hoge*", "data/test*"])
compare_node_list = [
{'path': root_dir,
'parents': [],
'children': [os.path.join(root_dir, "run2")],
'name': os.path.basename(root_dir),
'type': 'project'},
{'path': os.path.join(root_dir, "run0"),
'parents': [],
'children': [os.path.join(root_dir, "run0/run0")],
'name': 'run0',
'type': 'project'},
{'path': os.path.join(root_dir, "run0/run0"),
'parents': [os.path.join(root_dir, "run0")],
'children': [],
'name': 'run0',
'type': 'run'},
{'path': os.path.join(root_dir, "run2"),
'parents': [root_dir],
'children': [],
'name': 'run2',
'type': 'run'}]
self.assertEqual(node_list, compare_node_list)
def test_directory_for_first_scan3(self):
"""Test for initial scan."""
node_list = []
root_dir = self.tempdir_path
# whitelist has `..`.
node_list = directory(node_list, root_dir,
"../data")
compare_node_list = [
{'path': os.path.join(root_dir, "run0", "run0"),
'parents': [],
'children': [os.path.join(root_dir, "run0", "run0", "data")],
'name': "run0",
'type': 'project'},
{'path': os.path.join(root_dir, "run0", "run0", "data"),
'parents': [os.path.join(root_dir, "run0", "run0")],
'children': [],
'name': 'data',
'type': 'run'},
{'path': os.path.join(root_dir, "run2"),
'parents': [],
'children': [os.path.join(root_dir, "run2", "data"),
os.path.join(root_dir, "run2", "dummy")],
'name': "run2",
'type': 'project'},
{'path': os.path.join(root_dir, "run2", "data"),
'parents': [os.path.join(root_dir, "run2")],
'children': [],
'name': 'data',
'type': 'run'},
# This path is also added to node list.
{'path': os.path.join(root_dir, "run2", "dummy"),
'parents': [os.path.join(root_dir, "run2")],
'children': [],
'name': 'dummy',
'type': 'run'}]
self.assertEqual(node_list, compare_node_list)
def test_directory_for_first_scan4(self):
"""Test for initial scan with symbolic link."""
node_list = []
root_dir = self.tempdir_path
# followlinks is `True`.
node_list = directory(node_list, root_dir,
"data/test.conf", followlinks=True)
compare_node_list = [
{'path': root_dir,
'parents': [],
'children': [os.path.join(root_dir, "run2"),
os.path.join(root_dir, "run3")],
'name': os.path.basename(root_dir),
'type': 'project'},
{'path': os.path.join(root_dir, "run2"),
'parents': [root_dir],
'children': [],
'name': 'run2',
'type': 'run'},
# Symbolic link is also added to node list.
{'path': os.path.join(root_dir, "run3"),
'parents': [root_dir],
'children': [],
'name': 'run3',
'type': 'run'}]
self.assertEqual(node_list, compare_node_list)
def test_directory_for_rescan(self):
"""Test for rescan."""
root_dir = self.tempdir_path
node_list = [{'path': os.path.join(root_dir, "run0"),
'parents': [], # empty
'children': [], # empty
'name': 'run0',
'type': 'run'}]
node_list = directory(node_list, root_dir, "*.conf")
compare_node_list = [
{'path': os.path.join(root_dir, 'run0'),
'parents': [root_dir], # fill
'children': [os.path.join(root_dir, 'run0/run1')], # fill
'name': 'run0',
'type': 'run'},
{'path': root_dir,
'parents': [],
'children': [os.path.join(root_dir, 'run0'),
os.path.join(root_dir, 'run1')],
'name': os.path.basename(root_dir),
'type': 'project'},
{'path': os.path.join(root_dir, 'run0/run0'),
'parents': [],
'children': [os.path.join(root_dir, 'run0/run0/data')],
'name': 'run0',
'type': 'project'},
{'path': os.path.join(root_dir, 'run0/run0/data'),
'parents': [os.path.join(root_dir, 'run0/run0')],
'children': [],
'name': 'data',
'type': 'run'},
{'path': os.path.join(root_dir, 'run0/run1'),
'parents': [os.path.join(root_dir, 'run0')],
'children': [],
'name': 'run1',
'type': 'run'},
{'path': os.path.join(root_dir, 'run1'),
'parents': [root_dir],
'children': [],
'name': 'run1',
'type': 'run'},
{'path': os.path.join(root_dir, 'run2'),
'parents': [],
'children': [os.path.join(root_dir, 'run2/data')],
'name': 'run2',
'type': 'project'},
{'path': os.path.join(root_dir, 'run2/data'),
'parents': [os.path.join(root_dir, 'run2')],
'children': [],
'name': 'data',
'type': 'run'}]
self.assertEqual(node_list, compare_node_list)
def test_rescan_failed(self):
root_dir = self.tempdir_path
node_list = [{'path': os.path.join(root_dir, "run0"),
'children': [], # empty and no parents key.
'name': 'run0',
'type': 'run'}]
with self.assertRaises(KeyError):
node_list = directory(node_list, root_dir, ["*.conf"])
|
gpl-3.0
|
freakboy3742/django
|
tests/forms_tests/field_tests/test_charfield.py
|
27
|
6355
|
from django.core.exceptions import ValidationError
from django.forms import (
CharField, HiddenInput, PasswordInput, Textarea, TextInput,
)
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class CharFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertIsNone(f.max_length)
self.assertIsNone(f.min_length)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertIsNone(f.max_length)
self.assertIsNone(f.min_length)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
msg = "'Ensure this value has at most 10 characters (it has 11).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('1234567890a')
self.assertEqual(f.max_length, 10)
self.assertIsNone(f.min_length)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
msg = "'Ensure this value has at least 10 characters (it has 5).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertIsNone(f.max_length)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
msg = "'Ensure this value has at least 10 characters (it has 5).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertIsNone(f.max_length)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
Setting min_length or max_length to something that is not a number
raises an exception.
"""
with self.assertRaises(ValueError):
CharField(min_length='a')
with self.assertRaises(ValueError):
CharField(max_length='a')
msg = '__init__() takes 1 positional argument but 2 were given'
with self.assertRaisesMessage(TypeError, msg):
CharField('a')
def test_charfield_widget_attrs(self):
"""
CharField.widget_attrs() always returns a dictionary and includes
minlength/maxlength if min_length/max_length are defined on the field
and the widget is not hidden.
"""
# Return an empty dictionary if max_length and min_length are both None.
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
self.assertEqual(f.widget_attrs(Textarea()), {})
# Return a maxlength attribute equal to max_length.
f = CharField(max_length=10)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10'})
# Return a minlength attribute equal to min_length.
f = CharField(min_length=5)
self.assertEqual(f.widget_attrs(TextInput()), {'minlength': '5'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'minlength': '5'})
self.assertEqual(f.widget_attrs(Textarea()), {'minlength': '5'})
# Return both maxlength and minlength when both max_length and
# min_length are set.
f = CharField(max_length=10, min_length=5)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(HiddenInput()), {})
def test_charfield_strip(self):
"""
Values have whitespace stripped but not if strip=False.
"""
f = CharField()
self.assertEqual(f.clean(' 1'), '1')
self.assertEqual(f.clean('1 '), '1')
f = CharField(strip=False)
self.assertEqual(f.clean(' 1'), ' 1')
self.assertEqual(f.clean('1 '), '1 ')
def test_strip_before_checking_empty(self):
"""
A whitespace-only value, ' ', is stripped to an empty string and then
converted to the empty value, None.
"""
f = CharField(required=False, empty_value=None)
self.assertIsNone(f.clean(' '))
def test_clean_non_string(self):
"""CharField.clean() calls str(value) before stripping it."""
class StringWrapper:
def __init__(self, v):
self.v = v
def __str__(self):
return self.v
value = StringWrapper(' ')
f1 = CharField(required=False, empty_value=None)
self.assertIsNone(f1.clean(value))
f2 = CharField(strip=False)
self.assertEqual(f2.clean(value), ' ')
def test_charfield_disabled(self):
f = CharField(disabled=True)
self.assertWidgetRendersTo(f, '<input type="text" name="f" id="id_f" disabled required>')
def test_null_characters_prohibited(self):
f = CharField()
msg = 'Null characters are not allowed.'
with self.assertRaisesMessage(ValidationError, msg):
f.clean('\x00something')
|
bsd-3-clause
|
shaggytwodope/qutebrowser
|
tests/end2end/fixtures/test_webserver.py
|
9
|
2499
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test the httpbin webserver used for tests."""
import json
import urllib.request
import urllib.error
import pytest
@pytest.mark.parametrize('path, content, expected', [
('/', '<title>httpbin(1): HTTP Client Testing Service</title>', True),
# https://github.com/Runscope/httpbin/issues/245
('/', 'www.google-analytics.com', False),
('/data/hello.txt', 'Hello World!', True),
])
def test_httpbin(httpbin, qtbot, path, content, expected):
with qtbot.waitSignal(httpbin.new_request, timeout=100):
url = 'http://localhost:{}{}'.format(httpbin.port, path)
try:
response = urllib.request.urlopen(url)
except urllib.error.HTTPError as e:
# "Though being an exception (a subclass of URLError), an HTTPError
# can also function as a non-exceptional file-like return value
# (the same thing that urlopen() returns)."
# ...wat
print(e.read().decode('utf-8'))
raise
data = response.read().decode('utf-8')
assert httpbin.get_requests() == [httpbin.ExpectedRequest('GET', path)]
assert (content in data) == expected
@pytest.mark.parametrize('line, verb, path, equal', [
({'verb': 'GET', 'path': '/', 'status': 200}, 'GET', '/', True),
({'verb': 'GET', 'path': '/foo/', 'status': 200}, 'GET', '/foo', True),
({'verb': 'GET', 'path': '/', 'status': 200}, 'GET', '/foo', False),
({'verb': 'POST', 'path': '/', 'status': 200}, 'GET', '/', False),
])
def test_expected_request(httpbin, line, verb, path, equal):
expected = httpbin.ExpectedRequest(verb, path)
request = httpbin.Request(json.dumps(line))
assert (expected == request) == equal
|
gpl-3.0
|
goliveirab/odoo
|
addons/point_of_sale/wizard/pos_open_statement.py
|
387
|
4217
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class pos_open_statement(osv.osv_memory):
_name = 'pos.open.statement'
_description = 'Open Statements'
def open_statement(self, cr, uid, ids, context=None):
"""
Open the statements
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
        @return: A dictionary describing the act_window that lists the opened cash statements
"""
data = {}
mod_obj = self.pool.get('ir.model.data')
statement_obj = self.pool.get('account.bank.statement')
sequence_obj = self.pool.get('ir.sequence')
journal_obj = self.pool.get('account.journal')
if context is None:
context = {}
st_ids = []
j_ids = journal_obj.search(cr, uid, [('journal_user','=',1)], context=context)
if not j_ids:
raise osv.except_osv(_('No Cash Register Defined!'), _('You have to define which payment method must be available in the point of sale by reusing existing bank and cash through "Accounting / Configuration / Journals / Journals". Select a journal and check the field "PoS Payment Method" from the "Point of Sale" tab. You can also create new payment methods directly from menu "PoS Backend / Configuration / Payment Methods".'))
for journal in journal_obj.browse(cr, uid, j_ids, context=context):
ids = statement_obj.search(cr, uid, [('state', '!=', 'confirm'), ('user_id', '=', uid), ('journal_id', '=', journal.id)], context=context)
if journal.sequence_id:
number = sequence_obj.next_by_id(cr, uid, journal.sequence_id.id, context=context)
else:
number = sequence_obj.next_by_code(cr, uid, 'account.cash.statement', context=context)
data.update({
'journal_id': journal.id,
'user_id': uid,
'state': 'draft',
'name': number
})
statement_id = statement_obj.create(cr, uid, data, context=context)
st_ids.append(int(statement_id))
if journal.cash_control:
statement_obj.button_open(cr, uid, [statement_id], context)
tree_res = mod_obj.get_object_reference(cr, uid, 'point_of_sale', 'view_cash_statement_pos_tree')
tree_id = tree_res and tree_res[1] or False
form_res = mod_obj.get_object_reference(cr, uid, 'account', 'view_bank_statement_form2')
form_id = form_res and form_res[1] or False
search_res = mod_obj.get_object_reference(cr, uid, 'account', 'view_account_bank_statement_filter')
search_id = search_res and search_res[1] or False
return {
'type': 'ir.actions.act_window',
'name': _('List of Cash Registers'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.bank.statement',
'domain': str([('id', 'in', st_ids)]),
'views': [(tree_id, 'tree'), (form_id, 'form')],
'search_view_id': search_id,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
sorenk/ansible
|
lib/ansible/modules/cloud/amazon/ec2_snapshot.py
|
27
|
9687
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_snapshot
short_description: creates a snapshot from an existing volume
description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
volume_id:
description:
- volume from which to take the snapshot
required: false
description:
description:
- description to be applied to the snapshot
required: false
instance_id:
description:
- instance that has the required volume to snapshot mounted
required: false
device_name:
description:
- device name of a mounted volume to be snapshotted
required: false
snapshot_tags:
description:
- a hash/dictionary of tags to add to the snapshot
required: false
version_added: "1.6"
wait:
description:
- wait for the snapshot to be ready
choices: ['yes', 'no']
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- how long before wait gives up, in seconds
- specify 0 to wait forever
required: false
default: 0
version_added: "1.5.1"
state:
description:
- whether to add or create a snapshot
required: false
default: present
choices: ['absent', 'present']
version_added: "1.9"
snapshot_id:
description:
- snapshot id to remove
required: false
version_added: "1.9"
last_snapshot_min_age:
description:
- If the volume's most recent snapshot has started less than `last_snapshot_min_age' minutes ago, a new snapshot will not be created.
required: false
default: 0
version_added: "2.0"
author: "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- ec2_snapshot:
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
# Remove a snapshot
- local_action:
module: ec2_snapshot
snapshot_id: snap-abcd1234
state: absent
# Create a snapshot only if the most recent one is older than 1 hour
- local_action:
module: ec2_snapshot
volume_id: vol-abcdef12
last_snapshot_min_age: 60
'''
import time
import datetime
try:
import boto.exception
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
# Find the most recent snapshot
def _get_snapshot_starttime(snap):
return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
"""
Gets the most recently created snapshot and optionally filters the result
if the snapshot is too old
:param snapshots: list of snapshots to search
:param max_snapshot_age_secs: filter the result if its older than this
:param now: simulate time -- used for unit testing
:return:
"""
if len(snapshots) == 0:
return None
if not now:
now = datetime.datetime.utcnow()
youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)
    # See if the snapshot is younger than the given max age
snapshot_start = datetime.datetime.strptime(youngest_snapshot.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
snapshot_age = now - snapshot_start
if max_snapshot_age_secs is not None:
if snapshot_age.total_seconds() > max_snapshot_age_secs:
return None
return youngest_snapshot
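# Illustrative sketch (not part of the upstream module): with a 30 minute cutoff a
# snapshot started 10 minutes ago is returned, while one started 2 hours ago makes
# the function return None so a new snapshot gets created. `snapshots` stands in
# for the boto snapshot list fetched in create_snapshot().
#
#   recent = _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=30 * 60)
#   if recent is None:
#       ...  # nothing young enough exists; take a fresh snapshot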
def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
"""
Wait for the snapshot to be created
:param snapshot:
:param wait_timeout_secs: fail this step after this many seconds
:param sleep_func:
:return:
"""
time_waited = 0
snapshot.update()
while snapshot.status != 'completed':
sleep_func(3)
snapshot.update()
time_waited += 3
if wait_timeout_secs and time_waited > wait_timeout_secs:
return False
return True
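# Illustrative sketch (not part of the upstream module): sleep_func is injectable so
# unit tests can avoid real delays; `fake_snapshot` is a stand-in object whose
# update() eventually flips status to 'completed'.
#
#   calls = []
#   done = _create_with_wait(fake_snapshot, wait_timeout_secs=30, sleep_func=calls.append)
#   assert done  # False would mean the snapshot never completed within the timeout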
def create_snapshot(module, ec2, state=None, description=None, wait=None,
wait_timeout=None, volume_id=None, instance_id=None,
snapshot_id=None, device_name=None, snapshot_tags=None,
last_snapshot_min_age=None):
snapshot = None
changed = False
required = [volume_id, snapshot_id, instance_id]
if required.count(None) != len(required) - 1: # only 1 must be set
module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
    if (instance_id and not device_name) or (device_name and not instance_id):
module.fail_json(msg='Instance ID and device name must both be specified')
if instance_id:
try:
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not volumes:
module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
volume_id = volumes[0].id
if state == 'absent':
if not snapshot_id:
module.fail_json(msg='snapshot_id must be set when state is absent')
try:
ec2.delete_snapshot(snapshot_id)
except boto.exception.BotoServerError as e:
# exception is raised if snapshot does not exist
if e.error_code == 'InvalidSnapshot.NotFound':
module.exit_json(changed=False)
else:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# successful delete
module.exit_json(changed=True)
if last_snapshot_min_age > 0:
try:
current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
snapshot = _get_most_recent_snapshot(current_snapshots,
max_snapshot_age_secs=last_snapshot_min_age)
try:
# Create a new snapshot if we didn't find an existing one to use
if snapshot is None:
snapshot = ec2.create_snapshot(volume_id, description=description)
changed = True
if wait:
if not _create_with_wait(snapshot, wait_timeout):
module.fail_json(msg='Timed out while creating snapshot.')
if snapshot_tags:
for k, v in snapshot_tags.items():
snapshot.add_tag(k, v)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
module.exit_json(changed=changed,
snapshot_id=snapshot.id,
volume_id=snapshot.volume_id,
volume_size=snapshot.volume_size,
tags=snapshot.tags.copy())
def create_snapshot_ansible_module():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
volume_id=dict(),
description=dict(),
instance_id=dict(),
snapshot_id=dict(),
device_name=dict(),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=0),
last_snapshot_min_age=dict(type='int', default=0),
snapshot_tags=dict(type='dict', default=dict()),
state=dict(choices=['absent', 'present'], default='present'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
return module
def main():
module = create_snapshot_ansible_module()
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
volume_id = module.params.get('volume_id')
snapshot_id = module.params.get('snapshot_id')
description = module.params.get('description')
instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
last_snapshot_min_age = module.params.get('last_snapshot_min_age')
snapshot_tags = module.params.get('snapshot_tags')
state = module.params.get('state')
ec2 = ec2_connect(module)
create_snapshot(
module=module,
state=state,
description=description,
wait=wait,
wait_timeout=wait_timeout,
ec2=ec2,
volume_id=volume_id,
instance_id=instance_id,
snapshot_id=snapshot_id,
device_name=device_name,
snapshot_tags=snapshot_tags,
last_snapshot_min_age=last_snapshot_min_age
)
if __name__ == '__main__':
main()
|
gpl-3.0
|
bluestemscott/librarygadget
|
librarygadget/librarybot/migrations/0001_initial.py
|
1
|
15532
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserProfile'
db.create_table('librarybot_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
('api_key', self.gf('django.db.models.fields.CharField')(max_length=50, null=True)),
('account_level', self.gf('django.db.models.fields.CharField')(default='free', max_length=10)),
('paid_last_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('paid_first_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
))
db.send_create_signal('librarybot', ['UserProfile'])
# Adding model 'Library'
db.create_table('librarybot_library', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('state', self.gf('django.db.models.fields.CharField')(max_length=2)),
('catalogurl', self.gf('django.db.models.fields.URLField')(max_length=200)),
('librarysystem', self.gf('django.db.models.fields.CharField')(max_length=20)),
('renew_supported_code', self.gf('django.db.models.fields.CharField')(default='untested', max_length=10)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
('lastmodified', self.gf('django.db.models.fields.DateField')(auto_now=True, blank=True)),
))
db.send_create_signal('librarybot', ['Library'])
# Adding model 'Patron'
db.create_table('librarybot_patron', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('library', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Library'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('patronid', self.gf('django.db.models.fields.CharField')(max_length=40)),
('pin', self.gf('django.db.models.fields.CharField')(max_length=75)),
('name', self.gf('django.db.models.fields.CharField')(max_length=150, null=True)),
('save_history', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('lastchecked', self.gf('django.db.models.fields.DateTimeField')()),
('batch_last_run', self.gf('django.db.models.fields.DateField')(null=True)),
))
db.send_create_signal('librarybot', ['Patron'])
# Adding model 'Item'
db.create_table('librarybot_item', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('patron', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Patron'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=1024)),
('author', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
('outDate', self.gf('django.db.models.fields.DateField')(null=True)),
('dueDate', self.gf('django.db.models.fields.DateField')(null=True)),
('timesRenewed', self.gf('django.db.models.fields.SmallIntegerField')(null=True)),
('isbn', self.gf('django.db.models.fields.CharField')(max_length=25, null=True)),
('asof', self.gf('django.db.models.fields.DateField')()),
))
db.send_create_signal('librarybot', ['Item'])
# Adding model 'AccessLog'
db.create_table('librarybot_accesslog', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('patron', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Patron'])),
('library', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['librarybot.Library'])),
('viewfunc', self.gf('django.db.models.fields.CharField')(max_length=50)),
('error', self.gf('django.db.models.fields.CharField')(max_length=150)),
('error_stacktrace', self.gf('django.db.models.fields.CharField')(max_length=3000)),
('date', self.gf('django.db.models.fields.DateField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('librarybot', ['AccessLog'])
# Adding model 'LibraryRequest'
db.create_table('librarybot_libraryrequest', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('libraryname', self.gf('django.db.models.fields.CharField')(max_length=100)),
('state', self.gf('django.db.models.fields.CharField')(max_length=2)),
('catalogurl', self.gf('django.db.models.fields.URLField')(max_length=200)),
('name', self.gf('django.db.models.fields.CharField')(max_length=60)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('patronid', self.gf('django.db.models.fields.CharField')(max_length=40)),
('password', self.gf('django.db.models.fields.CharField')(max_length=20)),
))
db.send_create_signal('librarybot', ['LibraryRequest'])
# Adding model 'RenewalResponse'
db.create_table('librarybot_renewalresponse', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('token', self.gf('django.db.models.fields.CharField')(max_length=36)),
('response', self.gf('django.db.models.fields.TextField')()),
('cachedate', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('librarybot', ['RenewalResponse'])
def backwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table('librarybot_userprofile')
# Deleting model 'Library'
db.delete_table('librarybot_library')
# Deleting model 'Patron'
db.delete_table('librarybot_patron')
# Deleting model 'Item'
db.delete_table('librarybot_item')
# Deleting model 'AccessLog'
db.delete_table('librarybot_accesslog')
# Deleting model 'LibraryRequest'
db.delete_table('librarybot_libraryrequest')
# Deleting model 'RenewalResponse'
db.delete_table('librarybot_renewalresponse')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'librarybot.accesslog': {
'Meta': {'object_name': 'AccessLog'},
'date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'error_stacktrace': ('django.db.models.fields.CharField', [], {'max_length': '3000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'library': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Library']"}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Patron']"}),
'viewfunc': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'librarybot.item': {
'Meta': {'object_name': 'Item'},
'asof': ('django.db.models.fields.DateField', [], {}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'dueDate': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True'}),
'outDate': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Patron']"}),
'timesRenewed': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'librarybot.library': {
'Meta': {'object_name': 'Library'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'catalogurl': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastmodified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'librarysystem': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'renew_supported_code': ('django.db.models.fields.CharField', [], {'default': "'untested'", 'max_length': '10'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'librarybot.libraryrequest': {
'Meta': {'object_name': 'LibraryRequest'},
'catalogurl': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'libraryname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'patronid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'librarybot.patron': {
'Meta': {'object_name': 'Patron'},
'batch_last_run': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastchecked': ('django.db.models.fields.DateTimeField', [], {}),
'library': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['librarybot.Library']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True'}),
'patronid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'save_history': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'librarybot.renewalresponse': {
'Meta': {'object_name': 'RenewalResponse'},
'cachedate': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.TextField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'librarybot.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'account_level': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '10'}),
'api_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid_first_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'paid_last_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['librarybot']
|
mit
|
jaredjennings/snowy
|
libs/openshiftlibs.py
|
8
|
3967
|
#!/usr/bin/env python
__author__ = 'N. Harrison Ripps'
"""
This library was written for the original django-example project -
https://github.com/openshift/django-example
by @url(https://github.com/nhr). Since it was placed inside the
django project folder, I removed it when I started working on my
fork -
https://github.com/ZackYovel/django-example
Because it is required by the .openshift/action_hooks/secure_db.py
action hook, and because this library is essentially a recommendation
of the OpenShift providers, I am adding it back, but placing it in
the libs folder as a generic dependency rather than a project-specific
file.
Running 'grep -r openshiftlibs' finds one file that references this
library: .openshift/action_hooks/secure_db.py.
"""
import hashlib, inspect, os, random, sys
# Gets the secret token provided by OpenShift
# or generates one (this is slightly less secure, but good enough for now)
def get_openshift_secret_token():
token = os.getenv('OPENSHIFT_SECRET_TOKEN')
name = os.getenv('OPENSHIFT_APP_NAME')
uuid = os.getenv('OPENSHIFT_APP_UUID')
if token is not None:
return token
elif (name is not None and uuid is not None):
return hashlib.sha256(name + '-' + uuid).hexdigest()
return None
# Loop through all provided variables and generate secure versions
# If not running on OpenShift, returns defaults and logs an error message
#
# This function calls secure_function and passes an array of:
# {
# 'hash': generated sha hash,
# 'variable': name of variable,
# 'original': original value
# }
def openshift_secure(default_keys, secure_function = 'make_secure_key'):
# Attempts to get secret token
my_token = get_openshift_secret_token()
# Only generate random values if on OpenShift
my_list = default_keys
if my_token is not None:
# Loop over each default_key and set the new value
for key, value in default_keys.iteritems():
# Create hash out of token and this key's name
sha = hashlib.sha256(my_token + '-' + key).hexdigest()
# Pass a dictionary so we can add stuff without breaking existing calls
vals = { 'hash': sha, 'variable': key, 'original': value }
# Call user specified function or just return hash
my_list[key] = sha
if secure_function is not None:
# Pick through the global and local scopes to find the function.
possibles = globals().copy()
possibles.update(locals())
supplied_function = possibles.get(secure_function)
if not supplied_function:
raise Exception("Cannot find supplied security function")
else:
my_list[key] = supplied_function(vals)
else:
calling_file = inspect.stack()[1][1]
if os.getenv('OPENSHIFT_REPO_DIR'):
base = os.getenv('OPENSHIFT_REPO_DIR')
            calling_file = calling_file.replace(base, '')  # str.replace returns a new string; assign it back
sys.stderr.write("OPENSHIFT WARNING: Using default values for secure variables, please manually modify in " + calling_file + "\n")
return my_list
# This function transforms default keys into per-deployment random keys;
def make_secure_key(key_info):
hashcode = key_info['hash']
key = key_info['variable']
original = key_info['original']
# These are the legal password characters
# as per the Django source code
# (django/contrib/auth/models.py)
chars = 'abcdefghjkmnpqrstuvwxyz'
chars += 'ABCDEFGHJKLMNPQRSTUVWXYZ'
chars += '23456789'
# Use the hash to seed the RNG
random.seed(int("0x" + hashcode[:8], 0))
# Create a random string the same length as the default
rand_key = ''
for _ in range(len(original)):
        rand_pos = random.randint(0, len(chars) - 1)  # randint is inclusive at both ends
        rand_key += chars[rand_pos]
# Reset the RNG
random.seed()
# Set the value
return rand_key
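# Illustrative usage sketch (not part of the original library, added as an
# example): pass a dict of default secrets to openshift_secure(); on OpenShift
# it returns per-deployment replacements, elsewhere it returns the defaults
# unchanged and writes a warning to stderr.
if __name__ == '__main__':
    example_defaults = {'SECRET_KEY': 'changeme0123456789abcdef'}
    print(openshift_secure(example_defaults))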
|
agpl-3.0
|
rcmachado/pysuru
|
pysuru/tests/test_http.py
|
1
|
1256
|
# coding: utf-8
try:
from unittest import mock
except ImportError:
import mock
from pysuru.http import HttpClient
def test_headers_attribute_should_always_have_authorization_header_with_token():
client = HttpClient('TARGET', 'TOKEN')
assert 'Authorization' in client.headers
assert client.headers['Authorization'] == 'bearer TOKEN'
def test_urlopen_should_build_full_url_using_target_and_path():
client = HttpClient('example.com/api', 'TOKEN')
client.conn.request = mock.MagicMock()
client.urlopen('GET', '/sample')
expected_url = 'http://example.com/api/sample'
assert client.conn.request.call_args_list == [
mock.call('GET', expected_url, headers=mock.ANY, fields=None)]
def test_urlopen_should_merge_headers_argument_with_headers_attribute():
my_headers = {
'X-Custom-Header': 'custom value'
}
expected_headers = {
'Authorization': 'bearer TOKEN',
'X-Custom-Header': 'custom value'
}
client = HttpClient('TARGET', 'TOKEN')
client.conn.request = mock.MagicMock()
client.urlopen('GET', '/sample', headers=my_headers)
assert client.conn.request.call_args_list == [
mock.call('GET', mock.ANY, headers=expected_headers, fields=None)]
|
mit
|
ardumont/linux
|
scripts/checkkconfigsymbols.py
|
88
|
15783
|
#!/usr/bin/env python2
"""Find Kconfig symbols that are referenced but not defined."""
# (c) 2014-2015 Valentin Rothberg <[email protected]>
# (c) 2014 Stefan Hengelein <[email protected]>
#
# Licensed under the terms of the GNU GPL License version 2
import difflib
import os
import re
import signal
import sys
from multiprocessing import Pool, cpu_count
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
# regex expressions
OPERATORS = r"&|\(|\)|\||\!"
FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}"
DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*"
EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+"
DEFAULT = r"default\s+.*?(?:if\s.+){,1}"
STMT = r"^\s*(?:if|select|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR
SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")"
# regex objects
REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$")
REGEX_FEATURE = re.compile(r'(?!\B)' + FEATURE + r'(?!\B)')
REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$")
REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
REGEX_QUOTES = re.compile("(\"(.*?)\")")
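# Illustrative examples of the patterns above (added for clarity, not part of
# the original script):
#   REGEX_SOURCE_FEATURE.findall("#ifdef CONFIG_FOO_BAR")  ->  ['FOO_BAR']
#   REGEX_KCONFIG_DEF.match("config FOO_BAR").group(1)     ->  'FOO_BAR'
#   REGEX_KCONFIG_STMT.match("    depends on FOO_BAR")     ->  matches a dependency statement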
def parse_options():
"""The user interface of this module."""
usage = "%prog [options]\n\n" \
"Run this tool to detect Kconfig symbols that are referenced but " \
"not defined in\nKconfig. The output of this tool has the " \
"format \'Undefined symbol\\tFile list\'\n\n" \
"If no option is specified, %prog will default to check your\n" \
"current tree. Please note that specifying commits will " \
"\'git reset --hard\'\nyour current tree! You may save " \
"uncommitted changes to avoid losing data."
parser = OptionParser(usage=usage)
parser.add_option('-c', '--commit', dest='commit', action='store',
default="",
help="Check if the specified commit (hash) introduces "
"undefined Kconfig symbols.")
parser.add_option('-d', '--diff', dest='diff', action='store',
default="",
help="Diff undefined symbols between two commits. The "
"input format bases on Git log's "
"\'commmit1..commit2\'.")
parser.add_option('-f', '--find', dest='find', action='store_true',
default=False,
help="Find and show commits that may cause symbols to be "
"missing. Required to run with --diff.")
parser.add_option('-i', '--ignore', dest='ignore', action='store',
default="",
help="Ignore files matching this pattern. Note that "
"the pattern needs to be a Python regex. To "
"ignore defconfigs, specify -i '.*defconfig'.")
parser.add_option('-s', '--sim', dest='sim', action='store', default="",
help="Print a list of maximum 10 string-similar symbols.")
parser.add_option('', '--force', dest='force', action='store_true',
default=False,
help="Reset current Git tree even when it's dirty.")
(opts, _) = parser.parse_args()
if opts.commit and opts.diff:
sys.exit("Please specify only one option at once.")
if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff):
sys.exit("Please specify valid input in the following format: "
"\'commmit1..commit2\'")
if opts.commit or opts.diff:
if not opts.force and tree_is_dirty():
sys.exit("The current Git tree is dirty (see 'git status'). "
"Running this script may\ndelete important data since it "
"calls 'git reset --hard' for some performance\nreasons. "
" Please run this script in a clean Git tree or pass "
"'--force' if you\nwant to ignore this warning and "
"continue.")
if opts.commit:
opts.find = False
if opts.ignore:
try:
re.match(opts.ignore, "this/is/just/a/test.c")
except:
sys.exit("Please specify a valid Python regex.")
return opts
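# Example invocations (illustrative, derived from the options above):
#   ./scripts/checkkconfigsymbols.py                         # check the whole current tree
#   ./scripts/checkkconfigsymbols.py -c <commit>             # symbols introduced by one commit
#   ./scripts/checkkconfigsymbols.py -d commit1..commit2 -f  # diff two commits and find culprit commits
#   ./scripts/checkkconfigsymbols.py -i '.*defconfig'        # ignore defconfig files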
def main():
"""Main function of this module."""
opts = parse_options()
if opts.sim and not opts.commit and not opts.diff:
sims = find_sims(opts.sim, opts.ignore)
if sims:
print "%s: %s" % (yel("Similar symbols"), ', '.join(sims))
else:
print "%s: no similar symbols found" % yel("Similar symbols")
sys.exit(0)
# dictionary of (un)defined symbols
defined = {}
undefined = {}
if opts.commit or opts.diff:
head = get_head()
# get commit range
commit_a = None
commit_b = None
if opts.commit:
commit_a = opts.commit + "~"
commit_b = opts.commit
elif opts.diff:
split = opts.diff.split("..")
commit_a = split[0]
commit_b = split[1]
undefined_a = {}
undefined_b = {}
# get undefined items before the commit
execute("git reset --hard %s" % commit_a)
undefined_a, _ = check_symbols(opts.ignore)
# get undefined items for the commit
execute("git reset --hard %s" % commit_b)
undefined_b, defined = check_symbols(opts.ignore)
# report cases that are present for the commit but not before
for feature in sorted(undefined_b):
# feature has not been undefined before
if not feature in undefined_a:
files = sorted(undefined_b.get(feature))
undefined[feature] = files
# check if there are new files that reference the undefined feature
else:
files = sorted(undefined_b.get(feature) -
undefined_a.get(feature))
if files:
undefined[feature] = files
# reset to head
execute("git reset --hard %s" % head)
# default to check the entire tree
else:
undefined, defined = check_symbols(opts.ignore)
# now print the output
for feature in sorted(undefined):
print red(feature)
files = sorted(undefined.get(feature))
print "%s: %s" % (yel("Referencing files"), ", ".join(files))
sims = find_sims(feature, opts.ignore, defined)
sims_out = yel("Similar symbols")
if sims:
print "%s: %s" % (sims_out, ', '.join(sims))
else:
print "%s: %s" % (sims_out, "no similar symbols found")
if opts.find:
print "%s:" % yel("Commits changing symbol")
commits = find_commits(feature, opts.diff)
if commits:
for commit in commits:
commit = commit.split(" ", 1)
print "\t- %s (\"%s\")" % (yel(commit[0]), commit[1])
else:
print "\t- no commit found"
print # new line
def yel(string):
"""
Color %string yellow.
"""
return "\033[33m%s\033[0m" % string
def red(string):
"""
Color %string red.
"""
return "\033[31m%s\033[0m" % string
def execute(cmd):
"""Execute %cmd and return stdout. Exit in case of error."""
pop = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
(stdout, _) = pop.communicate() # wait until finished
if pop.returncode != 0:
sys.exit(stdout)
return stdout
def find_commits(symbol, diff):
"""Find commits changing %symbol in the given range of %diff."""
commits = execute("git log --pretty=oneline --abbrev-commit -G %s %s"
% (symbol, diff))
return [x for x in commits.split("\n") if x]
def tree_is_dirty():
"""Return true if the current working tree is dirty (i.e., if any file has
been added, deleted, modified, renamed or copied but not committed)."""
stdout = execute("git status --porcelain")
for line in stdout:
if re.findall(r"[URMADC]{1}", line[:2]):
return True
return False
def get_head():
"""Return commit hash of current HEAD."""
stdout = execute("git rev-parse HEAD")
return stdout.strip('\n')
def partition(lst, size):
"""Partition list @lst into eveni-sized lists of size @size."""
return [lst[i::size] for i in xrange(size)]
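# e.g. partition([1, 2, 3, 4, 5], 2) -> [[1, 3, 5], [2, 4]] (round-robin split)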
def init_worker():
"""Set signal handler to ignore SIGINT."""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def find_sims(symbol, ignore, defined = []):
"""Return a list of max. ten Kconfig symbols that are string-similar to
@symbol."""
if defined:
return sorted(difflib.get_close_matches(symbol, set(defined), 10))
pool = Pool(cpu_count(), init_worker)
kfiles = []
for gitfile in get_files():
if REGEX_FILE_KCONFIG.match(gitfile):
kfiles.append(gitfile)
arglist = []
for part in partition(kfiles, cpu_count()):
arglist.append((part, ignore))
for res in pool.map(parse_kconfig_files, arglist):
defined.extend(res[0])
return sorted(difflib.get_close_matches(symbol, set(defined), 10))
def get_files():
"""Return a list of all files in the current git directory."""
# use 'git ls-files' to get the worklist
stdout = execute("git ls-files")
if len(stdout) > 0 and stdout[-1] == "\n":
stdout = stdout[:-1]
files = []
for gitfile in stdout.rsplit("\n"):
if ".git" in gitfile or "ChangeLog" in gitfile or \
".log" in gitfile or os.path.isdir(gitfile) or \
gitfile.startswith("tools/"):
continue
files.append(gitfile)
return files
def check_symbols(ignore):
"""Find undefined Kconfig symbols and return a dict with the symbol as key
and a list of referencing files as value. Files matching %ignore are not
checked for undefined symbols."""
pool = Pool(cpu_count(), init_worker)
try:
return check_symbols_helper(pool, ignore)
except KeyboardInterrupt:
pool.terminate()
pool.join()
sys.exit(1)
def check_symbols_helper(pool, ignore):
"""Helper method for check_symbols(). Used to catch keyboard interrupts in
check_symbols() in order to properly terminate running worker processes."""
source_files = []
kconfig_files = []
defined_features = []
referenced_features = dict() # {file: [features]}
for gitfile in get_files():
if REGEX_FILE_KCONFIG.match(gitfile):
kconfig_files.append(gitfile)
else:
if ignore and not re.match(ignore, gitfile):
continue
# add source files that do not match the ignore pattern
source_files.append(gitfile)
# parse source files
arglist = partition(source_files, cpu_count())
for res in pool.map(parse_source_files, arglist):
referenced_features.update(res)
# parse kconfig files
arglist = []
for part in partition(kconfig_files, cpu_count()):
arglist.append((part, ignore))
for res in pool.map(parse_kconfig_files, arglist):
defined_features.extend(res[0])
referenced_features.update(res[1])
defined_features = set(defined_features)
# inverse mapping of referenced_features to dict(feature: [files])
inv_map = dict()
for _file, features in referenced_features.iteritems():
for feature in features:
inv_map[feature] = inv_map.get(feature, set())
inv_map[feature].add(_file)
referenced_features = inv_map
undefined = {} # {feature: [files]}
for feature in sorted(referenced_features):
# filter some false positives
if feature == "FOO" or feature == "BAR" or \
feature == "FOO_BAR" or feature == "XXX":
continue
if feature not in defined_features:
if feature.endswith("_MODULE"):
# avoid false positives for kernel modules
if feature[:-len("_MODULE")] in defined_features:
continue
undefined[feature] = referenced_features.get(feature)
return undefined, defined_features
def parse_source_files(source_files):
"""Parse each source file in @source_files and return dictionary with source
    files as keys and lists of referenced Kconfig symbols as values."""
referenced_features = dict()
for sfile in source_files:
referenced_features[sfile] = parse_source_file(sfile)
return referenced_features
def parse_source_file(sfile):
"""Parse @sfile and return a list of referenced Kconfig features."""
lines = []
references = []
if not os.path.exists(sfile):
return references
with open(sfile, "r") as stream:
lines = stream.readlines()
for line in lines:
if not "CONFIG_" in line:
continue
features = REGEX_SOURCE_FEATURE.findall(line)
for feature in features:
if not REGEX_FILTER_FEATURES.search(feature):
continue
references.append(feature)
return references
def get_features_in_line(line):
"""Return mentioned Kconfig features in @line."""
return REGEX_FEATURE.findall(line)
def parse_kconfig_files(args):
"""Parse kconfig files and return tuple of defined and references Kconfig
symbols. Note, @args is a tuple of a list of files and the @ignore
pattern."""
kconfig_files = args[0]
ignore = args[1]
defined_features = []
referenced_features = dict()
for kfile in kconfig_files:
defined, references = parse_kconfig_file(kfile)
defined_features.extend(defined)
if ignore and re.match(ignore, kfile):
# do not collect references for files that match the ignore pattern
continue
referenced_features[kfile] = references
return (defined_features, referenced_features)
def parse_kconfig_file(kfile):
"""Parse @kfile and update feature definitions and references."""
lines = []
defined = []
references = []
skip = False
if not os.path.exists(kfile):
return defined, references
with open(kfile, "r") as stream:
lines = stream.readlines()
for i in range(len(lines)):
line = lines[i]
line = line.strip('\n')
line = line.split("#")[0] # ignore comments
if REGEX_KCONFIG_DEF.match(line):
feature_def = REGEX_KCONFIG_DEF.findall(line)
defined.append(feature_def[0])
skip = False
elif REGEX_KCONFIG_HELP.match(line):
skip = True
elif skip:
# ignore content of help messages
pass
elif REGEX_KCONFIG_STMT.match(line):
line = REGEX_QUOTES.sub("", line)
features = get_features_in_line(line)
# multi-line statements
while line.endswith("\\"):
i += 1
line = lines[i]
line = line.strip('\n')
features.extend(get_features_in_line(line))
for feature in set(features):
if REGEX_NUMERIC.match(feature):
# ignore numeric values
continue
references.append(feature)
return defined, references
if __name__ == "__main__":
main()
|
gpl-2.0
|
ridfrustum/lettuce
|
tests/integration/lib/Django-1.3/django/core/management/commands/inspectdb.py
|
203
|
7614
|
import keyword
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(NoArgsCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
option_list = NoArgsCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.'),
)
requires_model_validation = False
db_module = 'django.db'
def handle_noargs(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
cursor = connection.cursor()
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
yield "# into your database."
yield ''
yield 'from %s import models' % self.db_module
yield ''
for table_name in connection.introspection.get_table_list(cursor):
yield 'class %s(models.Model):' % table2model(table_name)
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
column_name = row[0]
att_name = column_name.lower()
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
# If the column name can't be used verbatim as a Python
# attribute, set the "db_column" for this Field.
if ' ' in att_name or '-' in att_name or keyword.iskeyword(att_name) or column_name != att_name:
extra_params['db_column'] = column_name
# Modify the field name to make it Python-compatible.
if ' ' in att_name:
att_name = att_name.replace(' ', '_')
comment_notes.append('Field renamed to remove spaces.')
if '-' in att_name:
att_name = att_name.replace('-', '_')
comment_notes.append('Field renamed to remove dashes.')
if column_name != att_name:
comment_notes.append('Field name made lowercase.')
if i in relations:
rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1])
field_type = 'ForeignKey(%s' % rel_to
if att_name.endswith('_id'):
att_name = att_name[:-3]
else:
extra_params['db_column'] = column_name
else:
# Calling `get_field_type` to get the field type string and any
                    # additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
field_type += '('
if keyword.iskeyword(att_name):
att_name += '_field'
comment_notes.append('Field renamed because it was a Python reserved word.')
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
continue
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
extra_params['blank'] = True
if not field_type in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = models.%s' % (att_name, field_type)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join(['%s=%r' % (k, v) for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
return [' class Meta:',
' db_table = %r' % table_name,
'']
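# Illustrative example of the generated output (hypothetical table 'book_author'
# with an auto-increment primary key 'id' and a NOT NULL varchar(100) column
# 'name'; the 'id' field is omitted because Django adds it automatically):
#
#     class BookAuthor(models.Model):
#         name = models.CharField(max_length=100)
#         class Meta:
#             db_table = 'book_author'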
|
gpl-3.0
|